From 84e332335323c6149e8d524af34fc8bc4307f454 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Thu, 19 Nov 2020 17:08:22 +0700 Subject: [PATCH 01/31] add UnixNanoNow to TimeSource Signed-off-by: William Albertus Dembo --- src/limiter/cache.go | 16 ++++++ src/utils/time.go | 4 ++ test/mocks/limiter/limiter.go | 92 ++++++++++++++++++++++++++++++++++- 3 files changed, 110 insertions(+), 2 deletions(-) diff --git a/src/limiter/cache.go b/src/limiter/cache.go index 5ca07edea..59a79a6ba 100644 --- a/src/limiter/cache.go +++ b/src/limiter/cache.go @@ -6,6 +6,22 @@ import ( "golang.org/x/net/context" ) +// Interface for a time source. +type TimeSource interface { + // @return the current unix time in seconds. + UnixNow() int64 + // @return the current unix time in nanoseconds. + UnixNanoNow() int64 +} + +// Interface for a rand Source for expiration jitter. +type JitterRandSource interface { + // @return a non-negative pseudo-random 63-bit integer as an int64. + Int63() int64 + // @param seed initializes pseudo-random generator to a deterministic state. + Seed(seed int64) +} + // Interface for interacting with a cache backend for rate limiting. type RateLimitCache interface { // Contact the cache and perform rate limiting for a set of descriptors and limits. diff --git a/src/utils/time.go b/src/utils/time.go index e7978cc6c..f3fdf53d3 100644 --- a/src/utils/time.go +++ b/src/utils/time.go @@ -24,6 +24,10 @@ func (this *timeSourceImpl) UnixNow() int64 { return time.Now().Unix() } +func (this *timeSourceImpl) UnixNanoNow() int64 { + return time.Now().UnixNano() +} + // rand for jitter. 
type lockedSource struct { lk sync.Mutex diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go index 48f995a1f..6b73f2dc4 100644 --- a/test/mocks/limiter/limiter.go +++ b/test/mocks/limiter/limiter.go @@ -49,8 +49,96 @@ func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) } -// Flush mocks base method -func (m *MockRateLimitCache) Flush() { +// MockTimeSource is a mock of TimeSource interface +type MockTimeSource struct { + ctrl *gomock.Controller + recorder *MockTimeSourceMockRecorder +} + +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { + mock *MockTimeSource +} + +// NewMockTimeSource creates a new mock instance +func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { + mock := &MockTimeSource{ctrl: ctrl} + mock.recorder = &MockTimeSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder +} + +// UnixNanoNow mocks base method +func (m *MockTimeSource) UnixNanoNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNanoNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNanoNow indicates an expected call of UnixNanoNow +func (mr *MockTimeSourceMockRecorder) UnixNanoNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNanoNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNanoNow)) +} + +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) +} + +// MockJitterRandSource is a mock of JitterRandSource interface +type MockJitterRandSource struct { + ctrl *gomock.Controller + recorder *MockJitterRandSourceMockRecorder +} + +// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource +type MockJitterRandSourceMockRecorder struct { + mock *MockJitterRandSource +} + +// NewMockJitterRandSource creates a new mock instance +func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { + mock := &MockJitterRandSource{ctrl: ctrl} + mock.recorder = &MockJitterRandSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { + return m.recorder +} + +// Int63 mocks base method +func (m *MockJitterRandSource) Int63() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Int63") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Int63 indicates an expected call of Int63 +func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) +} + +// Seed mocks base method +func (m *MockJitterRandSource) Seed(arg0 int64) { m.ctrl.T.Helper() m.ctrl.Call(m, "Flush") } From 8f517f421aeceb7f58c777466a02858863f37b65 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Thu, 19 Nov 2020 17:20:39 +0700 Subject: [PATCH 02/31] add cache implementation for windowed rate limit Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 229 +++++++++++++++++++++++++ test/redis/windowed_cache_impl_test.go | 125 ++++++++++++++ 2 files changed, 354 insertions(+) create mode 100644 src/redis/windowed_cache_impl.go create mode 100644 test/redis/windowed_cache_impl_test.go diff --git 
a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go new file mode 100644 index 000000000..326261cdc --- /dev/null +++ b/src/redis/windowed_cache_impl.go @@ -0,0 +1,229 @@ +package redis + +import ( + "math" + "math/rand" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/utils" + "github.com/golang/protobuf/ptypes/duration" + logger "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +type windowedRateLimitCacheImpl struct { + client Client + // Optional Client for a dedicated cache of per second limits. + // If this client is nil, then the Cache will use the client for all + // limits regardless of unit. If this client is not nil, then it + // is used for limits that have a SECOND unit. + perSecondClient Client + timeSource limiter.TimeSource + jitterRand *rand.Rand + expirationJitterMaxSeconds int64 + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache + nearLimitRatio float32 +} + +func maxInt64(a int64, b int64) int64 { + if a > b { + return a + } + return b +} + +func minInt64(a int64, b int64) int64 { + if a < b { + return a + } + return b +} + +func nanosecondsToDuration(nanoseconds int64) *duration.Duration { + nanos := nanoseconds + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration.Duration{Seconds: secs, Nanos: int32(nanos)} +} + +func secondsToNanoseconds(second int64) int64 { + return second * 1e9 +} + +func nanosecondsToSeconds(nanoseconds int64) int64 { + return nanoseconds / 1e9 +} + +func windowedPipelineAppend(client Client, pipeline *Pipeline, key string, result *int64, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, nil, "SETNX", key, int64(0)) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) + 
*pipeline = client.PipeAppend(*pipeline, result, "GET", key) +} + +func windowedSetNewTatPipelineAppend(client Client, pipeline *Pipeline, key string, newTat int64, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, nil, "SET", key, newTat) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) +} + +func (this *windowedRateLimitCacheImpl) DoLimit( + ctx context.Context, + request *pb.RateLimitRequest, + limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + + logger.Debugf("starting windowed cache lookup") + + // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. + hitsAddend := max(1, request.HitsAddend) + + // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() + // returns an empty string in the key if there is no limit so that we can keep the arrays + // all the same size. + assert.Assert(len(request.Descriptors) == len(limits)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) + for i := 0; i < len(request.Descriptors); i++ { + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], 0) + + // Increase statistics for limits hit by their respective requests. + if limits[i] != nil { + limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + } + } + + // Get existing tat value for each cache keys + tats := make([]int64, len(request.Descriptors)) + var pipeline, perSecondPipeline Pipeline + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + logger.Debugf("looking up tat for cache key: %s", cacheKey.Key) + + expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) + + // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. 
+ if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipeline == nil { + perSecondPipeline = Pipeline{} + } + windowedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, &tats[i], expirationSeconds) + } else { + if pipeline == nil { + pipeline = Pipeline{} + } + windowedPipelineAppend(this.client, &pipeline, cacheKey.Key, &tats[i], expirationSeconds) + } + } + + if pipeline != nil { + checkError(this.client.PipeDo(pipeline)) + pipeline = nil + } + if perSecondPipeline != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + perSecondPipeline = nil + } + + // Rate limit GCRA logic + responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) + now := this.timeSource.UnixNanoNow() + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 0, + } + continue + } + + // Time during computation should be in nanosecond + limit := int64(limits[i].Limit.RequestsPerUnit) + period := secondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) + quantity := int64(hitsAddend) + arrivedAt := now + + emissionInterval := period / limit + increment := emissionInterval * quantity + tat := maxInt64(tats[i], arrivedAt) + newTat := tat + increment + delayVariationTolerance := limit * emissionInterval + previousAllowAt := tat - delayVariationTolerance + allowAt := newTat - delayVariationTolerance + diff := arrivedAt - allowAt + limitRemaining := int64(math.Ceil(float64((arrivedAt - allowAt) / emissionInterval))) + previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) + previousLimitRemaining = maxInt64(previousLimitRemaining, 0) + nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) * (1.0 - this.nearLimitRatio)))) + + if diff < 0 { + 
responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[i].Limit, + LimitRemaining: 0, + DurationUntilReset: nanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), + } + + limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) + limits[i].Stats.NearLimit.Add(uint64(minInt64(previousLimitRemaining, nearLimitWindow))) + continue + } + + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limits[i].Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: nanosecondsToDuration(newTat - arrivedAt), + } + + hitNearLimit := quantity - (maxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) + if hitNearLimit > 0 { + limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) + } + + // Store newTat + expirationSeconds := nanosecondsToSeconds(newTat-arrivedAt) + 1 + if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipeline == nil { + perSecondPipeline = Pipeline{} + } + windowedSetNewTatPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, newTat, expirationSeconds) + } else { + if pipeline == nil { + pipeline = Pipeline{} + } + windowedSetNewTatPipelineAppend(this.client, &pipeline, cacheKey.Key, newTat, expirationSeconds) + } + } + if pipeline != nil { + checkError(this.client.PipeDo(pipeline)) + } + if perSecondPipeline != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + } + return responseDescriptorStatuses +} + +func NewWindowedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache { + return &windowedRateLimitCacheImpl{ + client: client, + perSecondClient: perSecondClient, + timeSource: timeSource, + jitterRand: jitterRand, + 
expirationJitterMaxSeconds: expirationJitterMaxSeconds, + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + localCache: localCache, + nearLimitRatio: nearLimitRatio, + } +} + +// TODO: Test nearlimit diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go new file mode 100644 index 000000000..49038e7a6 --- /dev/null +++ b/test/redis/windowed_cache_impl_test.go @@ -0,0 +1,125 @@ +package redis_test + +import ( + "math/rand" + "testing" + + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/test/common" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/duration" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" +) + +func TestRedisWindowed(t *testing.T) { + t.Run("WithoutPerSecondRedis", testRedisWindowed(false)) + t.Run("WithPerSecondRedis", testRedisWindowed(true)) +} + +func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { + return func(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + perSecondClient := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + var cache limiter.RateLimitCache + if usePerSecondRedis { + cache = redis.NewWindowedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } else { + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } + statsStore := stats.NewStore(stats.NewNullSink(), false) + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + var 
clientUsed *mock_redis.MockClient + if usePerSecondRedis { + clientUsed = perSecondClient + } else { + clientUsed = client + } + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + clientUsed = client + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key2_value2_subkey2_subvalue2_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_0", int64(60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", 
"domain_key2_value2_subkey2_subvalue2_0").SetArg(1, int64(70e9)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request = common.NewRateLimitRequest( + "domain", + [][][2]string{ + {{"key2", "value2"}}, + {{"key2", "value2"}, {"subkey2", "subvalue2"}}, + }, 1) + limits = []*config.RateLimit{ + nil, + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 69}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + + clientUsed = client + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(5) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key3_value3_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_0", int64(60*60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_0").SetArg(1, int64(60*60*1e9)).DoAndReturn(pipeAppend) + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key3_value3_subkey3_subvalue3_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_0", int64(60*60*24)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_subkey3_subvalue3_0").SetArg(1, int64(60*60*24*1e9)).DoAndReturn(pipeAppend) + + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request = 
common.NewRateLimitRequest( + "domain", + [][][2]string{ + {{"key3", "value3"}}, + {{"key3", "value3"}, {"subkey3", "subvalue3"}}, + }, 1) + limits = []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: (60 * 60) - 1}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: (60 * 60 * 24) - 1}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + } +} From b1aadd1b12703230f47d2ce74dff80484f407ee7 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Fri, 20 Nov 2020 13:24:22 +0700 Subject: [PATCH 03/31] add near limit test for windowed rate limit implementation Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 2 - test/redis/windowed_cache_impl_test.go | 68 ++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 326261cdc..ecbaa04bd 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -225,5 +225,3 @@ func NewWindowedRateLimitCacheImpl(client Client, perSecondClient Client, timeSo nearLimitRatio: nearLimitRatio, } } - -// TODO: Test nearlimit diff --git a/test/redis/windowed_cache_impl_test.go 
b/test/redis/windowed_cache_impl_test.go index 49038e7a6..a58813a42 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -123,3 +123,71 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) } } + +func TestNearLimitWindowed(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + statsStore := stats.NewStore(stats.NewNullSink(), false) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key4_value4", statsStore)} + + // Test Near Limit Stats. Under Near Limit Ratio + timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(50e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(50e9+6e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(50+6-50+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: 
&duration.Duration{Seconds: 6}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + // Test Near Limit Stats. At Near Limit Ratio, still OK + timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(98e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(98e9+6e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(98+6-50+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 54}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases + // when we are near limit, not after we have passed the limit. 
+ timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(110e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 110 - 50}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) +} From f2c63247a98b406a3aca91eba65decad89f2644e Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Fri, 20 Nov 2020 13:39:47 +0700 Subject: [PATCH 04/31] add random jitter test for windowed rate limit implementation Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 3 +++ test/redis/windowed_cache_impl_test.go | 33 ++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index ecbaa04bd..c1d5881a2 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -192,6 +192,9 @@ func (this *windowedRateLimitCacheImpl) DoLimit( // Store newTat expirationSeconds := nanosecondsToSeconds(newTat-arrivedAt) + 1 + if this.expirationJitterMaxSeconds > 0 { + expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) + } if this.perSecondClient != nil && cacheKey.PerSecond { if perSecondPipeline == nil { perSecondPipeline = Pipeline{} diff --git a/test/redis/windowed_cache_impl_test.go 
b/test/redis/windowed_cache_impl_test.go index a58813a42..65e7a57ec 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -191,3 +191,36 @@ func TestNearLimitWindowed(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) } + +func TestRedisWindowedWithJitter(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + jitterSource := mock_limiter.NewMockJitterRandSource(controller) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8) + statsStore := stats.NewStore(stats.NewNullSink(), false) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + jitterSource.EXPECT().Int63().Return(int64(100)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(101)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, 
CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) +} From ab6f36045dd12919e867c020bdb02226b754622a Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Fri, 20 Nov 2020 14:23:02 +0700 Subject: [PATCH 05/31] add benchmark for windowed ratelimit Signed-off-by: William Albertus Dembo --- test/redis/bench_test.go | 92 ++++++++++++++++++++++++++++------------ 1 file changed, 64 insertions(+), 28 deletions(-) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 4b1766b27..a775a82fe 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -38,13 +38,20 @@ func BenchmarkParallelDoLimit(b *testing.B) { }) } - mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { + mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int, rateLimitType string) func(*testing.B) { return func(b *testing.B) { statsStore := stats.NewStore(stats.NewNullSink(), false) client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() - cache := redis.NewFixedRateLimitCacheImpl(client, nil, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) + var cache limiter.RateLimitCache + if rateLimitType == "FIXED" { + cache = redis.NewFixedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) + } else if rateLimitType == "WINDOWED" { + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) + } else { + b.Fatalf("unknown rate limit type %s", rateLimitType) + } 
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -65,30 +72,59 @@ func BenchmarkParallelDoLimit(b *testing.B) { } } - b.Run("no pipeline", mkDoLimitBench(0, 0)) - - b.Run("pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1)) - b.Run("pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1)) - b.Run("pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1)) - b.Run("pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1)) - - b.Run("pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2)) - b.Run("pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2)) - b.Run("pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2)) - b.Run("pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2)) - - b.Run("pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4)) - b.Run("pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4)) - b.Run("pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4)) - b.Run("pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4)) - - b.Run("pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8)) - b.Run("pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8)) - b.Run("pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8)) - b.Run("pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8)) - - b.Run("pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16)) - b.Run("pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16)) - b.Run("pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16)) - b.Run("pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16)) + // Fixed ratelimit + b.Run("fixed ratelimit with no pipeline", mkDoLimitBench(0, 0, "FIXED")) + + b.Run("fixed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "FIXED")) + b.Run("fixed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "FIXED")) + 
b.Run("fixed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "FIXED")) + b.Run("fixed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "FIXED")) + + b.Run("fixed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "FIXED")) + b.Run("fixed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "FIXED")) + b.Run("fixed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "FIXED")) + b.Run("fixed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "FIXED")) + + b.Run("fixed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "FIXED")) + b.Run("fixed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "FIXED")) + b.Run("fixed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "FIXED")) + b.Run("fixed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "FIXED")) + + b.Run("fixed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "FIXED")) + b.Run("fixed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "FIXED")) + b.Run("fixed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "FIXED")) + b.Run("fixed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "FIXED")) + + b.Run("fixed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "FIXED")) + b.Run("fixed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "FIXED")) + b.Run("fixed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "FIXED")) + b.Run("fixed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "FIXED")) + + // Windowed ratelimit + b.Run("windowed ratelimit with no pipeline", mkDoLimitBench(0, 0, "WINDOWED")) + + b.Run("windowed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "WINDOWED")) + 
b.Run("windowed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "WINDOWED")) + + b.Run("windowed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "WINDOWED")) + + b.Run("windowed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "WINDOWED")) + + b.Run("windowed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "WINDOWED")) + + b.Run("windowed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "WINDOWED")) + b.Run("windowed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "WINDOWED")) } From 
e516c200e347fd5b31328afc2da4ca5a225796e8 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Fri, 20 Nov 2020 17:28:02 +0700 Subject: [PATCH 06/31] add local cache to windowed ratelimit Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 33 +++++++ test/redis/windowed_cache_impl_test.go | 114 +++++++++++++++++++++++++ 2 files changed, 147 insertions(+) diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index c1d5881a2..2cdf4ebdd 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -96,6 +96,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } // Get existing tat value for each cache keys + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline Pipeline for i, cacheKey := range cacheKeys { @@ -103,6 +104,16 @@ func (this *windowedRateLimitCacheImpl) DoLimit( continue } + if this.localCache != nil { + // Get returns the value or not found error. 
+ _, err := this.localCache.Get([]byte(cacheKey.Key)) + if err == nil { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue + } + } + logger.Debugf("looking up tat for cache key: %s", cacheKey.Key) expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) @@ -144,6 +155,21 @@ func (this *windowedRateLimitCacheImpl) DoLimit( continue } + if isOverLimitWithLocalCache[i] { + secondsToReset := utils.UnitToDivider(limits[i].Limit.Unit) + secondsToReset -= nanosecondsToSeconds(now) % secondsToReset + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[i].Limit, + LimitRemaining: 0, + DurationUntilReset: &duration.Duration{Seconds: secondsToReset}, + } + limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) + limits[i].Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) + continue + } + // Time during computation should be in nanosecond limit := int64(limits[i].Limit.RequestsPerUnit) period := secondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) @@ -174,6 +200,13 @@ func (this *windowedRateLimitCacheImpl) DoLimit( limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) limits[i].Stats.NearLimit.Add(uint64(minInt64(previousLimitRemaining, nearLimitWindow))) + + if this.localCache != nil { + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(nanosecondsToSeconds(-diff))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) + } + } continue } diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go index 65e7a57ec..e9c463ef4 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -4,6 +4,7 @@ import ( "math/rand" "testing" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" 
"github.com/envoyproxy/ratelimit/src/limiter" @@ -192,6 +193,119 @@ func TestNearLimitWindowed(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) } +func TestWindowedOverLimitWithLocalCache(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + localCache := freecache.NewCache(100) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8) + sink := &common.TestStatSink{} + statsStore := stats.NewStore(sink, true) + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + + // Test Near Limit Stats. 
Under Near Limit Ratio + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(71*4*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(72*4*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(12*4*60+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 3, DurationUntilReset: &duration.Duration{Seconds: 12 * 4 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + + // Test Near Limit Stats. 
At Near Limit Ratio, still OK + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(72*4*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(73*4*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(13*4*60+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + + limits = []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: &duration.Duration{Seconds: 13 * 4 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + + // Test Over limit stats + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(75*4*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + + limits = []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 15 * 4 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + + // Test Over limit stats with local cache + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) + + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + + limits = []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 15 * 4 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) +} + func TestRedisWindowedWithJitter(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) From 4a4c9cce9d688c2e481723633667d03e2b6b1f93 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Mon, 23 Nov 2020 09:59:57 +0700 Subject: [PATCH 07/31] configure rate limit algorithm to use from setting Signed-off-by: William Albertus Dembo --- src/redis/cache_impl.go | 32 ++++++++---- src/settings/settings.go | 1 + test/redis/bench_test.go | 102 +++++++++++++++++++-------------------- 3 files changed, 75 insertions(+), 60 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index c65447da8..0649503d8 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -7,7 +7,7 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" - "github.com/envoyproxy/ratelimit/src/utils" + logger 
"github.com/sirupsen/logrus" ) func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { @@ -20,12 +20,26 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) - return NewFixedRateLimitCacheImpl( - otherPool, - perSecondPool, - timeSource, - jitterRand, - expirationJitterMaxSeconds, - localCache, - s.NearLimitRatio) + if s.RateLimitAlgorithm == "FIXED_WINDOW" { + return NewFixedRateLimitCacheImpl( + otherPool, + perSecondPool, + timeSource, + jitterRand, + expirationJitterMaxSeconds, + localCache, + s.NearLimitRatio) + } else if s.RateLimitAlgorithm == "ROLLING_WINDOW" { + return NewWindowedRateLimitCacheImpl( + otherPool, + perSecondPool, + timeSource, + jitterRand, + expirationJitterMaxSeconds, + localCache, + s.NearLimitRatio) + } else { + logger.Fatalf("Unknown rate limit algorithm. 
%s\n", s.RateLimitAlgorithm) + } + return nil } diff --git a/src/settings/settings.go b/src/settings/settings.go index 38a2474c2..960db5585 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -23,6 +23,7 @@ type Settings struct { RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` + RateLimitAlgorithm string `envconfig:"RATE_LIMIT_ALGORITHM" default:"FIXED_WINDOW"` RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index a775a82fe..c96cf2fde 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -38,19 +38,19 @@ func BenchmarkParallelDoLimit(b *testing.B) { }) } - mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int, rateLimitType string) func(*testing.B) { + mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int, rateLimitAlgorithm string) func(*testing.B) { return func(b *testing.B) { statsStore := stats.NewStore(stats.NewNullSink(), false) client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() var cache limiter.RateLimitCache - if rateLimitType == "FIXED" { + if rateLimitAlgorithm == "FIXED_WINDOW" { cache = redis.NewFixedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) - } else if rateLimitType == "WINDOWED" { + } else if rateLimitAlgorithm == "ROLLING_WINDOW" { cache = redis.NewWindowedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) } else { - b.Fatalf("unknown rate limit type %s", 
rateLimitType) + b.Fatalf("unknown rate limit type %s", rateLimitAlgorithm) } request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -73,58 +73,58 @@ func BenchmarkParallelDoLimit(b *testing.B) { } // Fixed ratelimit - b.Run("fixed ratelimit with no pipeline", mkDoLimitBench(0, 0, "FIXED")) + b.Run("fixed ratelimit with no pipeline", mkDoLimitBench(0, 0, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "FIXED")) - b.Run("fixed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "FIXED")) - b.Run("fixed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "FIXED")) - b.Run("fixed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "FIXED")) + b.Run("fixed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "FIXED")) - b.Run("fixed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "FIXED")) - b.Run("fixed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "FIXED")) - b.Run("fixed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "FIXED")) + b.Run("fixed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 150us 2", 
mkDoLimitBench(150*time.Microsecond, 2, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "FIXED")) - b.Run("fixed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "FIXED")) - b.Run("fixed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "FIXED")) - b.Run("fixed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "FIXED")) + b.Run("fixed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "FIXED")) - b.Run("fixed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "FIXED")) - b.Run("fixed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "FIXED")) - b.Run("fixed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "FIXED")) + b.Run("fixed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "FIXED")) - b.Run("fixed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "FIXED")) - b.Run("fixed 
ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "FIXED")) - b.Run("fixed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "FIXED")) + b.Run("fixed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "FIXED_WINDOW")) // Windowed ratelimit - b.Run("windowed ratelimit with no pipeline", mkDoLimitBench(0, 0, "WINDOWED")) - - b.Run("windowed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "WINDOWED")) - - b.Run("windowed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "WINDOWED")) - - b.Run("windowed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "WINDOWED")) - - b.Run("windowed 
ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "WINDOWED")) - - b.Run("windowed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "WINDOWED")) - b.Run("windowed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "WINDOWED")) + b.Run("windowed ratelimit with no pipeline", mkDoLimitBench(0, 0, "ROLLING_WINDOW")) + + b.Run("windowed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "ROLLING_WINDOW")) + + b.Run("windowed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "ROLLING_WINDOW")) + + b.Run("windowed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, 
"ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "ROLLING_WINDOW")) + + b.Run("windowed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "ROLLING_WINDOW")) + + b.Run("windowed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "ROLLING_WINDOW")) } From b69f04d6aeb1f3db0876a7ae286af06a49535ac5 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Mon, 23 Nov 2020 10:00:18 +0700 Subject: [PATCH 08/31] add readme on rate limit algorithm Signed-off-by: William Albertus Dembo --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/README.md b/README.md index fbfeb0636..b5ce7118a 100644 --- a/README.md +++ b/README.md @@ -181,6 +181,21 @@ The rate limit block specifies the actual rate limit that will be used when ther Currently the service supports per second, minute, hour, and day limits. More types of limits may be added in the future based on user demand. +### Rate limit algorithm + +Ratelimit supports two algorithms: + +1. Fixed window +For a limit of 60 requests per hour, there can only be 60 requests in a single time window (e.g. 01:00 - 01:59). 
+Fixed window algorithm does not care when the requests arrive, all 60 can arrive at 01:01 or 01:50 and the limit will still reset at 02:00. + +2. Rolling window +For a limit of 60 requests per hour, initially it is able to take a burst of 60 requests at once, then the limit restores by 1 each minute. +Requests are allowed as long as there's still some available limit. + +Configure the rate limit algorithm with the `RATE_LIMIT_ALGORITHM` environment variable. +Use `FIXED_WINDOW` and `ROLLING_WINDOW` respectively. + ### Examples #### Example 1 From d72f951aaebb8b9e350d13f97086fc8054c3008b Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Wed, 25 Nov 2020 12:06:42 +0700 Subject: [PATCH 09/31] use constant for rate limit algorithm Signed-off-by: William Albertus Dembo --- src/redis/cache_impl.go | 4 +- src/settings/settings.go | 3 ++ test/redis/bench_test.go | 107 +++++++++++++++++++-------------------- 3 files changed, 58 insertions(+), 56 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 0649503d8..c7e2b84c4 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -20,7 +20,7 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) - if s.RateLimitAlgorithm == "FIXED_WINDOW" { + if s.RateLimitAlgorithm == settings.FixedRateLimit { return NewFixedRateLimitCacheImpl( otherPool, perSecondPool, @@ -29,7 +29,7 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca expirationJitterMaxSeconds, localCache, s.NearLimitRatio) - } else if s.RateLimitAlgorithm == "ROLLING_WINDOW" { + } else if s.RateLimitAlgorithm == settings.WindowedRateLimit { return NewWindowedRateLimitCacheImpl( otherPool, perSecondPool, diff --git a/src/settings/settings.go b/src/settings/settings.go index 960db5585..5a1835487 100644 --- 
a/src/settings/settings.go +++ b/src/settings/settings.go @@ -7,6 +7,9 @@ import ( "google.golang.org/grpc" ) +const FixedRateLimit = "FIXED_WINDOW" +const WindowedRateLimit = "ROLLING_WINDOW" + type Settings struct { // runtime options GrpcUnaryInterceptor grpc.ServerOption diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index c96cf2fde..664158db5 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -2,18 +2,17 @@ package redis_test import ( "context" + "math/rand" "runtime" "testing" "time" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/redis" - "github.com/envoyproxy/ratelimit/src/utils" stats "github.com/lyft/gostats" - "math/rand" - + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/test/common" ) @@ -45,9 +44,9 @@ func BenchmarkParallelDoLimit(b *testing.B) { defer client.Close() var cache limiter.RateLimitCache - if rateLimitAlgorithm == "FIXED_WINDOW" { + if rateLimitAlgorithm == settings.FixedRateLimit { cache = redis.NewFixedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) - } else if rateLimitAlgorithm == "ROLLING_WINDOW" { + } else if rateLimitAlgorithm == settings.WindowedRateLimit { cache = redis.NewWindowedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) } else { b.Fatalf("unknown rate limit type %s", rateLimitAlgorithm) @@ -73,58 +72,58 @@ func BenchmarkParallelDoLimit(b *testing.B) { } // Fixed ratelimit - b.Run("fixed ratelimit with no pipeline", mkDoLimitBench(0, 0, "FIXED_WINDOW")) + b.Run("fixed ratelimit with no pipeline", mkDoLimitBench(0, 0, settings.FixedRateLimit)) - b.Run("fixed ratelimit with pipeline 35us 1", 
mkDoLimitBench(35*time.Microsecond, 1, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, settings.FixedRateLimit)) - b.Run("fixed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, settings.FixedRateLimit)) - b.Run("fixed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 
4, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, settings.FixedRateLimit)) - b.Run("fixed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, settings.FixedRateLimit)) - b.Run("fixed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "FIXED_WINDOW")) - b.Run("fixed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "FIXED_WINDOW")) + b.Run("fixed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, 
settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, settings.FixedRateLimit)) + b.Run("fixed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, settings.FixedRateLimit)) // Windowed ratelimit - b.Run("windowed ratelimit with no pipeline", mkDoLimitBench(0, 0, "ROLLING_WINDOW")) - - b.Run("windowed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, "ROLLING_WINDOW")) - - b.Run("windowed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, "ROLLING_WINDOW")) - - b.Run("windowed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, "ROLLING_WINDOW")) - - b.Run("windowed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 75us 8", 
mkDoLimitBench(75*time.Microsecond, 8, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, "ROLLING_WINDOW")) - - b.Run("windowed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, "ROLLING_WINDOW")) - b.Run("windowed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, "ROLLING_WINDOW")) + b.Run("windowed ratelimit with no pipeline", mkDoLimitBench(0, 0, settings.WindowedRateLimit)) + + b.Run("windowed ratelimit with pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1, settings.WindowedRateLimit)) + + b.Run("windowed ratelimit with pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2, settings.WindowedRateLimit)) + + b.Run("windowed ratelimit with pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 
4, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4, settings.WindowedRateLimit)) + + b.Run("windowed ratelimit with pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8, settings.WindowedRateLimit)) + + b.Run("windowed ratelimit with pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16, settings.WindowedRateLimit)) + b.Run("windowed ratelimit with pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16, settings.WindowedRateLimit)) } From e5704b684891e522e856be19e9707ddb97b417d2 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Mon, 7 Dec 2020 21:20:33 +0700 Subject: [PATCH 10/31] move max min to utils Signed-off-by: William Albertus Dembo --- src/redis/fixed_cache_impl.go | 102 +++++++++++++++++++++++++++---- src/redis/windowed_cache_impl.go | 24 ++------ src/utils/utilities.go | 18 ++++-- 3 files changed, 107 insertions(+), 37 deletions(-) diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index bd2502acd..ab5e44113 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -18,8 +18,13 @@ type fixedRateLimitCacheImpl struct { // If this client is nil, then the Cache will use the client for all // limits 
regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. - perSecondClient Client - baseRateLimiter *limiter.BaseRateLimiter + perSecondClient Client + timeSource limiter.TimeSource + jitterRand *rand.Rand + expirationJitterMaxSeconds int64 + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache + nearLimitRatio float32 } func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { @@ -34,11 +39,24 @@ func (this *fixedRateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") - // request.HitsAddend could be 0 (default value) if not specified by the caller in the RateLimit request. - hitsAddend := utils.Max(1, request.HitsAddend) - - // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. + hitsAddend := utils.MaxUint32(1, request.HitsAddend) + + // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() + // returns an empty string in the key if there is no limit so that we can keep the arrays + // all the same size. + assert.Assert(len(request.Descriptors) == len(limits)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) + now := this.timeSource.UnixNow() + for i := 0; i < len(request.Descriptors); i++ { + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], now) + + // Increase statistics for limits hit by their respective requests. 
+ if limits[i] != nil { + limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + } + } isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]uint32, len(request.Descriptors)) @@ -92,12 +110,70 @@ func (this *fixedRateLimitCacheImpl) DoLimit( limitAfterIncrease := results[i] limitBeforeIncrease := limitAfterIncrease - hitsAddend - - limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) - - responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) - + overLimitThreshold := limits[i].Limit.RequestsPerUnit + // The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio. + // We need to know it in both the OK and OVER_LIMIT scenarios. + nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * this.nearLimitRatio))) + + logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease) + if limitAfterIncrease > overLimitThreshold { + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[i].Limit, + LimitRemaining: 0, + DurationUntilReset: CalculateReset(limits[i].Limit, this.timeSource), + } + + // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to + // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the + // N hits was over the limit, then all the N hits were over limit. + // Otherwise, only the difference between the current limit value and the over limit threshold + // were over limit hits. 
+ if limitBeforeIncrease >= overLimitThreshold { + limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) + } else { + limits[i].Stats.OverLimit.Add(uint64(limitAfterIncrease - overLimitThreshold)) + + // If the limit before increase was below the over limit value, then some of the hits were + // in the near limit range. + limits[i].Stats.NearLimit.Add(uint64(overLimitThreshold - utils.MaxUint32(nearLimitThreshold, limitBeforeIncrease))) + } + if this.localCache != nil { + // Set the TTL of the local_cache to be the entire duration. + // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit + // cache keys in local_cache lose effectiveness. + // For example, if we have an hour limit on all mongo connections, the cache key would be + // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start + // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). + // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.UnitToDivider(limits[i].Limit.Unit))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) + } + } + } else { + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limits[i].Limit, + LimitRemaining: overLimitThreshold - limitAfterIncrease, + DurationUntilReset: CalculateReset(limits[i].Limit, this.timeSource), + } + + // The limit is OK but we additionally want to know if we are near the limit. + if limitAfterIncrease > nearLimitThreshold { + // Here we also need to assess which portion of the hitsAddend were in the near limit range. + // If all the hits were over the nearLimitThreshold, then all hits are near limit. 
Otherwise, + // only the difference between the current limit value and the near limit threshold were near + // limit hits. + if limitBeforeIncrease >= nearLimitThreshold { + limits[i].Stats.NearLimit.Add(uint64(hitsAddend)) + } else { + limits[i].Stats.NearLimit.Add(uint64(limitAfterIncrease - nearLimitThreshold)) + } + } + } } return responseDescriptorStatuses diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 2cdf4ebdd..d8bfbae58 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -30,20 +30,6 @@ type windowedRateLimitCacheImpl struct { nearLimitRatio float32 } -func maxInt64(a int64, b int64) int64 { - if a > b { - return a - } - return b -} - -func minInt64(a int64, b int64) int64 { - if a < b { - return a - } - return b -} - func nanosecondsToDuration(nanoseconds int64) *duration.Duration { nanos := nanoseconds secs := nanos / 1e9 @@ -78,7 +64,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( logger.Debugf("starting windowed cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := max(1, request.HitsAddend) + hitsAddend := utils.MaxUint32(1, request.HitsAddend) // First build a list of all cache keys that we are actually going to hit. 
GenerateCacheKey() // returns an empty string in the key if there is no limit so that we can keep the arrays @@ -178,7 +164,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( emissionInterval := period / limit increment := emissionInterval * quantity - tat := maxInt64(tats[i], arrivedAt) + tat := utils.MaxInt64(tats[i], arrivedAt) newTat := tat + increment delayVariationTolerance := limit * emissionInterval previousAllowAt := tat - delayVariationTolerance @@ -186,7 +172,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( diff := arrivedAt - allowAt limitRemaining := int64(math.Ceil(float64((arrivedAt - allowAt) / emissionInterval))) previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) - previousLimitRemaining = maxInt64(previousLimitRemaining, 0) + previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) * (1.0 - this.nearLimitRatio)))) if diff < 0 { @@ -199,7 +185,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) - limits[i].Stats.NearLimit.Add(uint64(minInt64(previousLimitRemaining, nearLimitWindow))) + limits[i].Stats.NearLimit.Add(uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow))) if this.localCache != nil { err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(nanosecondsToSeconds(-diff))) @@ -218,7 +204,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( DurationUntilReset: nanosecondsToDuration(newTat - arrivedAt), } - hitNearLimit := quantity - (maxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) + hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) if hitNearLimit > 0 { limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) } diff --git a/src/utils/utilities.go b/src/utils/utilities.go index e6029f5be..6a801af5b 100644 --- 
a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -29,13 +29,21 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { panic("should not get here") } -func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration { - sec := UnitToDivider(currentLimit.Unit) - now := timeSource.UnixNow() - return &duration.Duration{Seconds: sec - now%sec} +func MaxInt64(a int64, b int64) int64 { + if a > b { + return a + } + return b +} + +func MinInt64(a int64, b int64) int64 { + if a < b { + return a + } + return b } -func Max(a uint32, b uint32) uint32 { +func MaxUint32(a uint32, b uint32) uint32 { if a > b { return a } From b07edffc798f8d66597df0ff2be483b1de280c25 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Mon, 7 Dec 2020 22:09:41 +0700 Subject: [PATCH 11/31] add more explanation on rolling window implementation Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index d8bfbae58..040bcf4c6 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -15,6 +15,15 @@ import ( "golang.org/x/net/context" ) +// This rolling window limit is implemented using the Generic Cell Rate Algorithm (GCRA) +// GCRA works by tracking remaining limit through a time called the “theoretical arrival time” (TAT). +// Request cost is represented as a multiplier of the “emission interval”, which is derived from the duration between equally spread requests. +// TAT is seeded with the current request arrival time if not already set; the request cost is then added.
+// Subtract the window duration from TAT to get the time to allow a request +// Requests are allowed if the time to allow a request is in the past +// Store the TAT for the next request +// https://blog.ian.stapletoncordas.co/2018/12/understanding-generic-cell-rate-limiting.html + type windowedRateLimitCacheImpl struct { client Client // Optional Client for a dedicated cache of per second limits. @@ -51,6 +60,7 @@ func windowedPipelineAppend(client Client, pipeline *Pipeline, key string, resul *pipeline = client.PipeAppend(*pipeline, result, "GET", key) } +// Store the new TAT (theoretical arrival time) func windowedSetNewTatPipelineAppend(client Client, pipeline *Pipeline, key string, newTat int64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, nil, "SET", key, newTat) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) @@ -127,7 +137,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( perSecondPipeline = nil } - // Rate limit GCRA logic responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) now := this.timeSource.UnixNanoNow() for i, cacheKey := range cacheKeys { @@ -162,18 +171,22 @@ func (this *windowedRateLimitCacheImpl) DoLimit( quantity := int64(hitsAddend) arrivedAt := now + // GCRA computation + // Emission interval is the cost of each request emissionInterval := period / limit - increment := emissionInterval * quantity + // TAT is set to the current request timestamp if not set before tat := utils.MaxInt64(tats[i], arrivedAt) - newTat := tat + increment - delayVariationTolerance := limit * emissionInterval - previousAllowAt := tat - delayVariationTolerance - allowAt := newTat - delayVariationTolerance + // The new TAT defines the end of the window + newTat := tat + emissionInterval*quantity + // We allow the request if it's inside the window + allowAt := newTat - period diff := arrivedAt - allowAt - limitRemaining := int64(math.Ceil(float64((arrivedAt - allowAt) / 
emissionInterval))) + + previousAllowAt := tat - period previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) * (1.0 - this.nearLimitRatio)))) + limitRemaining := int64(math.Ceil(float64(diff / emissionInterval))) if diff < 0 { responseDescriptorStatuses[i] = @@ -209,7 +222,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) } - // Store newTat + // Store new tat for initial tat of next requests expirationSeconds := nanosecondsToSeconds(newTat-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) From 48446cfe7258c95a4202c09d2938b433e45a3490 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Mon, 7 Dec 2020 22:21:23 +0700 Subject: [PATCH 12/31] throw error when rate limit cache type is unknown Signed-off-by: William Albertus Dembo --- src/redis/cache_impl.go | 15 +++++++-------- src/service_cmd/runner/runner.go | 12 +++++++++++- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index c7e2b84c4..00640661e 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,16 +1,16 @@ package redis import ( + "fmt" "math/rand" "github.com/coocood/freecache" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" - logger "github.com/sirupsen/logrus" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, 
timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) (limiter.RateLimitCache, error) { var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, @@ -28,8 +28,9 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, expirationJitterMaxSeconds, localCache, - s.NearLimitRatio) - } else if s.RateLimitAlgorithm == settings.WindowedRateLimit { + s.NearLimitRatio), nil + } + if s.RateLimitAlgorithm == settings.WindowedRateLimit { return NewWindowedRateLimitCacheImpl( otherPool, perSecondPool, @@ -37,9 +38,7 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, expirationJitterMaxSeconds, localCache, - s.NearLimitRatio) - } else { - logger.Fatalf("Unknown rate limit algorithm. %s\n", s.RateLimitAlgorithm) + s.NearLimitRatio), nil } - return nil + return nil, fmt.Errorf("Unknown rate limit algorithm. %s\n", s.RateLimitAlgorithm) } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 7bde5e9a6..a2998f43f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -85,10 +85,20 @@ func (runner *Runner) Run() { } srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) + rateLimitCache, err := redis.NewRateLimiterCacheImplFromSettings( + s, + localCache, + srv, + limiter.NewTimeSourceImpl(), + rand.New(limiter.NewLockedSource(time.Now().Unix())), + s.ExpirationJitterMaxSeconds) + if err != nil { + logger.Fatalf("Could not setup ratelimit cache. 
%v\n", err) + } service := ratelimit.NewService( srv.Runtime(), - createLimiter(srv, s, localCache), + rateLimitCache, config.NewRateLimitConfigLoaderImpl(), srv.Scope().Scope("service"), s.RuntimeWatchRoot, From 927b23ed5529828f4813176ecc165ba291a6ea93 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Thu, 31 Dec 2020 13:03:43 +0700 Subject: [PATCH 13/31] move time conversion related to utils Signed-off-by: William Albertus Dembo --- src/redis/windowed_cache_impl.go | 27 ++++------------- src/utils/time.go | 52 ++++++++------------------------ 2 files changed, 18 insertions(+), 61 deletions(-) diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 040bcf4c6..8fe0f2a30 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -39,21 +39,6 @@ type windowedRateLimitCacheImpl struct { nearLimitRatio float32 } -func nanosecondsToDuration(nanoseconds int64) *duration.Duration { - nanos := nanoseconds - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration.Duration{Seconds: secs, Nanos: int32(nanos)} -} - -func secondsToNanoseconds(second int64) int64 { - return second * 1e9 -} - -func nanosecondsToSeconds(nanoseconds int64) int64 { - return nanoseconds / 1e9 -} - func windowedPipelineAppend(client Client, pipeline *Pipeline, key string, result *int64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, nil, "SETNX", key, int64(0)) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) @@ -152,7 +137,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( if isOverLimitWithLocalCache[i] { secondsToReset := utils.UnitToDivider(limits[i].Limit.Unit) - secondsToReset -= nanosecondsToSeconds(now) % secondsToReset + secondsToReset -= utils.NanosecondsToSeconds(now) % secondsToReset responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OVER_LIMIT, @@ -167,7 +152,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( 
// Time during computation should be in nanosecond limit := int64(limits[i].Limit.RequestsPerUnit) - period := secondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) + period := utils.SecondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) quantity := int64(hitsAddend) arrivedAt := now @@ -194,14 +179,14 @@ func (this *windowedRateLimitCacheImpl) DoLimit( Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[i].Limit, LimitRemaining: 0, - DurationUntilReset: nanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), + DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), } limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) limits[i].Stats.NearLimit.Add(uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow))) if this.localCache != nil { - err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(nanosecondsToSeconds(-diff))) + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.NanosecondsToSeconds(-diff))) if err != nil { logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) } @@ -214,7 +199,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( Code: pb.RateLimitResponse_OK, CurrentLimit: limits[i].Limit, LimitRemaining: uint32(limitRemaining), - DurationUntilReset: nanosecondsToDuration(newTat - arrivedAt), + DurationUntilReset: utils.NanosecondsToDuration(newTat - arrivedAt), } hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) @@ -223,7 +208,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } // Store new tat for initial tat of next requests - expirationSeconds := nanosecondsToSeconds(newTat-arrivedAt) + 1 + expirationSeconds := utils.NanosecondsToSeconds(newTat-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } diff --git a/src/utils/time.go b/src/utils/time.go index f3fdf53d3..0d6a286bd 
100644 --- a/src/utils/time.go +++ b/src/utils/time.go @@ -1,52 +1,24 @@ package utils import ( - "math/rand" - "sync" + "github.com/golang/protobuf/ptypes/duration" "time" ) -// Interface for a rand Source for expiration jitter. -type JitterRandSource interface { - // @return a non-negative pseudo-random 63-bit integer as an int64. - Int63() int64 - // @param seed initializes pseudo-random generator to a deterministic state. - Seed(seed int64) -} - -type timeSourceImpl struct{} - -func NewTimeSourceImpl() TimeSource { - return &timeSourceImpl{} -} - -func (this *timeSourceImpl) UnixNow() int64 { - return time.Now().Unix() -} - -func (this *timeSourceImpl) UnixNanoNow() int64 { - return time.Now().UnixNano() -} - -// rand for jitter. -type lockedSource struct { - lk sync.Mutex - src rand.Source -} +const secondToNanosecondRate = 1e9 -func NewLockedSource(seed int64) JitterRandSource { - return &lockedSource{src: rand.NewSource(seed)} +func NanosecondsToSeconds(nanoseconds int64) int64 { + return nanoseconds / secondToNanosecondRate } -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return +func NanosecondsToDuration(nanoseconds int64) *duration.Duration { + nanos := nanoseconds + secs := nanos / secondToNanosecondRate + nanos -= secs * secondToNanosecondRate + return &duration.Duration{Seconds: secs, Nanos: int32(nanos)} } -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() +func SecondsToNanoseconds(second int64) int64 { + time.Now() + return second * secondToNanosecondRate } From f0d3c8b3e649d238c1a6140d81022bb0bddd60d8 Mon Sep 17 00:00:00 2001 From: William Albertus Dembo Date: Tue, 5 Jan 2021 16:52:59 +0700 Subject: [PATCH 14/31] move pipeline and cache key method from cache implementation Signed-off-by: William Albertus Dembo --- go.mod | 3 +- go.sum | 2 + src/algorithm/fixed_window.go | 30 ++ src/algorithm/ratelimit_algorithm.go | 13 + src/algorithm/rolling_window.go | 32 
++ src/redis/cache_impl.go | 21 +- src/redis/{ => driver}/driver.go | 2 +- src/redis/{ => driver}/driver_impl.go | 10 +- src/redis/fixed_cache_impl.go | 46 ++- src/redis/windowed_cache_impl.go | 42 ++- src/service/ratelimit.go | 4 +- test/mocks/algorithm/ratelimit_algorithm.go | 65 ++++ test/mocks/redis/{ => driver}/redis.go | 104 +++--- test/redis/bench_test.go | 11 +- test/redis/driver_impl_test.go | 27 +- test/redis/fixed_cache_impl_test.go | 350 +++++++++++++------- test/redis/windowed_cache_impl_test.go | 210 ++++++++---- test/service/ratelimit_legacy_test.go | 4 +- test/service/ratelimit_test.go | 4 +- 19 files changed, 670 insertions(+), 310 deletions(-) create mode 100644 src/algorithm/fixed_window.go create mode 100644 src/algorithm/ratelimit_algorithm.go create mode 100644 src/algorithm/rolling_window.go rename src/redis/{ => driver}/driver.go (99%) rename src/redis/{ => driver}/driver_impl.go (95%) create mode 100644 test/mocks/algorithm/ratelimit_algorithm.go rename test/mocks/redis/{ => driver}/redis.go (71%) diff --git a/go.mod b/go.mod index ed885178a..5b5db3eb7 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/coocood/freecache v1.1.0 github.com/envoyproxy/go-control-plane v0.9.7 github.com/fsnotify/fsnotify v1.4.7 // indirect - github.com/golang/mock v1.4.1 + github.com/golang/mock v1.4.4 github.com/golang/protobuf v1.4.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 github.com/kavu/go_reuseport v1.2.0 @@ -26,4 +26,5 @@ require ( google.golang.org/grpc v1.27.0 google.golang.org/protobuf v1.25.0 // indirect gopkg.in/yaml.v2 v2.3.0 + rsc.io/quote/v3 v3.1.0 // indirect ) diff --git a/go.sum b/go.sum index f8b59961e..a404d023e 100644 --- a/go.sum +++ b/go.sum @@ -35,6 +35,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= 
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go new file mode 100644 index 000000000..9220cf3d4 --- /dev/null +++ b/src/algorithm/fixed_window.go @@ -0,0 +1,30 @@ +package algorithm + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" +) + +type FixedWindowImpl struct { + now int64 + cacheKeyGenerator limiter.CacheKeyGenerator +} + +func (this *FixedWindowImpl) GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { + return this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, this.now) +} + +func (this *FixedWindowImpl) AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline { + pipeline = client.PipeAppend(pipeline, result, "INCRBY", key, hitsAddend) + pipeline = client.PipeAppend(pipeline, nil, "EXPIRE", key, expirationSeconds) + return pipeline +} + +func NewFixedWindowAlgorithm(timeSource limiter.TimeSource) *FixedWindowImpl { + return &FixedWindowImpl{ + now: timeSource.UnixNanoNow(), + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + } +} diff --git a/src/algorithm/ratelimit_algorithm.go b/src/algorithm/ratelimit_algorithm.go new 
file mode 100644 index 000000000..aabdd4049 --- /dev/null +++ b/src/algorithm/ratelimit_algorithm.go @@ -0,0 +1,13 @@ +package algorithm + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" +) + +type RatelimitAlgorithm interface { + GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey + AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline +} diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go new file mode 100644 index 000000000..a6b82c67c --- /dev/null +++ b/src/algorithm/rolling_window.go @@ -0,0 +1,32 @@ +package algorithm + +import ( + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" +) + +const DummyCacheKeyTime = 0 + +type RollingWindowImpl struct { + now int64 + cacheKeyGenerator limiter.CacheKeyGenerator +} + +func (this *RollingWindowImpl) GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { + return this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, DummyCacheKeyTime) +} + +func (this *RollingWindowImpl) AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline { + pipeline = client.PipeAppend(pipeline, nil, "SETNX", key, int64(0)) + pipeline = client.PipeAppend(pipeline, nil, "EXPIRE", key, expirationSeconds) + pipeline = client.PipeAppend(pipeline, 
result, "GET", key) + return pipeline +} + +func NewRollingWindowAlgorithm() *RollingWindowImpl { + return &RollingWindowImpl{ + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + } +} diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 00640661e..02a8b07b1 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -5,22 +5,28 @@ import ( "math/rand" "github.com/coocood/freecache" + + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" ) func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) (limiter.RateLimitCache, error) { - var perSecondPool Client + var perSecondPool driver.Client if s.RedisPerSecond { - perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + perSecondPool = driver.NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } - var otherPool Client - otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, + var otherPool driver.Client + otherPool = driver.NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) if s.RateLimitAlgorithm == settings.FixedRateLimit { + ratelimitAlgorithm := algorithm.NewFixedWindowAlgorithm( + timeSource, + ) return NewFixedRateLimitCacheImpl( otherPool, perSecondPool, @@ -28,9 +34,11 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, 
expirationJitterMaxSeconds, localCache, - s.NearLimitRatio), nil + s.NearLimitRatio, + ratelimitAlgorithm), nil } if s.RateLimitAlgorithm == settings.WindowedRateLimit { + ratelimitAlgorithm := algorithm.NewRollingWindowAlgorithm() return NewWindowedRateLimitCacheImpl( otherPool, perSecondPool, @@ -38,7 +46,8 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, expirationJitterMaxSeconds, localCache, - s.NearLimitRatio), nil + s.NearLimitRatio, + ratelimitAlgorithm), nil } return nil, fmt.Errorf("Unknown rate limit algorithm. %s\n", s.RateLimitAlgorithm) } diff --git a/src/redis/driver.go b/src/redis/driver/driver.go similarity index 99% rename from src/redis/driver.go rename to src/redis/driver/driver.go index 7ffc0c7b7..70bb8bfdd 100644 --- a/src/redis/driver.go +++ b/src/redis/driver/driver.go @@ -1,4 +1,4 @@ -package redis +package driver import "github.com/mediocregopher/radix/v3" diff --git a/src/redis/driver_impl.go b/src/redis/driver/driver_impl.go similarity index 95% rename from src/redis/driver_impl.go rename to src/redis/driver/driver_impl.go index 18e213f1b..cf563207c 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver/driver_impl.go @@ -1,4 +1,4 @@ -package redis +package driver import ( "crypto/tls" @@ -46,7 +46,7 @@ type clientImpl struct { implicitPipelining bool } -func checkError(err error) { +func CheckError(err error) { if err != nil { panic(RedisError(err.Error())) } @@ -114,13 +114,13 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string panic(RedisError("Unrecognized redis type " + redisType)) } - checkError(err) + CheckError(err) // Check if connection is good var pingResponse string - checkError(client.Do(radix.Cmd(&pingResponse, "PING"))) + CheckError(client.Do(radix.Cmd(&pingResponse, "PING"))) if pingResponse != "PONG" { - checkError(fmt.Errorf("connecting redis error: %s", pingResponse)) + CheckError(fmt.Errorf("connecting redis error: %s", pingResponse)) 
} return &clientImpl{ diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index ab5e44113..dce625219 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -5,29 +5,33 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" + "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) type fixedRateLimitCacheImpl struct { - client Client + client driver.Client // Optional Client for a dedicated cache of per second limits. // If this client is nil, then the Cache will use the client for all // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. - perSecondClient Client + perSecondClient driver.Client timeSource limiter.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 cacheKeyGenerator limiter.CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 + algorithm algorithm.RatelimitAlgorithm } -func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { +func pipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } @@ -47,10 +51,9 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // all the same size. 
assert.Assert(len(request.Descriptors) == len(limits)) cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) - now := this.timeSource.UnixNow() for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( - request.Domain, request.Descriptors[i], limits[i], now) + cacheKeys[i] = this.algorithm.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i]) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { @@ -60,7 +63,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]uint32, len(request.Descriptors)) - var pipeline, perSecondPipeline Pipeline + var pipeline, perSecondPipeline driver.Pipeline // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { @@ -85,22 +88,24 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. 
if this.perSecondClient != nil && cacheKey.PerSecond { if perSecondPipeline == nil { - perSecondPipeline = Pipeline{} + perSecondPipeline = driver.Pipeline{} } - pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + perSecondPipeline = this.algorithm.AppendPipeline(this.perSecondClient, perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + //pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } else { if pipeline == nil { - pipeline = Pipeline{} + pipeline = driver.Pipeline{} } - pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + pipeline = this.algorithm.AppendPipeline(this.client, pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + //pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } } if pipeline != nil { - checkError(this.client.PipeDo(pipeline)) + driver.CheckError(this.client.PipeDo(pipeline)) } if perSecondPipeline != nil { - checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + driver.CheckError(this.perSecondClient.PipeDo(perSecondPipeline)) } // Now fetch the pipeline. @@ -182,11 +187,16 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // Flush() is a no-op with redis since quota reads and updates happen synchronously. 
func (this *fixedRateLimitCacheImpl) Flush() {} -func NewFixedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource utils.TimeSource, - jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache { +func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ - client: client, - perSecondClient: perSecondClient, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), + client: client, + perSecondClient: perSecondClient, + timeSource: timeSource, + jitterRand: jitterRand, + expirationJitterMaxSeconds: expirationJitterMaxSeconds, + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + localCache: localCache, + nearLimitRatio: nearLimitRatio, + algorithm: algorithm, } } diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 8fe0f2a30..2a6d78fe5 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -6,9 +6,11 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" "github.com/golang/protobuf/ptypes/duration" logger "github.com/sirupsen/logrus" @@ -25,28 +27,29 @@ import ( // https://blog.ian.stapletoncordas.co/2018/12/understanding-generic-cell-rate-limiting.html type windowedRateLimitCacheImpl struct { - client Client + client driver.Client // Optional 
Client for a dedicated cache of per second limits. // If this client is nil, then the Cache will use the client for all // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. - perSecondClient Client + perSecondClient driver.Client timeSource limiter.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 cacheKeyGenerator limiter.CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 + algorithm algorithm.RatelimitAlgorithm } -func windowedPipelineAppend(client Client, pipeline *Pipeline, key string, result *int64, expirationSeconds int64) { +func windowedPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, result *int64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, nil, "SETNX", key, int64(0)) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) *pipeline = client.PipeAppend(*pipeline, result, "GET", key) } // store new tat (Theoretical arrival time) -func windowedSetNewTatPipelineAppend(client Client, pipeline *Pipeline, key string, newTat int64, expirationSeconds int64) { +func windowedSetNewTatPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, newTat int64, expirationSeconds int64) { *pipeline = client.PipeAppend(*pipeline, nil, "SET", key, newTat) *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } @@ -67,8 +70,8 @@ func (this *windowedRateLimitCacheImpl) DoLimit( assert.Assert(len(request.Descriptors) == len(limits)) cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( - request.Domain, request.Descriptors[i], limits[i], 0) + cacheKeys[i] = this.algorithm.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i]) // Increase statistics for limits hit by their respective requests. 
if limits[i] != nil { @@ -79,7 +82,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( // Get existing tat value for each cache keys isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) - var pipeline, perSecondPipeline Pipeline + var pipeline, perSecondPipeline driver.Pipeline for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue @@ -102,23 +105,25 @@ func (this *windowedRateLimitCacheImpl) DoLimit( // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. if this.perSecondClient != nil && cacheKey.PerSecond { if perSecondPipeline == nil { - perSecondPipeline = Pipeline{} + perSecondPipeline = driver.Pipeline{} } - windowedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, &tats[i], expirationSeconds) + perSecondPipeline = this.algorithm.AppendPipeline(this.perSecondClient, perSecondPipeline, cacheKey.Key, hitsAddend, &tats[i], expirationSeconds) + //windowedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, &tats[i], expirationSeconds) } else { if pipeline == nil { - pipeline = Pipeline{} + pipeline = driver.Pipeline{} } - windowedPipelineAppend(this.client, &pipeline, cacheKey.Key, &tats[i], expirationSeconds) + pipeline = this.algorithm.AppendPipeline(this.client, pipeline, cacheKey.Key, hitsAddend, &tats[i], expirationSeconds) + //windowedPipelineAppend(this.client, &pipeline, cacheKey.Key, &tats[i], expirationSeconds) } } if pipeline != nil { - checkError(this.client.PipeDo(pipeline)) + driver.CheckError(this.client.PipeDo(pipeline)) pipeline = nil } if perSecondPipeline != nil { - checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + driver.CheckError(this.perSecondClient.PipeDo(perSecondPipeline)) perSecondPipeline = nil } @@ -214,26 +219,26 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } if this.perSecondClient != nil && cacheKey.PerSecond { if perSecondPipeline == nil { - perSecondPipeline = 
Pipeline{} + perSecondPipeline = driver.Pipeline{} } windowedSetNewTatPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, newTat, expirationSeconds) } else { if pipeline == nil { - pipeline = Pipeline{} + pipeline = driver.Pipeline{} } windowedSetNewTatPipelineAppend(this.client, &pipeline, cacheKey.Key, newTat, expirationSeconds) } } if pipeline != nil { - checkError(this.client.PipeDo(pipeline)) + driver.CheckError(this.client.PipeDo(pipeline)) } if perSecondPipeline != nil { - checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + driver.CheckError(this.perSecondClient.PipeDo(perSecondPipeline)) } return responseDescriptorStatuses } -func NewWindowedRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache { +func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, @@ -243,5 +248,6 @@ func NewWindowedRateLimitCacheImpl(client Client, perSecondClient Client, timeSo cacheKeyGenerator: limiter.NewCacheKeyGenerator(), localCache: localCache, nearLimitRatio: nearLimitRatio, + algorithm: algorithm, } } diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 126bb776b..5442f8e2d 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -9,7 +9,7 @@ import ( "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" - "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/lyft/goruntime/loader" stats 
"github.com/lyft/gostats" logger "github.com/sirupsen/logrus" @@ -168,7 +168,7 @@ func (this *service) ShouldRateLimit( logger.Debugf("caught error during call") finalResponse = nil switch t := err.(type) { - case redis.RedisError: + case driver.RedisError: { this.stats.shouldRateLimit.redisError.Inc() finalError = t diff --git a/test/mocks/algorithm/ratelimit_algorithm.go b/test/mocks/algorithm/ratelimit_algorithm.go new file mode 100644 index 000000000..91ddc7182 --- /dev/null +++ b/test/mocks/algorithm/ratelimit_algorithm.go @@ -0,0 +1,65 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./src/algorithm/ratelimit_algorithm.go + +// Package mock_algorithm is a generated GoMock package. +package mock_algorithm + +import ( + envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + config "github.com/envoyproxy/ratelimit/src/config" + limiter "github.com/envoyproxy/ratelimit/src/limiter" + driver "github.com/envoyproxy/ratelimit/src/redis/driver" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRatelimitAlgorithm is a mock of RatelimitAlgorithm interface +type MockRatelimitAlgorithm struct { + ctrl *gomock.Controller + recorder *MockRatelimitAlgorithmMockRecorder +} + +// MockRatelimitAlgorithmMockRecorder is the mock recorder for MockRatelimitAlgorithm +type MockRatelimitAlgorithmMockRecorder struct { + mock *MockRatelimitAlgorithm +} + +// NewMockRatelimitAlgorithm creates a new mock instance +func NewMockRatelimitAlgorithm(ctrl *gomock.Controller) *MockRatelimitAlgorithm { + mock := &MockRatelimitAlgorithm{ctrl: ctrl} + mock.recorder = &MockRatelimitAlgorithmMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRatelimitAlgorithm) EXPECT() *MockRatelimitAlgorithmMockRecorder { + return m.recorder +} + +// GenerateCacheKey mocks base method +func (m *MockRatelimitAlgorithm) GenerateCacheKey(domain 
string, descriptor *envoy_extensions_common_ratelimit_v3.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenerateCacheKey", domain, descriptor, limit) + ret0, _ := ret[0].(limiter.CacheKey) + return ret0 +} + +// GenerateCacheKey indicates an expected call of GenerateCacheKey +func (mr *MockRatelimitAlgorithmMockRecorder) GenerateCacheKey(domain, descriptor, limit interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCacheKey", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GenerateCacheKey), domain, descriptor, limit) +} + +// AppendPipeline mocks base method +func (m *MockRatelimitAlgorithm) AppendPipeline(client driver.Client, pipeline driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) driver.Pipeline { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppendPipeline", client, pipeline, key, hitsAddend, result, expirationSeconds) + ret0, _ := ret[0].(driver.Pipeline) + return ret0 +} + +// AppendPipeline indicates an expected call of AppendPipeline +func (mr *MockRatelimitAlgorithmMockRecorder) AppendPipeline(client, pipeline, key, hitsAddend, result, expirationSeconds interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendPipeline", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).AppendPipeline), client, pipeline, key, hitsAddend, result, expirationSeconds) +} diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/driver/redis.go similarity index 71% rename from test/mocks/redis/redis.go rename to test/mocks/redis/driver/redis.go index 032b500dc..e9de3ffd4 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/driver/redis.go @@ -1,11 +1,11 @@ // Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: Client) +// Source: github.com/envoyproxy/ratelimit/src/redis/driver (interfaces: Client) -// Package mock_redis is a generated GoMock package. -package mock_redis +// Package mock_driver is a generated GoMock package. +package mock_driver import ( - redis "github.com/envoyproxy/ratelimit/src/redis" + driver "github.com/envoyproxy/ratelimit/src/redis/driver" gomock "github.com/golang/mock/gomock" reflect "reflect" ) @@ -33,51 +33,70 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } -// Close mocks base method -func (m *MockClient) Close() error { +// DoCmd mocks base method +func (m *MockClient) DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") + varargs := []interface{}{rcv, cmd, key} + for _, a := range args { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DoCmd", varargs...) ret0, _ := ret[0].(error) return ret0 } -// Close indicates an expected call of Close -func (mr *MockClientMockRecorder) Close() *gomock.Call { +// DoCmd indicates an expected call of DoCmd +func (mr *MockClientMockRecorder) DoCmd(rcv, cmd, key interface{}, args ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) + varargs := append([]interface{}{rcv, cmd, key}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) 
} -// DoCmd mocks base method -func (m *MockClient) DoCmd(arg0 interface{}, arg1, arg2 string, arg3 ...interface{}) error { +// PipeAppend mocks base method +func (m *MockClient) PipeAppend(pipeline driver.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) driver.Pipeline { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { + varargs := []interface{}{pipeline, rcv, cmd, key} + for _, a := range args { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DoCmd", varargs...) + ret := m.ctrl.Call(m, "PipeAppend", varargs...) + ret0, _ := ret[0].(driver.Pipeline) + return ret0 +} + +// PipeAppend indicates an expected call of PipeAppend +func (mr *MockClientMockRecorder) PipeAppend(pipeline, rcv, cmd, key interface{}, args ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{pipeline, rcv, cmd, key}, args...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeAppend", reflect.TypeOf((*MockClient)(nil).PipeAppend), varargs...) +} + +// PipeDo mocks base method +func (m *MockClient) PipeDo(pipeline driver.Pipeline) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PipeDo", pipeline) ret0, _ := ret[0].(error) return ret0 } -// DoCmd indicates an expected call of DoCmd -func (mr *MockClientMockRecorder) DoCmd(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { +// PipeDo indicates an expected call of PipeDo +func (mr *MockClientMockRecorder) PipeDo(pipeline interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeDo", reflect.TypeOf((*MockClient)(nil).PipeDo), pipeline) } -// ImplicitPipeliningEnabled mocks base method -func (m *MockClient) ImplicitPipeliningEnabled() bool { +// Close mocks base method +func (m *MockClient) Close() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImplicitPipeliningEnabled") - ret0, _ := ret[0].(bool) + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) return ret0 } -// ImplicitPipeliningEnabled indicates an expected call of ImplicitPipeliningEnabled -func (mr *MockClientMockRecorder) ImplicitPipeliningEnabled() *gomock.Call { +// Close indicates an expected call of Close +func (mr *MockClientMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImplicitPipeliningEnabled", reflect.TypeOf((*MockClient)(nil).ImplicitPipeliningEnabled)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) } // NumActiveConns mocks base method @@ -94,35 +113,16 @@ func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } -// PipeAppend mocks base method -func (m *MockClient) PipeAppend(arg0 redis.Pipeline, arg1 interface{}, arg2, arg3 string, arg4 ...interface{}) redis.Pipeline { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2, arg3} - for _, a := range arg4 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PipeAppend", varargs...) - ret0, _ := ret[0].(redis.Pipeline) - return ret0 -} - -// PipeAppend indicates an expected call of PipeAppend -func (mr *MockClientMockRecorder) PipeAppend(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeAppend", reflect.TypeOf((*MockClient)(nil).PipeAppend), varargs...) -} - -// PipeDo mocks base method -func (m *MockClient) PipeDo(arg0 redis.Pipeline) error { +// ImplicitPipeliningEnabled mocks base method +func (m *MockClient) ImplicitPipeliningEnabled() bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PipeDo", arg0) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "ImplicitPipeliningEnabled") + ret0, _ := ret[0].(bool) return ret0 } -// PipeDo indicates an expected call of PipeDo -func (mr *MockClientMockRecorder) PipeDo(arg0 interface{}) *gomock.Call { +// ImplicitPipeliningEnabled indicates an expected call of ImplicitPipeliningEnabled +func (mr *MockClientMockRecorder) ImplicitPipeliningEnabled() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeDo", reflect.TypeOf((*MockClient)(nil).PipeDo), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImplicitPipeliningEnabled", reflect.TypeOf((*MockClient)(nil).ImplicitPipeliningEnabled)) } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 664158db5..153a5b210 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -10,8 +10,10 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/test/common" ) @@ -40,14 +42,17 @@ func BenchmarkParallelDoLimit(b *testing.B) { mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int, rateLimitAlgorithm string) func(*testing.B) { return func(b *testing.B) { statsStore := stats.NewStore(stats.NewNullSink(), false) - client := redis.NewClientImpl(statsStore, false, "", "single", 
"127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + client := driver.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() var cache limiter.RateLimitCache + timeSource := limiter.NewTimeSourceImpl() if rateLimitAlgorithm == settings.FixedRateLimit { - cache = redis.NewFixedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) + algorithmImpl := algorithm.NewFixedWindowAlgorithm(timeSource) + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) } else if rateLimitAlgorithm == settings.WindowedRateLimit { - cache = redis.NewWindowedRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) + algorithmImpl := algorithm.NewRollingWindowAlgorithm() + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) } else { b.Fatalf("unknown rate limit type %s", rateLimitAlgorithm) } diff --git a/test/redis/driver_impl_test.go b/test/redis/driver_impl_test.go index ab488e239..c1c5376ef 100644 --- a/test/redis/driver_impl_test.go +++ b/test/redis/driver_impl_test.go @@ -4,8 +4,9 @@ import ( "testing" "time" + "github.com/envoyproxy/ratelimit/src/redis/driver" + "github.com/alicebob/miniredis/v2" - "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/gostats" "github.com/stretchr/testify/assert" ) @@ -35,8 +36,8 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit redisAuth := "123" statsStore := stats.NewStore(stats.NewNullSink(), false) - mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, "single", addr, 1, pipelineWindow, pipelineLimit) + mkRedisClient := func(auth, addr 
string) driver.Client { + return driver.NewClientImpl(statsStore, false, auth, "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("connection refused", func(t *testing.T) { @@ -50,7 +51,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit redisSrv := mustNewRedisServer() defer redisSrv.Close() - var client redis.Client + var client driver.Client assert.NotPanics(t, func() { client = mkRedisClient("", redisSrv.Addr()) }) @@ -102,8 +103,8 @@ func TestNewClientImpl(t *testing.T) { func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) - mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, 0, 0) + mkRedisClient := func(addr string) driver.Client { + return driver.NewClientImpl(statsStore, false, "", "single", addr, 1, 0, 0) } t.Run("SETGET ok", func(t *testing.T) { @@ -147,8 +148,8 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f return func(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) - mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, pipelineWindow, pipelineLimit) + mkRedisClient := func(addr string) driver.Client { + return driver.NewClientImpl(statsStore, false, "", "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("SETGET ok", func(t *testing.T) { @@ -158,7 +159,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f client := mkRedisClient(redisSrv.Addr()) var res string - pipeline := redis.Pipeline{} + pipeline := driver.Pipeline{} pipeline = client.PipeAppend(pipeline, nil, "SET", "foo", "bar") pipeline = client.PipeAppend(pipeline, &res, "GET", "foo") @@ -174,10 +175,10 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f var res uint32 hits := uint32(1) - assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, 
&res, "INCRBY", "a", hits))) + assert.Nil(t, client.PipeDo(client.PipeAppend(driver.Pipeline{}, &res, "INCRBY", "a", hits))) assert.Equal(t, hits, res) - assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, &res, "INCRBY", "a", hits))) + assert.Nil(t, client.PipeDo(client.PipeAppend(driver.Pipeline{}, &res, "INCRBY", "a", hits))) assert.Equal(t, uint32(2), res) }) @@ -185,7 +186,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f redisSrv := mustNewRedisServer() client := mkRedisClient(redisSrv.Addr()) - assert.Nil(t, nil, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "SET", "foo", "bar"))) + assert.Nil(t, nil, client.PipeDo(client.PipeAppend(driver.Pipeline{}, nil, "SET", "foo", "bar"))) redisSrv.Close() @@ -194,7 +195,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f assert.Contains(t, err.Error(), "EOF") } - expectErrContainEOF(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "GET", "foo"))) + expectErrContainEOF(t, client.PipeDo(client.PipeAppend(driver.Pipeline{}, nil, "GET", "foo"))) }) } } diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 747cb9798..280ca7d09 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -4,30 +4,30 @@ import ( "testing" "github.com/coocood/freecache" + "github.com/golang/mock/gomock" + stats "github.com/lyft/gostats" "github.com/mediocregopher/radix/v3" + "github.com/stretchr/testify/assert" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" + mock_algorithm "github.com/envoyproxy/ratelimit/test/mocks/algorithm" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_driver "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" "math/rand" -
"github.com/envoyproxy/ratelimit/test/common" - mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" - mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" ) -func TestRedis(t *testing.T) { t.Run("WithoutPerSecondRedis", testRedis(false)) t.Run("WithPerSecondRedis", testRedis(true)) } -func pipeAppend(pipeline redis.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis.Pipeline { +func pipeAppend(pipeline redis_driver.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis_driver.Pipeline { return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) } @@ -37,31 +38,41 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) - perSecondClient := mock_redis.NewMockClient(controller) - timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_driver.NewMockClient(controller) + perSecondClient := mock_driver.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) var cache limiter.RateLimitCache if usePerSecondRedis { - cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) } else { - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) } statsStore := stats.NewStore(stats.NewNullSink(), false) + domain := "domain" - 
timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - var clientUsed *mock_redis.MockClient + var clientUsed *mock_driver.MockClient if usePerSecondRedis { clientUsed = perSecondClient } else { clientUsed = client } - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) + // Test 1 + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_1234", uint32(1), gomock.Any(), int64(1)). + SetArg(4, uint32(5)). 
+ Return(redis_driver.Pipeline{}) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -70,15 +81,9 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - clientUsed = client - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - + // Test 2 request = common.NewRateLimitRequest( - "domain", + domain, [][][2]string{ {{"key2", "value2"}}, {{"key2", "value2"}, {"subkey2", "subvalue2"}}, @@ -86,6 +91,24 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ nil, config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + + clientUsed = client + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ + Key: "domain_key2_value2_subkey2_subvalue2_1200", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key2_value2_subkey2_subvalue2_1200", uint32(1), gomock.Any(), int64(60)). + SetArg(4, uint32(11)). 
+ Return(redis_driver.Pipeline{}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, @@ -94,18 +117,9 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) - clientUsed = client - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - + // Test 3 request = common.NewRateLimitRequest( - "domain", + domain, [][][2]string{ {{"key3", "value3"}}, {{"key3", "value3"}, {"subkey3", "subvalue3"}}, @@ -113,6 +127,28 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + + clientUsed = client + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(4) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], 
limits[0]).Return(limiter.CacheKey{ + Key: "domain_key3_value3_997200", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ + Key: "domain_key3_value3_subkey3_subvalue3_950400", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(11)). + Return(redis_driver.Pipeline{}) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_subkey3_subvalue3_950400", uint32(1), gomock.Any(), int64(86400)). + SetArg(4, uint32(13)). + Return(redis_driver.Pipeline{}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, @@ -168,25 +204,30 @@ func TestOverLimitWithLocalCache(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) - timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_driver.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, ratelimitAlgorithm) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + domain := "domain" // Test Near Limit Stats. 
Under Near Limit Ratio - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) - + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(11)). + Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -201,10 +242,15 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) // Test Near Limit Stats. 
At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(13)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -220,10 +266,15 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test Over limit stats - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(16)). 
+ Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -239,10 +290,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: true, + }) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, @@ -261,22 +313,27 @@ func TestNearLimit(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) - timeSource := mock_utils.NewMockTimeSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + client := mock_driver.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) statsStore := stats.NewStore(stats.NewNullSink(), false) + domain := "domain" // Test Near Limit Stats. 
Under Near Limit Ratio - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) - + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(11)). + Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -287,10 +344,15 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // Test Near Limit Stats. 
At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(13)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -303,10 +365,15 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_997200", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). + SetArg(4, uint32(16)). 
+ Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -319,14 +386,20 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key5_value5_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key5_value5_1234", uint32(3), gomock.Any(), int64(1)). + SetArg(4, uint32(5)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -335,14 +408,20 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // All of it under limit, some over near limit - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(1, uint32(7)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key6_value6_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key6_value6_1234", uint32(2), gomock.Any(), int64(1)). + SetArg(4, uint32(7)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -351,14 +430,20 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // All of it under limit, all of it over near limit - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(1, uint32(19)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key7_value7_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key7_value7_1234", uint32(3), gomock.Any(), int64(1)). + SetArg(4, uint32(19)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -367,14 +452,20 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) // Some of it over limit, all of it over near limit - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key8_value8_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key8_value8_1234", uint32(3), gomock.Any(), int64(1)). + SetArg(4, uint32(22)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -383,14 +474,20 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // Some of it in all three places - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key9_value9_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key9_value9_1234", uint32(7), gomock.Any(), int64(1)). + SetArg(4, uint32(22)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -399,14 +496,20 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) // all of it over limit - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(1, uint32(30)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key10_value10_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key10_value10_1234", uint32(3), gomock.Any(), int64(1)). + SetArg(4, uint32(30)). 
+ Return(redis_driver.Pipeline{}) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) @@ -420,21 +523,28 @@ func TestRedisWithJitter(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) - timeSource := mock_utils.NewMockTimeSource(controller) - jitterSource := mock_utils.NewMockJitterRandSource(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8) + client := mock_driver.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + jitterSource := mock_limiter.NewMockJitterRandSource(controller) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, ratelimitAlgorithm) statsStore := stats.NewStore(stats.NewNullSink(), false) + domain := "domain" - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_1234", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_1234", uint32(1), gomock.Any(), int64(101)). + SetArg(4, uint32(5)). 
+ Return(redis_driver.Pipeline{}) jitterSource.EXPECT().Int63().Return(int64(100)) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go index e9c463ef4..94b83959c 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -4,14 +4,17 @@ import ( "math/rand" "testing" - "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/test/common" + mock_algorithm "github.com/envoyproxy/ratelimit/test/mocks/algorithm" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" - mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" + redis_driver_mock "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" + + "github.com/coocood/freecache" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/duration" stats "github.com/lyft/gostats" @@ -29,35 +32,44 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { 
controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) - perSecondClient := mock_redis.NewMockClient(controller) + client := redis_driver_mock.NewMockClient(controller) + perSecondClient := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) var cache limiter.RateLimitCache if usePerSecondRedis { - cache = redis.NewWindowedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + cache = redis.NewWindowedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) } else { - cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) } statsStore := stats.NewStore(stats.NewNullSink(), false) - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - var clientUsed *mock_redis.MockClient + domain := "domain" + var clientUsed *redis_driver_mock.MockClient if usePerSecondRedis { clientUsed = perSecondClient } else { clientUsed = client } - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(0)).DoAndReturn(pipeAppend) + // Test 1 + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + 
timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(1)). + SetArg(4, int64(0)). + Return(redis_driver.Pipeline{}) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, @@ -66,13 +78,7 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - clientUsed = client - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key2_value2_subkey2_subvalue2_0", int64(0)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_0", int64(60)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key2_value2_subkey2_subvalue2_0").SetArg(1, int64(70e9)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - + // Test 2 
request = common.NewRateLimitRequest( "domain", [][][2]string{ @@ -82,6 +88,23 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ nil, config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + clientUsed = client + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ + Key: "domain_key2_value2_subkey2_subvalue2_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key2_value2_subkey2_subvalue2_0", gomock.Any(), gomock.Any(), int64(60)). + SetArg(4, int64(70e9)). + Return(redis_driver.Pipeline{}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 69}}}, @@ -90,18 +113,7 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) - clientUsed = client - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(5) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key3_value3_0", int64(0)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_0", int64(60*60)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_0").SetArg(1, int64(60*60*1e9)).DoAndReturn(pipeAppend) - - 
clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key3_value3_subkey3_subvalue3_0", int64(0)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_0", int64(60*60*24)).DoAndReturn(pipeAppend) - clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_subkey3_subvalue3_0").SetArg(1, int64(60*60*24*1e9)).DoAndReturn(pipeAppend) - - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - + // Test 3 request = common.NewRateLimitRequest( "domain", [][][2]string{ @@ -111,6 +123,27 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(5) + clientUsed = client + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key3_value3_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_0", gomock.Any(), gomock.Any(), int64(60*60)). + SetArg(4, int64(60*60*1e9)). + Return(redis_driver.Pipeline{}) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ + Key: "domain_key3_value3_subkey3_subvalue3_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_subkey3_subvalue3_0", gomock.Any(), gomock.Any(), int64(60*60*24)). + SetArg(4, int64(60*60*24*1e9)). 
+ Return(redis_driver.Pipeline{}) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: (60 * 60) - 1}}, @@ -130,19 +163,26 @@ func TestNearLimitWindowed(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) + client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) statsStore := stats.NewStore(stats.NewNullSink(), false) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) + domain := "domain" + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key4_value4", statsStore)} // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(50e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). 
+ AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). + SetArg(4, int64(50e9)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(50e9+6e9)).DoAndReturn(pipeAppend) @@ -159,9 +199,14 @@ func TestNearLimitWindowed(t *testing.T) { // Test Near Limit Stats. At Near Limit Ratio, still OK timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(98e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). + SetArg(4, int64(98e9)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(98e9+6e9)).DoAndReturn(pipeAppend) @@ -179,9 +224,14 @@ func TestNearLimitWindowed(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. 
timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(110e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key4_value4_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). + SetArg(4, int64(110e9)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -198,23 +248,30 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) + client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, ratelimitAlgorithm) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) + domain := "domain" localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(15, 
pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(71*4*60*1e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). + SetArg(4, int64(71*4*60*1e9)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(72*4*60*1e9)).DoAndReturn(pipeAppend) @@ -235,9 +292,14 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { // Test Near Limit Stats. At Near Limit Ratio, still OK timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(72*4*60*1e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). 
+ AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). + SetArg(4, int64(72*4*60*1e9)). + Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(73*4*60*1e9)).DoAndReturn(pipeAppend) @@ -263,9 +325,14 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { // Test Over limit stats timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(60*60)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(75*4*60*1e9)).DoAndReturn(pipeAppend) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: false, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). + SetArg(4, int64(75*4*60*1e9)). 
+ Return(redis_driver.Pipeline{}) client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) @@ -286,13 +353,16 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits = []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: false, + }) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 15 * 4 * 60}}}, @@ -311,26 +381,32 @@ func TestRedisWindowedWithJitter(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - client := mock_redis.NewMockClient(controller) + client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) jitterSource := mock_limiter.NewMockJitterRandSource(controller) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8) + ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, ratelimitAlgorithm) statsStore := stats.NewStore(stats.NewNullSink(), false) + domain := "domain" + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ + Key: "domain_key_value_0", + PerSecond: true, + }) + ratelimitAlgorithm.EXPECT(). + AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(1)). + SetArg(4, int64(0)). + Return(redis_driver.Pipeline{}) jitterSource.EXPECT().Int63().Return(int64(100)) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(0)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, cache.DoLimit(nil, request, limits)) diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index a51ddbe90..510e80232 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -10,7 +10,7 @@ import ( pb_legacy 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" "github.com/golang/mock/gomock" @@ -197,7 +197,7 @@ func TestCacheErrorLegacy(test *testing.T) { t.config.EXPECT().GetLimit(nil, "different-domain", req.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, req, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { - panic(redis.RedisError("cache error")) + panic(driver.RedisError("cache error")) }) response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 12c77926a..58c7b25d1 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -6,7 +6,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/redis/driver" ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" @@ -205,7 +205,7 @@ func TestCacheError(test *testing.T) { t.config.EXPECT().GetLimit(nil, "different-domain", request.Descriptors[0]).Return(limits[0]) t.cache.EXPECT().DoLimit(nil, request, limits).Do( func(context.Context, *pb.RateLimitRequest, []*config.RateLimit) { - panic(redis.RedisError("cache error")) + panic(driver.RedisError("cache error")) }) response, err := service.ShouldRateLimit(nil, request) From 5e2676b3c5f3f218a8c04e457e1563da4c16d312 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 13 Jan 2021 08:04:55 +0100 Subject: [PATCH 15/31] 
Fix fixed_window algorithm generate same cache keys Co-Authored-By: exagil Signed-off-by: zufardhiyaulhaq --- src/algorithm/fixed_window.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index 9220cf3d4..cb659391e 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -8,12 +8,12 @@ import ( ) type FixedWindowImpl struct { - now int64 + now limiter.TimeSource cacheKeyGenerator limiter.CacheKeyGenerator } func (this *FixedWindowImpl) GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { - return this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, this.now) + return this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, this.now.UnixNow()) } func (this *FixedWindowImpl) AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline { @@ -24,7 +24,7 @@ func (this *FixedWindowImpl) AppendPipeline(client redis_driver.Client, pipeline func NewFixedWindowAlgorithm(timeSource limiter.TimeSource) *FixedWindowImpl { return &FixedWindowImpl{ - now: timeSource.UnixNanoNow(), + now: timeSource, cacheKeyGenerator: limiter.NewCacheKeyGenerator(), } } From ffff44aadb44e1ef2de74ee48ea1879631c7a4a8 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Thu, 14 Jan 2021 12:25:58 +0100 Subject: [PATCH 16/31] fix conflicting files Co-Authored-By: exagil Signed-off-by: zufardhiyaulhaq --- src/limiter/base_limiter.go | 7 +- src/memcached/cache_impl.go | 185 +++------------------------ src/memcached/fixed_cache_impl.go | 164 ++++++++++++++++++++++++ src/redis/fixed_cache_impl.go | 50 ++++++-- src/redis/windowed_cache_impl.go | 3 +- src/service/ratelimit_legacy.go | 2 +- src/service_cmd/runner/runner.go | 24 ++-- src/utils/jitter_rand_source.go | 9 ++ 
src/utils/jitter_rand_source_impl.go | 29 +++++ src/utils/time.go | 27 +--- src/utils/time_impl.go | 19 +++ src/utils/utilities.go | 30 ++++- test/memcached/cache_impl_test.go | 12 +- test/mocks/limiter/limiter.go | 18 ++- test/mocks/utils/utils.go | 8 ++ test/redis/bench_test.go | 8 +- test/redis/fixed_cache_impl_test.go | 9 +- 17 files changed, 359 insertions(+), 245 deletions(-) create mode 100644 src/memcached/fixed_cache_impl.go create mode 100644 src/utils/jitter_rand_source.go create mode 100644 src/utils/jitter_rand_source_impl.go create mode 100644 src/utils/time_impl.go diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go index a5a3c1be5..9f168c848 100644 --- a/src/limiter/base_limiter.go +++ b/src/limiter/base_limiter.go @@ -1,14 +1,15 @@ package limiter import ( + "math" + "math/rand" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" - "math" - "math/rand" ) type BaseRateLimiter struct { @@ -140,7 +141,7 @@ func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { // If the limit before increase was below the over limit value, then some of the hits were // in the near limit range. 
limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - - utils.Max(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) + utils.MaxUint32(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) } } diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 10a870442..618a8fa41 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -16,182 +16,29 @@ package memcached import ( - "context" + "fmt" "math/rand" - "strconv" - "sync" - - "github.com/coocood/freecache" - stats "github.com/lyft/gostats" "github.com/bradfitz/gomemcache/memcache" - - logger "github.com/sirupsen/logrus" - - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - - "github.com/envoyproxy/ratelimit/src/config" + "github.com/coocood/freecache" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/utils" + stats "github.com/lyft/gostats" ) -type rateLimitMemcacheImpl struct { - client Client - timeSource utils.TimeSource - jitterRand *rand.Rand - expirationJitterMaxSeconds int64 - cacheKeyGenerator limiter.CacheKeyGenerator - localCache *freecache.Cache - waitGroup sync.WaitGroup - nearLimitRatio float32 - baseRateLimiter *limiter.BaseRateLimiter -} - -var _ limiter.RateLimitCache = (*rateLimitMemcacheImpl)(nil) - -func (this *rateLimitMemcacheImpl) DoLimit( - ctx context.Context, - request *pb.RateLimitRequest, - limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - - logger.Debugf("starting cache lookup") - - // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.Max(1, request.HitsAddend) - - // First build a list of all cache keys that we are actually going to hit. 
- cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) - - isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - - keysToGet := make([]string, 0, len(request.Descriptors)) - - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - continue - } - - // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - continue - } - - logger.Debugf("looking up cache key: %s", cacheKey.Key) - keysToGet = append(keysToGet, cacheKey.Key) - } - - // Now fetch from memcache. - responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, - len(request.Descriptors)) - - var memcacheValues map[string]*memcache.Item - var err error - - if len(keysToGet) > 0 { - memcacheValues, err = this.client.GetMulti(keysToGet) - if err != nil { - logger.Errorf("Error multi-getting memcache keys (%s): %s", keysToGet, err) - } - } - - for i, cacheKey := range cacheKeys { - - rawMemcacheValue, ok := memcacheValues[cacheKey.Key] - var limitBeforeIncrease uint32 - if ok { - decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 32) - if err != nil { - logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) - } else { - limitBeforeIncrease = uint32(decoded) - } - - } - - limitAfterIncrease := limitBeforeIncrease + hitsAddend - - limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) - - responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) - } - - this.waitGroup.Add(1) - go this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) - - return responseDescriptorStatuses -} - -func (this *rateLimitMemcacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache 
[]bool, - limits []*config.RateLimit, hitsAddend uint64) { - defer this.waitGroup.Done() - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" || isOverLimitWithLocalCache[i] { - continue - } - - _, err := this.client.Increment(cacheKey.Key, hitsAddend) - if err == memcache.ErrCacheMiss { - expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) - if this.expirationJitterMaxSeconds > 0 { - expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) - } - - // Need to add instead of increment. - err = this.client.Add(&memcache.Item{ - Key: cacheKey.Key, - Value: []byte(strconv.FormatUint(hitsAddend, 10)), - Expiration: int32(expirationSeconds), - }) - if err == memcache.ErrNotStored { - // There was a race condition to do this add. We should be able to increment - // now instead. - _, err := this.client.Increment(cacheKey.Key, hitsAddend) - if err != nil { - logger.Errorf("Failed to increment key %s after failing to add: %s", cacheKey.Key, err) - continue - } - } else if err != nil { - logger.Errorf("Failed to add key %s: %s", cacheKey.Key, err) - continue - } - } else if err != nil { - logger.Errorf("Failed to increment key %s: %s", cacheKey.Key, err) - continue - } - } -} - -func (this *rateLimitMemcacheImpl) Flush() { - this.waitGroup.Wait() -} - -func NewRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { - return &rateLimitMemcacheImpl{ - client: client, - timeSource: timeSource, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), - jitterRand: jitterRand, - expirationJitterMaxSeconds: expirationJitterMaxSeconds, - localCache: localCache, - nearLimitRatio: nearLimitRatio, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), - } -} - func NewRateLimitCacheImplFromSettings(s settings.Settings, 
timeSource utils.TimeSource, jitterRand *rand.Rand, - localCache *freecache.Cache, scope stats.Scope) limiter.RateLimitCache { - return NewRateLimitCacheImpl( - memcache.New(s.MemcacheHostPort), - timeSource, - jitterRand, - s.ExpirationJitterMaxSeconds, - localCache, - scope, - s.NearLimitRatio, - ) + localCache *freecache.Cache, scope stats.Scope) (limiter.RateLimitCache, error) { + if s.RateLimitAlgorithm == settings.FixedRateLimit { + return NewFixedRateLimitCacheImpl( + memcache.New(s.MemcacheHostPort), + timeSource, + jitterRand, + s.ExpirationJitterMaxSeconds, + localCache, + scope, + s.NearLimitRatio, + ), nil + } + return nil, fmt.Errorf("Unknown rate limit algorithm. %s\n", s.RateLimitAlgorithm) } diff --git a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go new file mode 100644 index 000000000..677ef4a17 --- /dev/null +++ b/src/memcached/fixed_cache_impl.go @@ -0,0 +1,164 @@ +package memcached + +import ( + "context" + "math/rand" + "strconv" + "sync" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/utils" + stats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" +) + +type fixedRateLimitCacheImpl struct { + client Client + timeSource utils.TimeSource + jitterRand *rand.Rand + expirationJitterMaxSeconds int64 + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache + waitGroup sync.WaitGroup + nearLimitRatio float32 + baseRateLimiter *limiter.BaseRateLimiter +} + +var _ limiter.RateLimitCache = (*fixedRateLimitCacheImpl)(nil) + +func (this *fixedRateLimitCacheImpl) DoLimit( + ctx context.Context, + request *pb.RateLimitRequest, + limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + + logger.Debugf("starting cache lookup") + + // request.HitsAddend 
could be 0 (default value) if not specified by the caller in the Ratelimit request. + hitsAddend := utils.MaxUint32(1, request.HitsAddend) + + // First build a list of all cache keys that we are actually going to hit. + cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) + + keysToGet := make([]string, 0, len(request.Descriptors)) + + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue + } + + logger.Debugf("looking up cache key: %s", cacheKey.Key) + keysToGet = append(keysToGet, cacheKey.Key) + } + + // Now fetch from memcache. + responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, + len(request.Descriptors)) + + var memcacheValues map[string]*memcache.Item + var err error + + if len(keysToGet) > 0 { + memcacheValues, err = this.client.GetMulti(keysToGet) + if err != nil { + logger.Errorf("Error multi-getting memcache keys (%s): %s", keysToGet, err) + } + } + + for i, cacheKey := range cacheKeys { + + rawMemcacheValue, ok := memcacheValues[cacheKey.Key] + var limitBeforeIncrease uint32 + if ok { + decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 32) + if err != nil { + logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) + } else { + limitBeforeIncrease = uint32(decoded) + } + + } + + limitAfterIncrease := limitBeforeIncrease + hitsAddend + + limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) + + responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, + limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + } + + this.waitGroup.Add(1) + go 
this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) + + return responseDescriptorStatuses +} + +func (this *fixedRateLimitCacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool, + limits []*config.RateLimit, hitsAddend uint64) { + defer this.waitGroup.Done() + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" || isOverLimitWithLocalCache[i] { + continue + } + + _, err := this.client.Increment(cacheKey.Key, hitsAddend) + if err == memcache.ErrCacheMiss { + expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) + if this.expirationJitterMaxSeconds > 0 { + expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) + } + + // Need to add instead of increment. + err = this.client.Add(&memcache.Item{ + Key: cacheKey.Key, + Value: []byte(strconv.FormatUint(hitsAddend, 10)), + Expiration: int32(expirationSeconds), + }) + if err == memcache.ErrNotStored { + // There was a race condition to do this add. We should be able to increment + // now instead. 
+ _, err := this.client.Increment(cacheKey.Key, hitsAddend) + if err != nil { + logger.Errorf("Failed to increment key %s after failing to add: %s", cacheKey.Key, err) + continue + } + } else if err != nil { + logger.Errorf("Failed to add key %s: %s", cacheKey.Key, err) + continue + } + } else if err != nil { + logger.Errorf("Failed to increment key %s: %s", cacheKey.Key, err) + continue + } + } +} + +func (this *fixedRateLimitCacheImpl) Flush() { + this.waitGroup.Wait() +} + +func NewFixedRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, + expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { + return &fixedRateLimitCacheImpl{ + client: client, + timeSource: timeSource, + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + jitterRand: jitterRand, + expirationJitterMaxSeconds: expirationJitterMaxSeconds, + localCache: localCache, + nearLimitRatio: nearLimitRatio, + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), + } +} diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index dce625219..c2dcb68d3 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "math" "math/rand" "github.com/coocood/freecache" @@ -71,18 +72,20 @@ func (this *fixedRateLimitCacheImpl) DoLimit( continue } - // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { - isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - continue + // not sure about this code + if this.localCache != nil { + // Get returns the value or not found error. 
+ _, err := this.localCache.Get([]byte(cacheKey.Key)) + if err == nil { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue + } } - logger.Debugf("looking up cache key: %s", cacheKey.Key) - expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) - if this.baseRateLimiter.ExpirationJitterMaxSeconds > 0 { - expirationSeconds += this.baseRateLimiter.JitterRand.Int63n(this.baseRateLimiter.ExpirationJitterMaxSeconds) + if this.expirationJitterMaxSeconds > 0 { + expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. @@ -112,6 +115,28 @@ func (this *fixedRateLimitCacheImpl) DoLimit( responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 0, + } + continue + } + + if isOverLimitWithLocalCache[i] { + responseDescriptorStatuses[i] = + &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[i].Limit, + LimitRemaining: 0, + DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), + } + limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) + limits[i].Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) + continue + } limitAfterIncrease := results[i] limitBeforeIncrease := limitAfterIncrease - hitsAddend @@ -127,7 +152,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[i].Limit, LimitRemaining: 0, - DurationUntilReset: CalculateReset(limits[i].Limit, this.timeSource), + DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), } // Increase over limit statistics. 
Because we support += behavior for increasing the limit, we need to @@ -163,7 +188,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( Code: pb.RateLimitResponse_OK, CurrentLimit: limits[i].Limit, LimitRemaining: overLimitThreshold - limitAfterIncrease, - DurationUntilReset: CalculateReset(limits[i].Limit, this.timeSource), + DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), } // The limit is OK but we additionally want to know if we are near the limit. @@ -173,8 +198,10 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // only the difference between the current limit value and the near limit threshold were near // limit hits. if limitBeforeIncrease >= nearLimitThreshold { + // if before increasing the limit number in redis, the data count recorded in redis its already more than the threshold, we can add nearlimit metrics by number of hits limits[i].Stats.NearLimit.Add(uint64(hitsAddend)) } else { + // if not, subtracting limit after increase with the threshold. limits[i].Stats.NearLimit.Add(uint64(limitAfterIncrease - nearLimitThreshold)) } } @@ -184,7 +211,6 @@ func (this *fixedRateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -// Flush() is a no-op with redis since quota reads and updates happen synchronously. 
func (this *fixedRateLimitCacheImpl) Flush() {} func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 2a6d78fe5..e22fd749b 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -79,7 +79,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } } - // Get existing tat value for each cache keys isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline @@ -238,6 +237,8 @@ func (this *windowedRateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } +func (this *windowedRateLimitCacheImpl) Flush() {} + func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index 17112675c..98e1ae269 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -5,7 +5,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" "golang.org/x/net/context" ) diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index a2998f43f..dbc4b08cf 100644 --- 
a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -40,20 +40,28 @@ func (runner *Runner) GetStatsStore() stats.Store { func createLimiter(srv server.Server, s settings.Settings, localCache *freecache.Cache) limiter.RateLimitCache { switch s.BackendType { case "redis", "": - return redis.NewRateLimiterCacheImplFromSettings( + cacheImpl, err := redis.NewRateLimiterCacheImplFromSettings( s, localCache, srv, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), s.ExpirationJitterMaxSeconds) + if err != nil { + logger.Fatalf("Could not setup redis ratelimit cache. %v\n", err) + } + return cacheImpl case "memcache": - return memcached.NewRateLimitCacheImplFromSettings( + cacheImpl, err := memcached.NewRateLimitCacheImplFromSettings( s, utils.NewTimeSourceImpl(), rand.New(utils.NewLockedSource(time.Now().Unix())), localCache, srv.Scope()) + if err != nil { + logger.Fatalf("Could not setup memcache ratelimit cache. %v\n", err) + } + return cacheImpl default: logger.Fatalf("Invalid setting for BackendType: %s", s.BackendType) panic("This line should not be reachable") @@ -85,20 +93,10 @@ func (runner *Runner) Run() { } srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) - rateLimitCache, err := redis.NewRateLimiterCacheImplFromSettings( - s, - localCache, - srv, - limiter.NewTimeSourceImpl(), - rand.New(limiter.NewLockedSource(time.Now().Unix())), - s.ExpirationJitterMaxSeconds) - if err != nil { - logger.Fatalf("Could not setup ratelimit cache. 
%v\n", err) - } service := ratelimit.NewService( srv.Runtime(), - rateLimitCache, + createLimiter(srv, s, localCache), config.NewRateLimitConfigLoaderImpl(), srv.Scope().Scope("service"), s.RuntimeWatchRoot, diff --git a/src/utils/jitter_rand_source.go b/src/utils/jitter_rand_source.go new file mode 100644 index 000000000..33eebe4d2 --- /dev/null +++ b/src/utils/jitter_rand_source.go @@ -0,0 +1,9 @@ +package utils + +// Interface for a rand Source for expiration jitter. +type JitterRandSource interface { + // @return a non-negative pseudo-random 63-bit integer as an int64. + Int63() int64 + // @param seed initializes pseudo-random generator to a deterministic state. + Seed(seed int64) +} diff --git a/src/utils/jitter_rand_source_impl.go b/src/utils/jitter_rand_source_impl.go new file mode 100644 index 000000000..39283d60e --- /dev/null +++ b/src/utils/jitter_rand_source_impl.go @@ -0,0 +1,29 @@ +package utils + +import ( + "math/rand" + "sync" +) + +// rand for jitter. +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func NewLockedSource(seed int64) JitterRandSource { + return &lockedSource{src: rand.NewSource(seed)} +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/src/utils/time.go b/src/utils/time.go index 0d6a286bd..d4d56b51b 100644 --- a/src/utils/time.go +++ b/src/utils/time.go @@ -1,24 +1,9 @@ package utils -import ( - "github.com/golang/protobuf/ptypes/duration" - "time" -) - -const secondToNanosecondRate = 1e9 - -func NanosecondsToSeconds(nanoseconds int64) int64 { - return nanoseconds / secondToNanosecondRate -} - -func NanosecondsToDuration(nanoseconds int64) *duration.Duration { - nanos := nanoseconds - secs := nanos / secondToNanosecondRate - nanos -= secs * secondToNanosecondRate - return &duration.Duration{Seconds: secs, Nanos: int32(nanos)} -} - -func 
SecondsToNanoseconds(second int64) int64 { - time.Now() - return second * secondToNanosecondRate +// Interface for a time source. +type TimeSource interface { + // @return the current unix time in seconds. + UnixNow() int64 + // @return the current unix time in nanoseconds. + UnixNanoNow() int64 } diff --git a/src/utils/time_impl.go b/src/utils/time_impl.go new file mode 100644 index 000000000..f20075256 --- /dev/null +++ b/src/utils/time_impl.go @@ -0,0 +1,19 @@ +package utils + +import ( + "time" +) + +type timeSourceImpl struct{} + +func (this *timeSourceImpl) UnixNow() int64 { + return time.Now().Unix() +} + +func (this *timeSourceImpl) UnixNanoNow() int64 { + return time.Now().UnixNano() +} + +func NewTimeSourceImpl() TimeSource { + return &timeSourceImpl{} +} diff --git a/src/utils/utilities.go b/src/utils/utilities.go index 6a801af5b..ba798e331 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -1,15 +1,13 @@ package utils import ( + "time" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/golang/protobuf/ptypes/duration" ) -// Interface for a time source. -type TimeSource interface { - // @return the current unix time in seconds. - UnixNow() int64 -} +const secondToNanosecondRate = 1e9 // Convert a rate limit into a time divider. // @param unit supplies the unit to convert. 
@@ -49,3 +47,25 @@ func MaxUint32(a uint32, b uint32) uint32 { } return b } + +func NanosecondsToSeconds(nanoseconds int64) int64 { + return nanoseconds / secondToNanosecondRate +} + +func NanosecondsToDuration(nanoseconds int64) *duration.Duration { + nanos := nanoseconds + secs := nanos / secondToNanosecondRate + nanos -= secs * secondToNanosecondRate + return &duration.Duration{Seconds: secs, Nanos: int32(nanos)} +} + +func SecondsToNanoseconds(second int64) int64 { + time.Now() + return second * secondToNanosecondRate +} + +func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration { + sec := UnitToDivider(currentLimit.Unit) + now := timeSource.UnixNow() + return &duration.Duration{Seconds: sec - now%sec} +} diff --git a/test/memcached/cache_impl_test.go b/test/memcached/cache_impl_test.go index fad218407..e5fbfe723 100644 --- a/test/memcached/cache_impl_test.go +++ b/test/memcached/cache_impl_test.go @@ -34,7 +34,7 @@ func TestMemcached(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( @@ -120,7 +120,7 @@ func TestMemcachedGetError(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) 
client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( @@ -204,7 +204,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { localCache := freecache.NewCache(100) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8) localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. Under Near Limit Ratio @@ -296,7 +296,7 @@ func TestNearLimit(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) @@ -464,7 +464,7 @@ func TestMemcacheWithJitter(t *testing.T) { client := mock_memcached.NewMockClient(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8) timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -505,7 +505,7 @@ func TestMemcacheAdd(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) // Test a race condition with the initial add timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go index 6b73f2dc4..927031404 100644 --- a/test/mocks/limiter/limiter.go +++ b/test/mocks/limiter/limiter.go @@ -49,6 +49,18 @@ func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) } +// Flush mocks base method +func (m *MockRateLimitCache) Flush() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Flush") +} + +// Flush indicates an expected call of Flush +func (mr *MockRateLimitCacheMockRecorder) Flush() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockRateLimitCache)(nil).Flush)) +} + // MockTimeSource 
is a mock of TimeSource interface type MockTimeSource struct { ctrl *gomock.Controller @@ -142,9 +154,3 @@ func (m *MockJitterRandSource) Seed(arg0 int64) { m.ctrl.T.Helper() m.ctrl.Call(m, "Flush") } - -// Flush indicates an expected call of Flush -func (mr *MockRateLimitCacheMockRecorder) Flush() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockRateLimitCache)(nil).Flush)) -} diff --git a/test/mocks/utils/utils.go b/test/mocks/utils/utils.go index 1812f4f0f..64e93d631 100644 --- a/test/mocks/utils/utils.go +++ b/test/mocks/utils/utils.go @@ -40,6 +40,14 @@ func (m *MockTimeSource) UnixNow() int64 { return ret0 } +// UnixNanoNow mocks base method +func (m *MockTimeSource) UnixNanoNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNanoNow") + ret0, _ := ret[0].(int64) + return ret0 +} + // UnixNow indicates an expected call of UnixNow func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { mr.mock.ctrl.T.Helper() diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 153a5b210..ab5d1c686 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -12,9 +12,11 @@ import ( "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" ) @@ -46,13 +48,13 @@ func BenchmarkParallelDoLimit(b *testing.B) { defer client.Close() var cache limiter.RateLimitCache - timeSource := limiter.NewTimeSourceImpl() + timeSource := utils.NewTimeSourceImpl() if rateLimitAlgorithm == settings.FixedRateLimit { algorithmImpl := algorithm.NewFixedWindowAlgorithm(timeSource) - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, 
rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) } else if rateLimitAlgorithm == settings.WindowedRateLimit { algorithmImpl := algorithm.NewRollingWindowAlgorithm() - cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) } else { b.Fatalf("unknown rate limit type %s", rateLimitAlgorithm) } diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 280ca7d09..3a85b0f2b 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -1,8 +1,6 @@ package redis_test import ( - "testing" - "github.com/coocood/freecache" "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" @@ -13,17 +11,18 @@ import ( "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" -<<<<<<< HEAD + redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" + "github.com/envoyproxy/ratelimit/test/common" + mock_algorithm "github.com/envoyproxy/ratelimit/test/mocks/algorithm" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_driver "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" "math/rand" "testing" ->>>>>>> move pipeline and cache key method from cache implementation ) +func TestRedis(t *testing.T) { t.Run("WithoutPerSecondRedis", testRedis(false)) t.Run("WithPerSecondRedis", testRedis(true)) } From 33f39f9928b4dae479a30bf2156d90549610a332 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Thu, 14 Jan 2021 
16:46:01 +0100 Subject: [PATCH 17/31] add memcached rolling window code Signed-off-by: zufardhiyaulhaq --- src/memcached/cache_impl.go | 11 + src/memcached/client.go | 1 + src/memcached/windowed_cache_impl.go | 208 ++++++++++++++++++ ..._impl_test.go => fixed_cache_impl_test.go} | 0 test/mocks/memcached/client.go | 14 ++ 5 files changed, 234 insertions(+) create mode 100644 src/memcached/windowed_cache_impl.go rename test/memcached/{cache_impl_test.go => fixed_cache_impl_test.go} (100%) diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 618a8fa41..36c90a464 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -40,5 +40,16 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim s.NearLimitRatio, ), nil } + if s.RateLimitAlgorithm == settings.WindowedRateLimit { + return NewWindowedRateLimitCacheImpl( + memcache.New(s.MemcacheHostPort), + timeSource, + jitterRand, + s.ExpirationJitterMaxSeconds, + localCache, + scope, + s.NearLimitRatio, + ), nil + } return nil, fmt.Errorf("Unknown rate limit algorithm. 
%s\n", s.RateLimitAlgorithm) } diff --git a/src/memcached/client.go b/src/memcached/client.go index 55c0ec318..031c341c4 100644 --- a/src/memcached/client.go +++ b/src/memcached/client.go @@ -11,4 +11,5 @@ type Client interface { GetMulti(keys []string) (map[string]*memcache.Item, error) Increment(key string, delta uint64) (newValue uint64, err error) Add(item *memcache.Item) error + Set(item *memcache.Item) error } diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go new file mode 100644 index 000000000..86b94be06 --- /dev/null +++ b/src/memcached/windowed_cache_impl.go @@ -0,0 +1,208 @@ +package memcached + +import ( + "context" + "math" + "math/rand" + "strconv" + "sync" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/utils" + stats "github.com/lyft/gostats" + logger "github.com/sirupsen/logrus" +) + +type windowedRateLimitCacheImpl struct { + client Client + timeSource utils.TimeSource + jitterRand *rand.Rand + expirationJitterMaxSeconds int64 + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache + waitGroup sync.WaitGroup + nearLimitRatio float32 + baseRateLimiter *limiter.BaseRateLimiter +} + +var _ limiter.RateLimitCache = (*windowedRateLimitCacheImpl)(nil) + +func (this *windowedRateLimitCacheImpl) DoLimit( + ctx context.Context, + request *pb.RateLimitRequest, + limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + + logger.Debugf("starting cache lookup") + + // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. + hitsAddend := utils.MaxUint32(1, request.HitsAddend) + + // First build a list of all cache keys that we are actually going to hit. 
+ assert.Assert(len(request.Descriptors) == len(limits)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) + for i := 0; i < len(request.Descriptors); i++ { + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], 0) + + // Increase statistics for limits hit by their respective requests. + if limits[i] != nil { + limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + } + } + + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) + keysToGet := make([]string, 0, len(request.Descriptors)) + + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" { + continue + } + + // Check if key is over the limit in local cache. + if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue + } + + logger.Debugf("looking up cache key: %s", cacheKey.Key) + keysToGet = append(keysToGet, cacheKey.Key) + } + + // Now fetch from memcache. 
+ responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, + len(request.Descriptors)) + + var memcacheValues map[string]*memcache.Item + var err error + + if len(keysToGet) > 0 { + memcacheValues, err = this.client.GetMulti(keysToGet) + if err != nil { + logger.Errorf("Error multi-getting memcache keys (%s): %s", keysToGet, err) + } + } + + newTats := make([]int64, len(cacheKeys)) + expirationSeconds := make([]int64, len(cacheKeys)) + isOverLimit := make([]bool, len(cacheKeys)) + now := this.timeSource.UnixNanoNow() + + for i, cacheKey := range cacheKeys { + + rawMemcacheValue, ok := memcacheValues[cacheKey.Key] + var tat int64 + if ok { + tat, err = strconv.ParseInt(string(rawMemcacheValue.Value), 10, 64) + if err != nil { + logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) + } + } else { + tat = now + } + + limit := int64(limits[i].Limit.RequestsPerUnit) + period := utils.SecondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) + quantity := int64(hitsAddend) + arrivedAt := now + + emissionInterval := period / limit + tat = utils.MaxInt64(tat, arrivedAt) + newTats[i] = tat + emissionInterval*quantity + allowAt := newTats[i] - period + diff := arrivedAt - allowAt + + previousAllowAt := tat - period + previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) + previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) + nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) * (1.0 - this.nearLimitRatio)))) + limitRemaining := int64(math.Ceil(float64(diff / emissionInterval))) + + expirationSeconds[i] = utils.NanosecondsToSeconds(newTats[i]-arrivedAt) + 1 + if this.expirationJitterMaxSeconds > 0 { + expirationSeconds[i] += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) + } + + if diff < 0 { + isOverLimit[i] = true + responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ + Code: 
pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[i].Limit, + LimitRemaining: 0, + DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), + } + + limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) + limits[i].Stats.NearLimit.Add(uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow))) + + if this.localCache != nil { + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.NanosecondsToSeconds(-diff))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) + } + } + continue + } else { + isOverLimit[i] = false + responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limits[i].Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: utils.NanosecondsToDuration(newTats[i] - arrivedAt), + } + + hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) + if hitNearLimit > 0 { + limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) + } + } + } + + this.waitGroup.Add(1) + go this.increaseAsync(isOverLimitWithLocalCache, isOverLimit, cacheKeys, expirationSeconds, newTats) + + return responseDescriptorStatuses +} + +func (this *windowedRateLimitCacheImpl) increaseAsync(isOverLimitWithLocalCache []bool, isOverLimit []bool, cacheKeys []limiter.CacheKey, expirationSeconds []int64, newTats []int64) { + defer this.waitGroup.Done() + for i, cacheKey := range cacheKeys { + if cacheKey.Key == "" || isOverLimitWithLocalCache[i] || isOverLimit[i] { + continue + } + + err := this.client.Set(&memcache.Item{ + Key: cacheKey.Key, + Value: []byte(strconv.FormatInt(newTats[i], 10)), + Expiration: int32(expirationSeconds[i]), + }) + + if err != nil { + logger.Errorf("Failed to set key %s: %s", cacheKey.Key, err) + continue + } + } +} + +func (this *windowedRateLimitCacheImpl) Flush() { + this.waitGroup.Wait() +} + +func 
NewWindowedRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, + expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { + return &windowedRateLimitCacheImpl{ + client: client, + timeSource: timeSource, + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + jitterRand: jitterRand, + expirationJitterMaxSeconds: expirationJitterMaxSeconds, + localCache: localCache, + nearLimitRatio: nearLimitRatio, + baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), + } +} diff --git a/test/memcached/cache_impl_test.go b/test/memcached/fixed_cache_impl_test.go similarity index 100% rename from test/memcached/cache_impl_test.go rename to test/memcached/fixed_cache_impl_test.go diff --git a/test/mocks/memcached/client.go b/test/mocks/memcached/client.go index 433105bd0..4db357e61 100644 --- a/test/mocks/memcached/client.go +++ b/test/mocks/memcached/client.go @@ -47,6 +47,20 @@ func (mr *MockClientMockRecorder) Add(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockClient)(nil).Add), arg0) } +// Set mocks base method +func (m *MockClient) Set(arg0 *memcache.Item) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Set +func (mr *MockClientMockRecorder) set(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClient)(nil).Add), arg0) +} + // GetMulti mocks base method func (m *MockClient) GetMulti(arg0 []string) (map[string]*memcache.Item, error) { m.ctrl.T.Helper() From 846ba8e375780941d0f2ff0c9573dc6c08be6c76 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Sun, 17 Jan 2021 05:34:19 +0100 Subject: [PATCH 18/31] generate memcached client mock with MockGen 
Signed-off-by: zufardhiyaulhaq --- test/mocks/memcached/client.go | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/mocks/memcached/client.go b/test/mocks/memcached/client.go index 4db357e61..690900e59 100644 --- a/test/mocks/memcached/client.go +++ b/test/mocks/memcached/client.go @@ -47,20 +47,6 @@ func (mr *MockClientMockRecorder) Add(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockClient)(nil).Add), arg0) } -// Set mocks base method -func (m *MockClient) Set(arg0 *memcache.Item) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Add indicates an expected call of Set -func (mr *MockClientMockRecorder) set(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClient)(nil).Add), arg0) -} - // GetMulti mocks base method func (m *MockClient) GetMulti(arg0 []string) (map[string]*memcache.Item, error) { m.ctrl.T.Helper() @@ -90,3 +76,17 @@ func (mr *MockClientMockRecorder) Increment(arg0, arg1 interface{}) *gomock.Call mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Increment", reflect.TypeOf((*MockClient)(nil).Increment), arg0, arg1) } + +// Set mocks base method +func (m *MockClient) Set(arg0 *memcache.Item) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set +func (mr *MockClientMockRecorder) Set(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClient)(nil).Set), arg0) +} From e4e25ccea2df42be81eda19d22b6e7790afb7a59 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 20 Jan 2021 05:24:45 +0100 Subject: [PATCH 19/31] refactor rate limit decision to algorithm package 
Signed-off-by: zufardhiyaulhaq --- Makefile | 5 +- src/algorithm/fixed_window.go | 124 ++++++++++-- src/algorithm/ratelimit_algorithm.go | 18 +- src/algorithm/rolling_window.go | 140 ++++++++++++-- src/limiter/base_limiter.go | 178 ------------------ src/limiter/{cache.go => rate_limit_cache.go} | 16 -- src/limiter/rate_limiter.go | 0 src/memcached/cache_impl.go | 13 ++ src/memcached/{ => driver}/client.go | 2 +- src/memcached/fixed_cache_impl.go | 38 ++-- src/memcached/windowed_cache_impl.go | 100 +++------- src/redis/cache_impl.go | 11 +- src/redis/fixed_cache_impl.go | 155 +++------------ src/redis/windowed_cache_impl.go | 162 ++++------------ src/server/server_impl.go | 4 +- .../cache_key_generator.go} | 58 +++--- src/{limiter => utils}/local_cache_stats.go | 24 +-- src/utils/utilities.go | 2 +- test/limiter/base_limiter_test.go | 125 ------------ test/mocks/algorithm/ratelimit_algorithm.go | 36 ++++ 20 files changed, 460 insertions(+), 751 deletions(-) delete mode 100644 src/limiter/base_limiter.go rename src/limiter/{cache.go => rate_limit_cache.go} (73%) create mode 100644 src/limiter/rate_limiter.go rename src/memcached/{ => driver}/client.go (95%) rename src/{limiter/cache_key.go => utils/cache_key_generator.go} (62%) rename src/{limiter => utils}/local_cache_stats.go (98%) delete mode 100644 test/limiter/base_limiter_test.go diff --git a/Makefile b/Makefile index 168aa1a65..4f30d659b 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ export GO111MODULE=on PROJECT = ratelimit -REGISTRY ?= envoyproxy +REGISTRY ?= zufardhiyaulhaq IMAGE := $(REGISTRY)/$(PROJECT) INTEGRATION_IMAGE := $(REGISTRY)/$(PROJECT)_integration MODULE = github.com/envoyproxy/ratelimit @@ -112,7 +112,8 @@ docker_tests: docker run $$(tty -s && echo "-it" || echo) $(INTEGRATION_IMAGE):$(VERSION) .PHONY: docker_image -docker_image: docker_tests +# docker_image: docker_tests +docker_image: docker build . 
-t $(IMAGE):$(VERSION) .PHONY: docker_push diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index cb659391e..e0c4c6979 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -1,30 +1,126 @@ package algorithm import ( - pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "math" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/limiter" - redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" + "github.com/envoyproxy/ratelimit/src/utils" + logger "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/durationpb" ) type FixedWindowImpl struct { - now limiter.TimeSource - cacheKeyGenerator limiter.CacheKeyGenerator + timeSource utils.TimeSource + cacheKeyGenerator utils.CacheKeyGenerator + localCache *freecache.Cache + nearLimitRatio float32 +} + +func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { + if key == "" { + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 0, + } + } + if isOverLimitWithLocalCache { + fw.PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: 0, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), + } + } + + isOverLimit, limitRemaining, durationUntilReset := fw.IsOverLimit(limit, int64(results), hitsAddend) + if !isOverLimit { + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: 
durationUntilReset, + } + } else { + if fw.localCache != nil { + err := fw.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limit.Limit.Unit))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", key) + } + } + + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: durationUntilReset, + } + } +} + +func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) { + limitAfterIncrease := results + limitBeforeIncrease := limitAfterIncrease - int64(hitsAddend) + overLimitThreshold := int64(limit.Limit.RequestsPerUnit) + nearLimitThreshold := int64(math.Floor(float64(float32(overLimitThreshold) * fw.nearLimitRatio))) + + if limitAfterIncrease > overLimitThreshold { + if limitBeforeIncrease >= overLimitThreshold { + fw.PopulateStats(limit, 0, uint64(hitsAddend), 0) + } else { + fw.PopulateStats(limit, uint64(overLimitThreshold-utils.MaxInt64(nearLimitThreshold, limitBeforeIncrease)), uint64(limitAfterIncrease-overLimitThreshold), 0) + } + + return true, 0, utils.CalculateFixedReset(limit.Limit, fw.timeSource) + } else { + if limitAfterIncrease > nearLimitThreshold { + if limitBeforeIncrease >= nearLimitThreshold { + fw.PopulateStats(limit, uint64(hitsAddend), 0, 0) + } else { + fw.PopulateStats(limit, uint64(limitAfterIncrease-nearLimitThreshold), 0, 0) + } + } + + return false, overLimitThreshold - limitAfterIncrease, utils.CalculateFixedReset(limit.Limit, fw.timeSource) + } +} + +func (fw *FixedWindowImpl) IsOverLimitWithLocalCache(key string) bool { + if fw.localCache != nil { + _, err := fw.localCache.Get([]byte(key)) + if err == nil { + return true + } + } + return false } -func (this *FixedWindowImpl) GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { - return 
this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, this.now.UnixNow()) +func (fw *FixedWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, + limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { + return fw.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), fw.timeSource.UnixNow()) } -func (this *FixedWindowImpl) AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline { - pipeline = client.PipeAppend(pipeline, result, "INCRBY", key, hitsAddend) - pipeline = client.PipeAppend(pipeline, nil, "EXPIRE", key, expirationSeconds) - return pipeline +func (fw *FixedWindowImpl) PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { + limit.Stats.NearLimit.Add(nearLimit) + limit.Stats.OverLimit.Add(overLimit) + limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) +} + +func (fw *FixedWindowImpl) GetNewTat() int64 { + return 0 +} +func (fw *FixedWindowImpl) GetArrivedAt() int64 { + return 0 } -func NewFixedWindowAlgorithm(timeSource limiter.TimeSource) *FixedWindowImpl { +func NewFixedWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32) *FixedWindowImpl { return &FixedWindowImpl{ - now: timeSource, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + timeSource: timeSource, + cacheKeyGenerator: utils.NewCacheKeyGenerator(), + localCache: localCache, + nearLimitRatio: nearLimitRatio, } } diff --git a/src/algorithm/ratelimit_algorithm.go b/src/algorithm/ratelimit_algorithm.go index aabdd4049..d79820433 100644 --- a/src/algorithm/ratelimit_algorithm.go +++ b/src/algorithm/ratelimit_algorithm.go @@ -1,13 +1,21 @@ package algorithm import ( - pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" 
"github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/limiter" - redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" + "github.com/envoyproxy/ratelimit/src/utils" + "google.golang.org/protobuf/types/known/durationpb" ) type RatelimitAlgorithm interface { - GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey - AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline + IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) + IsOverLimitWithLocalCache(key string) bool + + GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus + GetNewTat() int64 + GetArrivedAt() int64 + + GenerateCacheKeys(request *pb.RateLimitRequest, + limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey + PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) } diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index a6b82c67c..cf60bc657 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -1,32 +1,144 @@ package algorithm import ( - pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + "math" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/limiter" - redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" + "github.com/envoyproxy/ratelimit/src/utils" + logger "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/durationpb" ) const DummyCacheKeyTime = 0 type RollingWindowImpl struct { - now 
int64 - cacheKeyGenerator limiter.CacheKeyGenerator + timeSource utils.TimeSource + cacheKeyGenerator utils.CacheKeyGenerator + localCache *freecache.Cache + nearLimitRatio float32 + arrivedAt int64 + newTat int64 +} + +func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { + if key == "" { + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 0, + } + } + if isOverLimitWithLocalCache { + rw.PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: 0, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, rw.timeSource), + } + } + + isOverLimit, limitRemaining, durationUntilReset := rw.IsOverLimit(limit, int64(results), hitsAddend) + if !isOverLimit { + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: durationUntilReset, + } + } else { + if rw.localCache != nil { + err := rw.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limit.Limit.Unit))) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", key) + } + } + + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: durationUntilReset, + } + } +} + +func (rw *RollingWindowImpl) GetNewTat() int64 { + return rw.newTat +} +func (rw *RollingWindowImpl) GetArrivedAt() int64 { + return rw.arrivedAt +} + +func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) { + now := rw.timeSource.UnixNanoNow() + + // Time during
computation should be in nanosecond + rw.arrivedAt = now + tat := utils.MaxInt64(results, rw.arrivedAt) + totalLimit := int64(limit.Limit.RequestsPerUnit) + period := utils.SecondsToNanoseconds(utils.UnitToDivider(limit.Limit.Unit)) + quantity := int64(hitsAddend) + + // GCRA computation + // Emission interval is the cost of each request + emissionInterval := period / totalLimit + // Tat is set to current request timestamp if not set before + + // New tat define the end of the window + rw.newTat = tat + emissionInterval*quantity + // We allow the request if it's inside the window + allowAt := rw.newTat - period + diff := rw.arrivedAt - allowAt + + previousAllowAt := tat - period + previousLimitRemaining := int64(math.Ceil(float64(rw.arrivedAt-previousAllowAt) / float64(emissionInterval))) + previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) + nearLimitWindow := int64(math.Ceil(float64(float32(limit.Limit.RequestsPerUnit) * (1.0 - rw.nearLimitRatio)))) + limitRemaining := int64(math.Ceil(float64(diff) / float64(emissionInterval))) + hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) + + if diff < 0 { + rw.PopulateStats(limit, uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow)), uint64(quantity-previousLimitRemaining), 0) + + return true, 0, utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - rw.arrivedAt)))) + } else { + if hitNearLimit > 0 { + rw.PopulateStats(limit, uint64(hitNearLimit), 0, 0) + } + + return false, limitRemaining, utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt) + } +} + +func (rw *RollingWindowImpl) IsOverLimitWithLocalCache(key string) bool { + if rw.localCache != nil { + _, err := rw.localCache.Get([]byte(key)) + if err == nil { + return true + } + } + return false } -func (this *RollingWindowImpl) GenerateCacheKey(domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { - return
this.cacheKeyGenerator.GenerateCacheKey(domain, descriptor, limit, DummyCacheKeyTime) +func (rw *RollingWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, + limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { + return rw.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), DummyCacheKeyTime) } -func (this *RollingWindowImpl) AppendPipeline(client redis_driver.Client, pipeline redis_driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) redis_driver.Pipeline { - pipeline = client.PipeAppend(pipeline, nil, "SETNX", key, int64(0)) - pipeline = client.PipeAppend(pipeline, nil, "EXPIRE", key, expirationSeconds) - pipeline = client.PipeAppend(pipeline, result, "GET", key) - return pipeline +func (rw *RollingWindowImpl) PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { + limit.Stats.NearLimit.Add(nearLimit) + limit.Stats.OverLimit.Add(overLimit) + limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) } -func NewRollingWindowAlgorithm() *RollingWindowImpl { +func NewRollingWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32) *RollingWindowImpl { return &RollingWindowImpl{ - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + timeSource: timeSource, + cacheKeyGenerator: utils.NewCacheKeyGenerator(), + localCache: localCache, + nearLimitRatio: nearLimitRatio, } } diff --git a/src/limiter/base_limiter.go b/src/limiter/base_limiter.go deleted file mode 100644 index 9f168c848..000000000 --- a/src/limiter/base_limiter.go +++ /dev/null @@ -1,178 +0,0 @@ -package limiter - -import ( - "math" - "math/rand" - - "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/assert" - "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" -) 
- -type BaseRateLimiter struct { - timeSource utils.TimeSource - JitterRand *rand.Rand - ExpirationJitterMaxSeconds int64 - cacheKeyGenerator CacheKeyGenerator - localCache *freecache.Cache - nearLimitRatio float32 -} - -type LimitInfo struct { - limit *config.RateLimit - limitBeforeIncrease uint32 - limitAfterIncrease uint32 - nearLimitThreshold uint32 - overLimitThreshold uint32 -} - -func NewRateLimitInfo(limit *config.RateLimit, limitBeforeIncrease uint32, limitAfterIncrease uint32, - nearLimitThreshold uint32, overLimitThreshold uint32) *LimitInfo { - return &LimitInfo{limit: limit, limitBeforeIncrease: limitBeforeIncrease, limitAfterIncrease: limitAfterIncrease, - nearLimitThreshold: nearLimitThreshold, overLimitThreshold: overLimitThreshold} -} - -// Generates cache keys for given rate limit request. Each cache key is represented by a concatenation of -// domain, descriptor and current timestamp. -func (this *BaseRateLimiter) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend uint32) []CacheKey { - assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]CacheKey, len(request.Descriptors)) - now := this.timeSource.UnixNow() - for i := 0; i < len(request.Descriptors); i++ { - // generateCacheKey() returns an empty string in the key if there is no limit - // so that we can keep the arrays all the same size. - cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) - // Increase statistics for limits hit by their respective requests. - if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) - } - } - return cacheKeys -} - -// Returns `true` in case local cache is enabled and contains value for provided cache key, `false` otherwise. -func (this *BaseRateLimiter) IsOverLimitWithLocalCache(key string) bool { - if this.localCache != nil { - // Get returns the value or not found error. 
- _, err := this.localCache.Get([]byte(key)) - if err == nil { - return true - } - } - return false -} - -// Generates response descriptor status based on cache key, over the limit with local cache, over the limit and -// near the limit thresholds. Thresholds are checked in order and are mutually exclusive. -func (this *BaseRateLimiter) GetResponseDescriptorStatus(key string, limitInfo *LimitInfo, - isOverLimitWithLocalCache bool, hitsAddend uint32) *pb.RateLimitResponse_DescriptorStatus { - if key == "" { - return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, - nil, 0) - } - if isOverLimitWithLocalCache { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) - limitInfo.limit.Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) - return this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, - limitInfo.limit.Limit, 0) - } - var responseDescriptorStatus *pb.RateLimitResponse_DescriptorStatus - limitInfo.overLimitThreshold = limitInfo.limit.Limit.RequestsPerUnit - // The nearLimitThreshold is the number of requests that can be made before hitting the nearLimitRatio. - // We need to know it in both the OK and OVER_LIMIT scenarios. - limitInfo.nearLimitThreshold = uint32(math.Floor(float64(float32(limitInfo.overLimitThreshold) * this.nearLimitRatio))) - logger.Debugf("cache key: %s current: %d", key, limitInfo.limitAfterIncrease) - if limitInfo.limitAfterIncrease > limitInfo.overLimitThreshold { - responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OVER_LIMIT, - limitInfo.limit.Limit, 0) - - checkOverLimitThreshold(limitInfo, hitsAddend) - - if this.localCache != nil { - // Set the TTL of the local_cache to be the entire duration. - // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit - // cache keys in local_cache lose effectiveness. 
- // For example, if we have an hour limit on all mongo connections, the cache key would be - // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start - // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). - // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limitInfo.limit.Limit.Unit))) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", key) - } - } - } else { - responseDescriptorStatus = this.generateResponseDescriptorStatus(pb.RateLimitResponse_OK, - limitInfo.limit.Limit, limitInfo.overLimitThreshold-limitInfo.limitAfterIncrease) - - // The limit is OK but we additionally want to know if we are near the limit. - checkNearLimitThreshold(limitInfo, hitsAddend) - } - return responseDescriptorStatus -} - -func NewBaseRateLimit(timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, - localCache *freecache.Cache, nearLimitRatio float32) *BaseRateLimiter { - return &BaseRateLimiter{ - timeSource: timeSource, - JitterRand: jitterRand, - ExpirationJitterMaxSeconds: expirationJitterMaxSeconds, - cacheKeyGenerator: NewCacheKeyGenerator(), - localCache: localCache, - nearLimitRatio: nearLimitRatio, - } -} - -func checkOverLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { - // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to - // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the - // N hits was over the limit, then all the N hits were over limit. - // Otherwise, only the difference between the current limit value and the over limit threshold - // were over limit hits. 
- if limitInfo.limitBeforeIncrease >= limitInfo.overLimitThreshold { - limitInfo.limit.Stats.OverLimit.Add(uint64(hitsAddend)) - } else { - limitInfo.limit.Stats.OverLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.overLimitThreshold)) - - // If the limit before increase was below the over limit value, then some of the hits were - // in the near limit range. - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.overLimitThreshold - - utils.MaxUint32(limitInfo.nearLimitThreshold, limitInfo.limitBeforeIncrease))) - } -} - -func checkNearLimitThreshold(limitInfo *LimitInfo, hitsAddend uint32) { - if limitInfo.limitAfterIncrease > limitInfo.nearLimitThreshold { - // Here we also need to assess which portion of the hitsAddend were in the near limit range. - // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, - // only the difference between the current limit value and the near limit threshold were near - // limit hits. - if limitInfo.limitBeforeIncrease >= limitInfo.nearLimitThreshold { - limitInfo.limit.Stats.NearLimit.Add(uint64(hitsAddend)) - } else { - limitInfo.limit.Stats.NearLimit.Add(uint64(limitInfo.limitAfterIncrease - limitInfo.nearLimitThreshold)) - } - } -} - -func (this *BaseRateLimiter) generateResponseDescriptorStatus(responseCode pb.RateLimitResponse_Code, - limit *pb.RateLimitResponse_RateLimit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus { - if limit != nil { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: responseCode, - CurrentLimit: limit, - LimitRemaining: limitRemaining, - DurationUntilReset: utils.CalculateReset(limit, this.timeSource), - } - } else { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: responseCode, - CurrentLimit: limit, - LimitRemaining: limitRemaining, - } - } -} diff --git a/src/limiter/cache.go b/src/limiter/rate_limit_cache.go similarity index 73% rename from src/limiter/cache.go rename to src/limiter/rate_limit_cache.go index 
59a79a6ba..5ca07edea 100644 --- a/src/limiter/cache.go +++ b/src/limiter/rate_limit_cache.go @@ -6,22 +6,6 @@ import ( "golang.org/x/net/context" ) -// Interface for a time source. -type TimeSource interface { - // @return the current unix time in seconds. - UnixNow() int64 - // @return the current unix time in nanoseconds. - UnixNanoNow() int64 -} - -// Interface for a rand Source for expiration jitter. -type JitterRandSource interface { - // @return a non-negative pseudo-random 63-bit integer as an int64. - Int63() int64 - // @param seed initializes pseudo-random generator to a deterministic state. - Seed(seed int64) -} - // Interface for interacting with a cache backend for rate limiting. type RateLimitCache interface { // Contact the cache and perform rate limiting for a set of descriptors and limits. diff --git a/src/limiter/rate_limiter.go b/src/limiter/rate_limiter.go new file mode 100644 index 000000000..e69de29bb diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 36c90a464..bef5e71dc 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -21,6 +21,7 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/coocood/freecache" + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" "github.com/envoyproxy/ratelimit/src/utils" @@ -30,6 +31,11 @@ import ( func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, localCache *freecache.Cache, scope stats.Scope) (limiter.RateLimitCache, error) { if s.RateLimitAlgorithm == settings.FixedRateLimit { + ratelimitAlgorithm := algorithm.NewFixedWindowAlgorithm( + timeSource, + localCache, + s.NearLimitRatio, + ) return NewFixedRateLimitCacheImpl( memcache.New(s.MemcacheHostPort), timeSource, @@ -38,9 +44,15 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim localCache, scope, s.NearLimitRatio, 
+ ratelimitAlgorithm, ), nil } if s.RateLimitAlgorithm == settings.WindowedRateLimit { + ratelimitAlgorithm := algorithm.NewRollingWindowAlgorithm( + timeSource, + localCache, + s.NearLimitRatio, + ) return NewWindowedRateLimitCacheImpl( memcache.New(s.MemcacheHostPort), timeSource, @@ -49,6 +61,7 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim localCache, scope, s.NearLimitRatio, + ratelimitAlgorithm, ), nil } return nil, fmt.Errorf("Unknown rate limit algorithm. %s\n", s.RateLimitAlgorithm) diff --git a/src/memcached/client.go b/src/memcached/driver/client.go similarity index 95% rename from src/memcached/client.go rename to src/memcached/driver/client.go index 031c341c4..7cbcbc1cb 100644 --- a/src/memcached/client.go +++ b/src/memcached/driver/client.go @@ -1,4 +1,4 @@ -package memcached +package driver import ( "github.com/bradfitz/gomemcache/memcache" diff --git a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go index 677ef4a17..1e4c1e007 100644 --- a/src/memcached/fixed_cache_impl.go +++ b/src/memcached/fixed_cache_impl.go @@ -9,23 +9,25 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/memcached/driver" "github.com/envoyproxy/ratelimit/src/utils" stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" ) type fixedRateLimitCacheImpl struct { - client Client + client driver.Client timeSource utils.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - cacheKeyGenerator limiter.CacheKeyGenerator + cacheKeyGenerator utils.CacheKeyGenerator localCache *freecache.Cache waitGroup sync.WaitGroup nearLimitRatio float32 - baseRateLimiter *limiter.BaseRateLimiter + algorithm algorithm.RatelimitAlgorithm } 
var _ limiter.RateLimitCache = (*fixedRateLimitCacheImpl)(nil) @@ -38,13 +40,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MaxUint32(1, request.HitsAddend) + hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.baseRateLimiter.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - keysToGet := make([]string, 0, len(request.Descriptors)) for i, cacheKey := range cacheKeys { @@ -53,7 +54,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( } // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if this.algorithm.IsOverLimitWithLocalCache(cacheKey.Key) { isOverLimitWithLocalCache[i] = true logger.Debugf("cache key is over the limit: %s", cacheKey.Key) continue @@ -78,25 +79,19 @@ func (this *fixedRateLimitCacheImpl) DoLimit( } for i, cacheKey := range cacheKeys { - rawMemcacheValue, ok := memcacheValues[cacheKey.Key] - var limitBeforeIncrease uint32 + var result int64 if ok { decoded, err := strconv.ParseInt(string(rawMemcacheValue.Value), 10, 32) if err != nil { logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) } else { - limitBeforeIncrease = uint32(decoded) + result = decoded } } - limitAfterIncrease := limitBeforeIncrease + hitsAddend - - limitInfo := limiter.NewRateLimitInfo(limits[i], limitBeforeIncrease, limitAfterIncrease, 0, 0) - - responseDescriptorStatuses[i] = this.baseRateLimiter.GetResponseDescriptorStatus(cacheKey.Key, - limitInfo, isOverLimitWithLocalCache[i], hitsAddend) + responseDescriptorStatuses[i] = 
this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], result, isOverLimitWithLocalCache[i], int64(hitsAddend)) } this.waitGroup.Add(1) @@ -105,7 +100,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func (this *fixedRateLimitCacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, isOverLimitWithLocalCache []bool, +func (this *fixedRateLimitCacheImpl) increaseAsync(cacheKeys []utils.CacheKey, isOverLimitWithLocalCache []bool, limits []*config.RateLimit, hitsAddend uint64) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { @@ -126,6 +121,7 @@ func (this *fixedRateLimitCacheImpl) increaseAsync(cacheKeys []limiter.CacheKey, Value: []byte(strconv.FormatUint(hitsAddend, 10)), Expiration: int32(expirationSeconds), }) + if err == memcache.ErrNotStored { // There was a race condition to do this add. We should be able to increment // now instead. @@ -149,16 +145,16 @@ func (this *fixedRateLimitCacheImpl) Flush() { this.waitGroup.Wait() } -func NewFixedRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { +func NewFixedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, + expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, timeSource: timeSource, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + cacheKeyGenerator: utils.NewCacheKeyGenerator(), jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), + algorithm: algorithm, } } diff --git 
a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index 86b94be06..fa0fb919e 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -2,7 +2,6 @@ package memcached import ( "context" - "math" "math/rand" "strconv" "sync" @@ -10,24 +9,25 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/memcached/driver" "github.com/envoyproxy/ratelimit/src/utils" stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" ) type windowedRateLimitCacheImpl struct { - client Client + client driver.Client timeSource utils.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - cacheKeyGenerator limiter.CacheKeyGenerator + cacheKeyGenerator utils.CacheKeyGenerator localCache *freecache.Cache waitGroup sync.WaitGroup nearLimitRatio float32 - baseRateLimiter *limiter.BaseRateLimiter + algorithm algorithm.RatelimitAlgorithm } var _ limiter.RateLimitCache = (*windowedRateLimitCacheImpl)(nil) @@ -40,20 +40,10 @@ func (this *windowedRateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MaxUint32(1, request.HitsAddend) + hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. 
- assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) - for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( - request.Domain, request.Descriptors[i], limits[i], 0) - - // Increase statistics for limits hit by their respective requests. - if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) - } - } + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -64,7 +54,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } // Check if key is over the limit in local cache. - if this.baseRateLimiter.IsOverLimitWithLocalCache(cacheKey.Key) { + if this.algorithm.IsOverLimitWithLocalCache(cacheKey.Key) { isOverLimitWithLocalCache[i] = true logger.Debugf("cache key is over the limit: %s", cacheKey.Key) continue @@ -74,7 +64,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( keysToGet = append(keysToGet, cacheKey.Key) } - // Now fetch from memcache. + // Now fetch from memcached. 
responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) @@ -89,12 +79,10 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } newTats := make([]int64, len(cacheKeys)) - expirationSeconds := make([]int64, len(cacheKeys)) isOverLimit := make([]bool, len(cacheKeys)) - now := this.timeSource.UnixNanoNow() + expirationSeconds := make([]int64, len(cacheKeys)) for i, cacheKey := range cacheKeys { - rawMemcacheValue, ok := memcacheValues[cacheKey.Key] var tat int64 if ok { @@ -102,65 +90,23 @@ func (this *windowedRateLimitCacheImpl) DoLimit( if err != nil { logger.Errorf("Unexpected non-numeric value in memcached: %v", rawMemcacheValue) } - } else { - tat = now } - limit := int64(limits[i].Limit.RequestsPerUnit) - period := utils.SecondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) - quantity := int64(hitsAddend) - arrivedAt := now + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], tat, isOverLimitWithLocalCache[i], int64(hitsAddend)) - emissionInterval := period / limit - tat = utils.MaxInt64(tat, arrivedAt) - newTats[i] = tat + emissionInterval*quantity - allowAt := newTats[i] - period - diff := arrivedAt - allowAt + if responseDescriptorStatuses[i].Code == pb.RateLimitResponse_OVER_LIMIT { + isOverLimit[i] = true + } else { + isOverLimit[i] = false + } - previousAllowAt := tat - period - previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) - previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) - nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) * (1.0 - this.nearLimitRatio)))) - limitRemaining := int64(math.Ceil(float64(diff / emissionInterval))) + arrivedAt := this.algorithm.GetArrivedAt() + newTats[i] = this.algorithm.GetNewTat() expirationSeconds[i] = utils.NanosecondsToSeconds(newTats[i]-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { expirationSeconds[i] 
+= this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } - - if diff < 0 { - isOverLimit[i] = true - responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limits[i].Limit, - LimitRemaining: 0, - DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), - } - - limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) - limits[i].Stats.NearLimit.Add(uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow))) - - if this.localCache != nil { - err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.NanosecondsToSeconds(-diff))) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) - } - } - continue - } else { - isOverLimit[i] = false - responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limits[i].Limit, - LimitRemaining: uint32(limitRemaining), - DurationUntilReset: utils.NanosecondsToDuration(newTats[i] - arrivedAt), - } - - hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) - if hitNearLimit > 0 { - limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) - } - } } this.waitGroup.Add(1) @@ -169,7 +115,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func (this *windowedRateLimitCacheImpl) increaseAsync(isOverLimitWithLocalCache []bool, isOverLimit []bool, cacheKeys []limiter.CacheKey, expirationSeconds []int64, newTats []int64) { +func (this *windowedRateLimitCacheImpl) increaseAsync(isOverLimitWithLocalCache []bool, isOverLimit []bool, cacheKeys []utils.CacheKey, expirationSeconds []int64, newTats []int64) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { if cacheKey.Key == "" || isOverLimitWithLocalCache[i] || isOverLimit[i] { @@ -193,16 +139,16 @@ func (this *windowedRateLimitCacheImpl) Flush() { 
this.waitGroup.Wait() } -func NewWindowedRateLimitCacheImpl(client Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { +func NewWindowedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, + expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, timeSource: timeSource, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + cacheKeyGenerator: utils.NewCacheKeyGenerator(), jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - baseRateLimiter: limiter.NewBaseRateLimit(timeSource, jitterRand, expirationJitterMaxSeconds, localCache, nearLimitRatio), + algorithm: algorithm, } } diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 02a8b07b1..a9df77ae6 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -11,9 +11,10 @@ import ( "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/server" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/utils" ) -func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) (limiter.RateLimitCache, error) { +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) (limiter.RateLimitCache, error) { var perSecondPool driver.Client if s.RedisPerSecond { perSecondPool = driver.NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), 
s.RedisPerSecondTls, s.RedisPerSecondAuth, @@ -26,6 +27,8 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca if s.RateLimitAlgorithm == settings.FixedRateLimit { ratelimitAlgorithm := algorithm.NewFixedWindowAlgorithm( timeSource, + localCache, + s.NearLimitRatio, ) return NewFixedRateLimitCacheImpl( otherPool, @@ -38,7 +41,11 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca ratelimitAlgorithm), nil } if s.RateLimitAlgorithm == settings.WindowedRateLimit { - ratelimitAlgorithm := algorithm.NewRollingWindowAlgorithm() + ratelimitAlgorithm := algorithm.NewRollingWindowAlgorithm( + timeSource, + localCache, + s.NearLimitRatio, + ) return NewWindowedRateLimitCacheImpl( otherPool, perSecondPool, diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index c2dcb68d3..a016e7d45 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,13 +1,11 @@ package redis import ( - "math" "math/rand" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/algorithm" - "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis/driver" @@ -23,20 +21,15 @@ type fixedRateLimitCacheImpl struct { // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. 
perSecondClient driver.Client - timeSource limiter.TimeSource + timeSource utils.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - cacheKeyGenerator limiter.CacheKeyGenerator + cacheKeyGenerator utils.CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 algorithm algorithm.RatelimitAlgorithm } -func pipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { - *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) - *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) -} - func (this *fixedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -45,25 +38,13 @@ func (this *fixedRateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MaxUint32(1, request.HitsAddend) - - // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() - // returns an empty string in the key if there is no limit so that we can keep the arrays - // all the same size. - assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) - for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.algorithm.GenerateCacheKey( - request.Domain, request.Descriptors[i], limits[i]) - - // Increase statistics for limits hit by their respective requests. - if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) - } - } + hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + + // First build a list of all cache keys that we are actually going to hit. 
+ cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) - results := make([]uint32, len(request.Descriptors)) + results := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline // Now, actually setup the pipeline, skipping empty cache keys. @@ -72,15 +53,11 @@ func (this *fixedRateLimitCacheImpl) DoLimit( continue } - // not sure about this code - if this.localCache != nil { - // Get returns the value or not found error. - _, err := this.localCache.Get([]byte(cacheKey.Key)) - if err == nil { - isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - continue - } + // Check if key is over the limit in local cache. + if this.algorithm.IsOverLimitWithLocalCache(cacheKey.Key) { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue } expirationSeconds := utils.UnitToDivider(limits[i].Limit.Unit) @@ -93,14 +70,12 @@ func (this *fixedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = driver.Pipeline{} } - perSecondPipeline = this.algorithm.AppendPipeline(this.perSecondClient, perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - //pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + fixedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } else { if pipeline == nil { pipeline = driver.Pipeline{} } - pipeline = this.algorithm.AppendPipeline(this.client, pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) - //pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) + fixedPipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } } @@ -114,98 +89,9 @@ func (this 
*fixedRateLimitCacheImpl) DoLimit( // Now fetch the pipeline. responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 0, - } - continue - } - if isOverLimitWithLocalCache[i] { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limits[i].Limit, - LimitRemaining: 0, - DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), - } - limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) - limits[i].Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) - continue - } - - limitAfterIncrease := results[i] - limitBeforeIncrease := limitAfterIncrease - hitsAddend - overLimitThreshold := limits[i].Limit.RequestsPerUnit - // The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio. - // We need to know it in both the OK and OVER_LIMIT scenarios. - nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * this.nearLimitRatio))) - - logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease) - if limitAfterIncrease > overLimitThreshold { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limits[i].Limit, - LimitRemaining: 0, - DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), - } - - // Increase over limit statistics. Because we support += behavior for increasing the limit, we need to - // assess if the entire hitsAddend were over the limit. That is, if the limit's value before adding the - // N hits was over the limit, then all the N hits were over limit. 
- // Otherwise, only the difference between the current limit value and the over limit threshold - // were over limit hits. - if limitBeforeIncrease >= overLimitThreshold { - limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) - } else { - limits[i].Stats.OverLimit.Add(uint64(limitAfterIncrease - overLimitThreshold)) - - // If the limit before increase was below the over limit value, then some of the hits were - // in the near limit range. - limits[i].Stats.NearLimit.Add(uint64(overLimitThreshold - utils.MaxUint32(nearLimitThreshold, limitBeforeIncrease))) - } - if this.localCache != nil { - // Set the TTL of the local_cache to be the entire duration. - // Since the cache_key gets changed once the time crosses over current time slot, the over-the-limit - // cache keys in local_cache lose effectiveness. - // For example, if we have an hour limit on all mongo connections, the cache key would be - // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start - // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). - // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.UnitToDivider(limits[i].Limit.Unit))) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) - } - } - } else { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limits[i].Limit, - LimitRemaining: overLimitThreshold - limitAfterIncrease, - DurationUntilReset: utils.CalculateReset(limits[i].Limit, this.timeSource), - } - - // The limit is OK but we additionally want to know if we are near the limit. - if limitAfterIncrease > nearLimitThreshold { - // Here we also need to assess which portion of the hitsAddend were in the near limit range. 
- // If all the hits were over the nearLimitThreshold, then all hits are near limit. Otherwise, - // only the difference between the current limit value and the near limit threshold were near - // limit hits. - if limitBeforeIncrease >= nearLimitThreshold { - // if before increasing the limit number in redis, the data count recorded in redis its already more than the threshold, we can add nearlimit metrics by number of hits - limits[i].Stats.NearLimit.Add(uint64(hitsAddend)) - } else { - // if not, subtracting limit after increase with the threshold. - limits[i].Stats.NearLimit.Add(uint64(limitAfterIncrease - nearLimitThreshold)) - } - } - } + for i, cacheKey := range cacheKeys { + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], int64(results[i]), isOverLimitWithLocalCache[i], int64(hitsAddend)) } return responseDescriptorStatuses @@ -213,14 +99,19 @@ func (this *fixedRateLimitCacheImpl) DoLimit( func (this *fixedRateLimitCacheImpl) Flush() {} -func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { +func fixedPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, hitsAddend int64, result *int64, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) +} + +func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, timeSource: 
timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + cacheKeyGenerator: utils.NewCacheKeyGenerator(), localCache: localCache, nearLimitRatio: nearLimitRatio, algorithm: algorithm, diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index e22fd749b..77206092a 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -1,18 +1,15 @@ package redis import ( - "math" "math/rand" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/algorithm" - "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" - "github.com/golang/protobuf/ptypes/duration" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -33,27 +30,15 @@ type windowedRateLimitCacheImpl struct { // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. 
perSecondClient driver.Client - timeSource limiter.TimeSource + timeSource utils.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - cacheKeyGenerator limiter.CacheKeyGenerator + cacheKeyGenerator utils.CacheKeyGenerator localCache *freecache.Cache nearLimitRatio float32 algorithm algorithm.RatelimitAlgorithm } -func windowedPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, result *int64, expirationSeconds int64) { - *pipeline = client.PipeAppend(*pipeline, nil, "SETNX", key, int64(0)) - *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) - *pipeline = client.PipeAppend(*pipeline, result, "GET", key) -} - -// store new tat (Theoretical arrival time) -func windowedSetNewTatPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, newTat int64, expirationSeconds int64) { - *pipeline = client.PipeAppend(*pipeline, nil, "SET", key, newTat) - *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) -} - func (this *windowedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -62,39 +47,26 @@ func (this *windowedRateLimitCacheImpl) DoLimit( logger.Debugf("starting windowed cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MaxUint32(1, request.HitsAddend) - - // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() - // returns an empty string in the key if there is no limit so that we can keep the arrays - // all the same size. - assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) - for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.algorithm.GenerateCacheKey( - request.Domain, request.Descriptors[i], limits[i]) - - // Increase statistics for limits hit by their respective requests. 
- if limits[i] != nil { - limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) - } - } + hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + + // First build a list of all cache keys that we are actually going to hit. + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline + + // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { if cacheKey.Key == "" { continue } - if this.localCache != nil { - // Get returns the value or not found error. - _, err := this.localCache.Get([]byte(cacheKey.Key)) - if err == nil { - isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.Key) - continue - } + // Check if key is over the limit in local cache. + if this.algorithm.IsOverLimitWithLocalCache(cacheKey.Key) { + isOverLimitWithLocalCache[i] = true + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) + continue } logger.Debugf("looking up tat for cache key: %s", cacheKey.Key) @@ -106,14 +78,12 @@ func (this *windowedRateLimitCacheImpl) DoLimit( if perSecondPipeline == nil { perSecondPipeline = driver.Pipeline{} } - perSecondPipeline = this.algorithm.AppendPipeline(this.perSecondClient, perSecondPipeline, cacheKey.Key, hitsAddend, &tats[i], expirationSeconds) - //windowedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, &tats[i], expirationSeconds) + windowedPipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, &tats[i], expirationSeconds) } else { if pipeline == nil { pipeline = driver.Pipeline{} } - pipeline = this.algorithm.AppendPipeline(this.client, pipeline, cacheKey.Key, hitsAddend, &tats[i], expirationSeconds) - //windowedPipelineAppend(this.client, &pipeline, cacheKey.Key, &tats[i], expirationSeconds) + windowedPipelineAppend(this.client, 
&pipeline, cacheKey.Key, &tats[i], expirationSeconds) } } @@ -127,89 +97,12 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) - now := this.timeSource.UnixNanoNow() - for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 0, - } - continue - } - - if isOverLimitWithLocalCache[i] { - secondsToReset := utils.UnitToDivider(limits[i].Limit.Unit) - secondsToReset -= utils.NanosecondsToSeconds(now) % secondsToReset - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limits[i].Limit, - LimitRemaining: 0, - DurationUntilReset: &duration.Duration{Seconds: secondsToReset}, - } - limits[i].Stats.OverLimit.Add(uint64(hitsAddend)) - limits[i].Stats.OverLimitWithLocalCache.Add(uint64(hitsAddend)) - continue - } - - // Time during computation should be in nanosecond - limit := int64(limits[i].Limit.RequestsPerUnit) - period := utils.SecondsToNanoseconds(utils.UnitToDivider(limits[i].Limit.Unit)) - quantity := int64(hitsAddend) - arrivedAt := now - - // GCRA computation - // Emission interval is the cost of each request - emissionInterval := period / limit - // Tat is set to current request timestamp if not set before - tat := utils.MaxInt64(tats[i], arrivedAt) - // New tat define the end of the window - newTat := tat + emissionInterval*quantity - // We allow the request if it's inside the window - allowAt := newTat - period - diff := arrivedAt - allowAt - - previousAllowAt := tat - period - previousLimitRemaining := int64(math.Ceil(float64((arrivedAt - previousAllowAt) / emissionInterval))) - previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) - nearLimitWindow := int64(math.Ceil(float64(float32(limits[i].Limit.RequestsPerUnit) 
* (1.0 - this.nearLimitRatio)))) - limitRemaining := int64(math.Ceil(float64(diff / emissionInterval))) - - if diff < 0 { - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limits[i].Limit, - LimitRemaining: 0, - DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - arrivedAt)))), - } - - limits[i].Stats.OverLimit.Add(uint64(quantity - previousLimitRemaining)) - limits[i].Stats.NearLimit.Add(uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow))) - - if this.localCache != nil { - err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(utils.NanosecondsToSeconds(-diff))) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) - } - } - continue - } - responseDescriptorStatuses[i] = - &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limits[i].Limit, - LimitRemaining: uint32(limitRemaining), - DurationUntilReset: utils.NanosecondsToDuration(newTat - arrivedAt), - } + for i, cacheKey := range cacheKeys { + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], int64(tats[i]), isOverLimitWithLocalCache[i], int64(hitsAddend)) - hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) - if hitNearLimit > 0 { - limits[i].Stats.NearLimit.Add(uint64(hitNearLimit)) - } + arrivedAt := this.algorithm.GetArrivedAt() + newTat := this.algorithm.GetNewTat() // Store new tat for initial tat of next requests expirationSeconds := utils.NanosecondsToSeconds(newTat-arrivedAt) + 1 @@ -228,25 +121,38 @@ func (this *windowedRateLimitCacheImpl) DoLimit( windowedSetNewTatPipelineAppend(this.client, &pipeline, cacheKey.Key, newTat, expirationSeconds) } } + if pipeline != nil { driver.CheckError(this.client.PipeDo(pipeline)) } if perSecondPipeline != nil { 
driver.CheckError(this.perSecondClient.PipeDo(perSecondPipeline)) } + return responseDescriptorStatuses } func (this *windowedRateLimitCacheImpl) Flush() {} -func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { +func windowedPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, result *int64, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, nil, "SETNX", key, int64(0)) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) + *pipeline = client.PipeAppend(*pipeline, result, "GET", key) +} + +func windowedSetNewTatPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key string, newTat int64, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, nil, "SET", key, newTat) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) +} + +func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, timeSource: timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, - cacheKeyGenerator: limiter.NewCacheKeyGenerator(), + cacheKeyGenerator: utils.NewCacheKeyGenerator(), localCache: localCache, nearLimitRatio: nearLimitRatio, algorithm: algorithm, diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 8652bf5d8..3c3604ee4 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -18,8 +18,8 @@ import ( "github.com/coocood/freecache" pb 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/utils" "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" @@ -177,7 +177,7 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts ret.scope = ret.store.Scope(name) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { - ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + ret.store.AddStatGenerator(utils.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } // setup runtime diff --git a/src/limiter/cache_key.go b/src/utils/cache_key_generator.go similarity index 62% rename from src/limiter/cache_key.go rename to src/utils/cache_key_generator.go index a2746f6b3..63dc28d56 100644 --- a/src/limiter/cache_key.go +++ b/src/utils/cache_key_generator.go @@ -1,4 +1,4 @@ -package limiter +package utils import ( "bytes" @@ -7,41 +7,45 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/utils" ) -type CacheKeyGenerator struct { - // bytes.Buffer pool used to efficiently generate cache keys. - bufferPool sync.Pool -} - -func NewCacheKeyGenerator() CacheKeyGenerator { - return CacheKeyGenerator{bufferPool: sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }} -} - type CacheKey struct { Key string // True if the key corresponds to a limit with a SECOND unit. False otherwise. 
PerSecond bool } -func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { - return unit == pb.RateLimitResponse_RateLimit_SECOND +type CacheKeyGenerator struct { + // bytes.Buffer pool used to efficiently generate cache keys. + bufferPool sync.Pool +} + +func (this *CacheKeyGenerator) GenerateCacheKeys(request *pb.RateLimitRequest, + limits []*config.RateLimit, hitsAddend uint32, time int64) []CacheKey { + assert.Assert(len(request.Descriptors) == len(limits)) + cacheKeys := make([]CacheKey, len(request.Descriptors)) + for i := 0; i < len(request.Descriptors); i++ { + // generateCacheKey() returns an empty string in the key if there is no limit + // so that we can keep the arrays all the same size. + cacheKeys[i] = this.GenerateCacheKey(request.Domain, request.Descriptors[i], limits[i], time) + // Increase statistics for limits hit by their respective requests. + if limits[i] != nil { + limits[i].Stats.TotalHits.Add(uint64(hitsAddend)) + } + } + return cacheKeys } // Generate a cache key for a limit lookup. // @param domain supplies the cache key domain. // @param descriptor supplies the descriptor to generate the key for. // @param limit supplies the rate limit to generate the key for (may be nil). -// @param now supplies the current unix time. +// @param time supplies the current unix time. // @return CacheKey struct. 
func (this *CacheKeyGenerator) GenerateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, time int64) CacheKey { if limit == nil { return CacheKey{ @@ -64,10 +68,22 @@ func (this *CacheKeyGenerator) GenerateCacheKey( b.WriteByte('_') } - divider := utils.UnitToDivider(limit.Limit.Unit) - b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) + divider := UnitToDivider(limit.Limit.Unit) + b.WriteString(strconv.FormatInt((time/divider)*divider, 10)) return CacheKey{ Key: b.String(), PerSecond: isPerSecondLimit(limit.Limit.Unit)} } + +func NewCacheKeyGenerator() CacheKeyGenerator { + return CacheKeyGenerator{bufferPool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }} +} + +func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { + return unit == pb.RateLimitResponse_RateLimit_SECOND +} diff --git a/src/limiter/local_cache_stats.go b/src/utils/local_cache_stats.go similarity index 98% rename from src/limiter/local_cache_stats.go rename to src/utils/local_cache_stats.go index d0d59dc27..be3f93899 100644 --- a/src/limiter/local_cache_stats.go +++ b/src/utils/local_cache_stats.go @@ -1,4 +1,4 @@ -package limiter +package utils import ( "github.com/coocood/freecache" @@ -17,6 +17,17 @@ type localCacheStats struct { overwriteCount stats.Gauge } +func (stats localCacheStats) GenerateStats() { + stats.evacuateCount.Set(uint64(stats.cache.EvacuateCount())) + stats.expiredCount.Set(uint64(stats.cache.ExpiredCount())) + stats.entryCount.Set(uint64(stats.cache.EntryCount())) + stats.averageAccessTime.Set(uint64(stats.cache.AverageAccessTime())) + stats.hitCount.Set(uint64(stats.cache.HitCount())) + stats.missCount.Set(uint64(stats.cache.MissCount())) + stats.lookupCount.Set(uint64(stats.cache.LookupCount())) + stats.overwriteCount.Set(uint64(stats.cache.OverwriteCount())) +} + func 
NewLocalCacheStats(localCache *freecache.Cache, scope stats.Scope) stats.StatGenerator { return localCacheStats{ cache: localCache, @@ -30,14 +41,3 @@ func NewLocalCacheStats(localCache *freecache.Cache, scope stats.Scope) stats.St overwriteCount: scope.NewGauge("overwriteCount"), } } - -func (stats localCacheStats) GenerateStats() { - stats.evacuateCount.Set(uint64(stats.cache.EvacuateCount())) - stats.expiredCount.Set(uint64(stats.cache.ExpiredCount())) - stats.entryCount.Set(uint64(stats.cache.EntryCount())) - stats.averageAccessTime.Set(uint64(stats.cache.AverageAccessTime())) - stats.hitCount.Set(uint64(stats.cache.HitCount())) - stats.missCount.Set(uint64(stats.cache.MissCount())) - stats.lookupCount.Set(uint64(stats.cache.LookupCount())) - stats.overwriteCount.Set(uint64(stats.cache.OverwriteCount())) -} diff --git a/src/utils/utilities.go b/src/utils/utilities.go index ba798e331..ec673aa34 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -64,7 +64,7 @@ func SecondsToNanoseconds(second int64) int64 { return second * secondToNanosecondRate } -func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration { +func CalculateFixedReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource TimeSource) *duration.Duration { sec := UnitToDivider(currentLimit.Unit) now := timeSource.UnixNow() return &duration.Duration{Seconds: sec - now%sec} diff --git a/test/limiter/base_limiter_test.go b/test/limiter/base_limiter_test.go deleted file mode 100644 index 0694ca009..000000000 --- a/test/limiter/base_limiter_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package limiter - -import ( - "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/limiter" - "github.com/envoyproxy/ratelimit/test/common" - mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" - 
"github.com/golang/mock/gomock" - stats "github.com/lyft/gostats" - "github.com/stretchr/testify/assert" - "math/rand" - "testing" -) - -func TestGenerateCacheKeys(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - timeSource := mock_utils.NewMockTimeSource(controller) - jitterSource := mock_utils.NewMockJitterRandSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - timeSource.EXPECT().UnixNow().Return(int64(1234)) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, rand.New(jitterSource), 3600, nil, 0.8) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - assert.Equal(uint64(0), limits[0].Stats.TotalHits.Value()) - cacheKeys := baseRateLimit.GenerateCacheKeys(request, limits, 1) - assert.Equal(1, len(cacheKeys)) - assert.Equal("domain_key_value_1234", cacheKeys[0].Key) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) -} - -func TestOverLimitWithLocalCache(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - localCache := freecache.NewCache(100) - localCache.Set([]byte("key"), []byte("value"), 100) - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8) - // Returns true, as local cache contains over limit value for the key. - assert.Equal(true, baseRateLimit.IsOverLimitWithLocalCache("key")) -} - -func TestNoOverLimitWithLocalCache(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8) - // Returns false, as local cache is nil. 
- assert.Equal(false, baseRateLimit.IsOverLimitWithLocalCache("domain_key_value_1234")) - localCache := freecache.NewCache(100) - baseRateLimitWithLocalCache := limiter.NewBaseRateLimit(nil, nil, 3600, localCache, 0.8) - // Returns false, as local cache does not contain value for cache key. - assert.Equal(false, baseRateLimitWithLocalCache.IsOverLimitWithLocalCache("domain_key_value_1234")) -} - -func TestGetResponseStatusEmptyKey(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - baseRateLimit := limiter.NewBaseRateLimit(nil, nil, 3600, nil, 0.8) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("", nil, false, 1) - assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) - assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) -} - -func TestGetResponseStatusOverLimitWithLocalCache(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - timeSource := mock_utils.NewMockTimeSource(controller) - timeSource.EXPECT().UnixNow().Return(int64(1234)) - statsStore := stats.NewStore(stats.NewNullSink(), false) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 4, 5) - // As `isOverLimitWithLocalCache` is passed as `true`, immediate response is returned with no checks of the limits. 
- responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, true, 2) - assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) - assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) - assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) - assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(2), limits[0].Stats.OverLimitWithLocalCache.Value()) -} - -func TestGetResponseStatusOverLimit(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - timeSource := mock_utils.NewMockTimeSource(controller) - timeSource.EXPECT().UnixNow().Return(int64(1234)) - statsStore := stats.NewStore(stats.NewNullSink(), false) - localCache := freecache.NewCache(100) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, localCache, 0.8) - limits := []*config.RateLimit{config.NewRateLimit(5, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 7, 4, 5) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) - assert.Equal(pb.RateLimitResponse_OVER_LIMIT, responseStatus.GetCode()) - assert.Equal(uint32(0), responseStatus.GetLimitRemaining()) - assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) - result, _ := localCache.Get([]byte("key")) - // Local cache should have been populated with over the limit key. 
- assert.Equal("", string(result)) - assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) -} - -func TestGetResponseStatusBelowLimit(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - timeSource := mock_utils.NewMockTimeSource(controller) - timeSource.EXPECT().UnixNow().Return(int64(1234)) - statsStore := stats.NewStore(stats.NewNullSink(), false) - baseRateLimit := limiter.NewBaseRateLimit(timeSource, nil, 3600, nil, 0.8) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - limitInfo := limiter.NewRateLimitInfo(limits[0], 2, 6, 9, 10) - responseStatus := baseRateLimit.GetResponseDescriptorStatus("key", limitInfo, false, 1) - assert.Equal(pb.RateLimitResponse_OK, responseStatus.GetCode()) - assert.Equal(uint32(4), responseStatus.GetLimitRemaining()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(limits[0].Limit, responseStatus.GetCurrentLimit()) -} diff --git a/test/mocks/algorithm/ratelimit_algorithm.go b/test/mocks/algorithm/ratelimit_algorithm.go index 91ddc7182..58e2233c9 100644 --- a/test/mocks/algorithm/ratelimit_algorithm.go +++ b/test/mocks/algorithm/ratelimit_algorithm.go @@ -63,3 +63,39 @@ func (mr *MockRatelimitAlgorithmMockRecorder) AppendPipeline(client, pipeline, k mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendPipeline", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).AppendPipeline), client, pipeline, key, hitsAddend, result, expirationSeconds) } + +// PopulateStats mocks base method +func (m *MockRatelimitAlgorithm) PopulateStats() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PopulateStats") +} + +// PopulateStats indicates an expected call of PopulateStats +func (mr *MockRatelimitAlgorithmMockRecorder) PopulateStats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PopulateStats", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).PopulateStats)) +} + +// CalculateResetDuration mocks base method +func (m *MockRatelimitAlgorithm) CalculateResetDuration() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "CalculateResetDuration") +} + +// CalculateResetDuration indicates an expected call of CalculateResetDuration +func (mr *MockRatelimitAlgorithmMockRecorder) CalculateResetDuration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateResetDuration", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).CalculateResetDuration)) +} + +// IsUnderLimit mocks base method +func (m *MockRatelimitAlgorithm) IsUnderLimit() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "IsUnderLimit") +} + +// IsUnderLimit indicates an expected call of IsUnderLimit +func (mr *MockRatelimitAlgorithmMockRecorder) IsUnderLimit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnderLimit", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).IsUnderLimit)) +} From 244e8011adf462279a9721a33a4ad08886efd618 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 20 Jan 2021 09:26:48 +0100 Subject: [PATCH 20/31] fix broken rolling window limiter with localcache Signed-off-by: zufardhiyaulhaq --- src/algorithm/fixed_window.go | 34 ++++++++--- src/algorithm/ratelimit_algorithm.go | 3 +- src/algorithm/rolling_window.go | 88 ++++++++++++++++++++-------- src/limiter/rate_limiter.go | 0 src/memcached/fixed_cache_impl.go | 24 +++++++- src/memcached/windowed_cache_impl.go | 23 +++++++- src/redis/fixed_cache_impl.go | 25 +++++++- src/redis/windowed_cache_impl.go | 23 +++++++- src/utils/utilities.go | 7 +++ 9 files changed, 189 insertions(+), 38 deletions(-) delete mode 100644 src/limiter/rate_limiter.go diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index e0c4c6979..f010d8d10 100644 --- a/src/algorithm/fixed_window.go +++ 
b/src/algorithm/fixed_window.go @@ -1,6 +1,7 @@ package algorithm import ( + "encoding/json" "math" "github.com/coocood/freecache" @@ -8,7 +9,6 @@ import ( "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" logger "github.com/sirupsen/logrus" - "google.golang.org/protobuf/types/known/durationpb" ) type FixedWindowImpl struct { @@ -19,6 +19,14 @@ type FixedWindowImpl struct { } func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { + + logger.Debugf("[fixed] key: %s", key) + logger.Debugf("[fixed] results: %d", results) + logger.Debugf("[fixed] hitsAddend: %d", hitsAddend) + logger.Debugf("[fixed] isOverLimitWithLocalCache: %t", isOverLimitWithLocalCache) + limitJSON, _ := json.Marshal(limit) + logger.Debugf("[fixed] key: %s", limitJSON) + if key == "" { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -37,16 +45,23 @@ func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config } isOverLimit, limitRemaining, durationUntilReset := fw.IsOverLimit(limit, int64(results), hitsAddend) + + logger.Debugf("[fixed] limitRemaining: %d", limitRemaining) + logger.Debugf("[fixed] isOverLimit: %t", isOverLimit) + logger.Debugf("[fixed] durationUntilReset: %d", durationUntilReset) + if !isOverLimit { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, CurrentLimit: limit.Limit, LimitRemaining: uint32(limitRemaining), - DurationUntilReset: durationUntilReset, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), } } else { if fw.localCache != nil { - err := fw.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limit.Limit.Unit))) + durationUntilReset = utils.MaxInt(1, durationUntilReset) + logger.Debugf("[fixed] duration until reset in local cache: %d", durationUntilReset) + err := 
fw.localCache.Set([]byte(key), []byte{}, durationUntilReset) if err != nil { logger.Errorf("Failing to set local cache key: %s", key) } @@ -56,17 +71,22 @@ func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, LimitRemaining: uint32(limitRemaining), - DurationUntilReset: durationUntilReset, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), } } } -func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) { +func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) { limitAfterIncrease := results limitBeforeIncrease := limitAfterIncrease - int64(hitsAddend) overLimitThreshold := int64(limit.Limit.RequestsPerUnit) nearLimitThreshold := int64(math.Floor(float64(float32(overLimitThreshold) * fw.nearLimitRatio))) + logger.Debugf("[fixed] limitAfterIncrease: %d", limitAfterIncrease) + logger.Debugf("[fixed] limitBeforeIncrease: %d", limitBeforeIncrease) + logger.Debugf("[fixed] overLimitThreshold: %d", overLimitThreshold) + logger.Debugf("[fixed] nearLimitThreshold: %d", nearLimitThreshold) + if limitAfterIncrease > overLimitThreshold { if limitBeforeIncrease >= overLimitThreshold { fw.PopulateStats(limit, 0, uint64(hitsAddend), 0) @@ -74,7 +94,7 @@ func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, h fw.PopulateStats(limit, uint64(overLimitThreshold-utils.MaxInt64(nearLimitThreshold, limitBeforeIncrease)), uint64(limitAfterIncrease-overLimitThreshold), 0) } - return true, 0, utils.CalculateFixedReset(limit.Limit, fw.timeSource) + return true, 0, int(utils.UnitToDivider(limit.Limit.Unit)) } else { if limitAfterIncrease > nearLimitThreshold { if limitBeforeIncrease >= nearLimitThreshold { @@ -84,7 +104,7 @@ func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, h } } - 
return false, overLimitThreshold - limitAfterIncrease, utils.CalculateFixedReset(limit.Limit, fw.timeSource) + return false, overLimitThreshold - limitAfterIncrease, int(utils.UnitToDivider(limit.Limit.Unit)) } } diff --git a/src/algorithm/ratelimit_algorithm.go b/src/algorithm/ratelimit_algorithm.go index d79820433..9223c51b8 100644 --- a/src/algorithm/ratelimit_algorithm.go +++ b/src/algorithm/ratelimit_algorithm.go @@ -4,11 +4,10 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" - "google.golang.org/protobuf/types/known/durationpb" ) type RatelimitAlgorithm interface { - IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) + IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) IsOverLimitWithLocalCache(key string) bool GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index cf60bc657..a38280680 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -1,14 +1,15 @@ package algorithm import ( + "encoding/json" "math" "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" + "github.com/golang/protobuf/ptypes/duration" logger "github.com/sirupsen/logrus" - "google.golang.org/protobuf/types/known/durationpb" ) const DummyCacheKeyTime = 0 @@ -19,10 +20,20 @@ type RollingWindowImpl struct { localCache *freecache.Cache nearLimitRatio float32 arrivedAt int64 + tat int64 newTat int64 + diff int64 } func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, 
isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { + + logger.Debugf("[rolling] key: %s", key) + logger.Debugf("[rolling] results: %d", results) + logger.Debugf("[rolling] hitsAddend: %d", hitsAddend) + logger.Debugf("[rolling] isOverLimitWithLocalCache: %t", isOverLimitWithLocalCache) + limitJSON, _ := json.Marshal(limit) + logger.Debugf("[rolling] limit: %s", limitJSON) + if key == "" { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -30,27 +41,38 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf LimitRemaining: 0, } } + if isOverLimitWithLocalCache { rw.PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) + + secondsToReset := utils.UnitToDivider(limit.Limit.Unit) + secondsToReset -= utils.NanosecondsToSeconds(rw.timeSource.UnixNanoNow()) % secondsToReset return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, LimitRemaining: 0, - DurationUntilReset: utils.CalculateFixedReset(limit.Limit, rw.timeSource), + DurationUntilReset: &duration.Duration{Seconds: secondsToReset}, } } isOverLimit, limitRemaining, durationUntilReset := rw.IsOverLimit(limit, int64(results), hitsAddend) + + logger.Debugf("[rolling] limitRemaining: %d", limitRemaining) + logger.Debugf("[rolling] isOverLimit: %t", isOverLimit) + logger.Debugf("[rolling] durationUntilReset: %d", durationUntilReset) + if !isOverLimit { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, CurrentLimit: limit.Limit, LimitRemaining: uint32(limitRemaining), - DurationUntilReset: durationUntilReset, + DurationUntilReset: utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt), } } else { if rw.localCache != nil { - err := rw.localCache.Set([]byte(key), []byte{}, int(utils.UnitToDivider(limit.Limit.Unit))) + durationUntilReset = utils.MaxInt(1, durationUntilReset) + logger.Debugf("[rolling] duration until reset in local 
cache: %d", durationUntilReset) + err := rw.localCache.Set([]byte(key), []byte{}, durationUntilReset) if err != nil { logger.Errorf("Failing to set local cache key: %s", key) } @@ -60,56 +82,65 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, LimitRemaining: uint32(limitRemaining), - DurationUntilReset: durationUntilReset, + DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(rw.tat - rw.arrivedAt)))), } } } -func (rw *RollingWindowImpl) GetNewTat() int64 { - return rw.newTat -} -func (rw *RollingWindowImpl) GetArrivedAt() int64 { - return rw.newTat -} - -func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, *durationpb.Duration) { +func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) { now := rw.timeSource.UnixNanoNow() // Time during computation should be in nanosecond rw.arrivedAt = now - tat := utils.MaxInt64(results, rw.arrivedAt) + // Tat is set to current request timestamp if not set before + rw.tat = utils.MaxInt64(results, rw.arrivedAt) totalLimit := int64(limit.Limit.RequestsPerUnit) period := utils.SecondsToNanoseconds(utils.UnitToDivider(limit.Limit.Unit)) quantity := int64(hitsAddend) + logger.Debugf("[rolling] rw.arrivedAt: %d", rw.arrivedAt) + logger.Debugf("[rolling] tat: %d", rw.tat) + logger.Debugf("[rolling] totalLimit: %d", totalLimit) + logger.Debugf("[rolling] period: %d", period) + logger.Debugf("[rolling] quantity: %d", quantity) + // GCRA computation // Emission interval is the cost of each request emissionInterval := period / totalLimit - // Tat is set to current request timestamp if not set before - // New tat define the end of the window - rw.newTat = tat + emissionInterval*quantity + rw.newTat = rw.tat + emissionInterval*quantity // We allow the request if it's inside the window allowAt := 
rw.newTat - period - diff := rw.arrivedAt - allowAt + rw.diff = rw.arrivedAt - allowAt + + logger.Debugf("[rolling] emissionInterval: %d", emissionInterval) + logger.Debugf("[rolling] rw.newTat: %d", rw.newTat) + logger.Debugf("[rolling] allowAt: %d", allowAt) + logger.Debugf("[rolling] diff: %d", rw.diff) - previousAllowAt := tat - period + previousAllowAt := rw.tat - period previousLimitRemaining := int64(math.Ceil(float64((rw.arrivedAt - previousAllowAt) / emissionInterval))) previousLimitRemaining = utils.MaxInt64(previousLimitRemaining, 0) nearLimitWindow := int64(math.Ceil(float64(float32(limit.Limit.RequestsPerUnit) * (1.0 - rw.nearLimitRatio)))) - limitRemaining := int64(math.Ceil(float64(diff / emissionInterval))) + limitRemaining := int64(math.Ceil(float64(rw.diff / emissionInterval))) hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) - if diff < 0 { + logger.Debugf("[rolling] previousAllowAt: %d", previousAllowAt) + logger.Debugf("[rolling] previousLimitRemaining: %d", previousLimitRemaining) + logger.Debugf("[rolling] nearLimitWindow: %d", nearLimitWindow) + logger.Debugf("[rolling] limitRemaining: %d", limitRemaining) + logger.Debugf("[rolling] hitNearLimit: %d", hitNearLimit) + + if rw.diff < 0 { rw.PopulateStats(limit, uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow)), uint64(quantity-previousLimitRemaining), 0) - return true, 0, utils.NanosecondsToDuration(int64(math.Ceil(float64(tat - rw.arrivedAt)))) + return true, 0, int(utils.NanosecondsToSeconds(-rw.diff)) } else { if hitNearLimit > 0 { rw.PopulateStats(limit, uint64(hitNearLimit), 0, 0) } - return false, limitRemaining, utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt) + return false, limitRemaining, 0 } } @@ -123,6 +154,17 @@ func (rw *RollingWindowImpl) IsOverLimitWithLocalCache(key string) bool { return false } +func (rw *RollingWindowImpl) GetNewTat() int64 { + if rw.diff < 0 { + return rw.tat + } + return rw.newTat +} 
+ +func (rw *RollingWindowImpl) GetArrivedAt() int64 { + return rw.arrivedAt +} + func (rw *RollingWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { return rw.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), DummyCacheKeyTime) diff --git a/src/limiter/rate_limiter.go b/src/limiter/rate_limiter.go deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go index 1e4c1e007..ae0b60ef8 100644 --- a/src/memcached/fixed_cache_impl.go +++ b/src/memcached/fixed_cache_impl.go @@ -2,6 +2,7 @@ package memcached import ( "context" + "encoding/json" "math/rand" "strconv" "sync" @@ -37,14 +38,23 @@ func (this *fixedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + limitsJSON, _ := json.Marshal(limits) + logger.Debugf("[memcached] limits: %s", limitsJSON) + requestJSON, _ := json.Marshal(request) + logger.Debugf("[memcached] request: %s", requestJSON) + logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. 
cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + logger.Debugf("[memcached] hitsAddend: %d", hitsAddend) + cacheKeysJSON, _ := json.Marshal(cacheKeys) + logger.Debugf("[memcached] cacheKeys: %s", cacheKeysJSON) + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -88,15 +98,25 @@ func (this *fixedRateLimitCacheImpl) DoLimit( } else { result = decoded } - } + cacheKeyJSON, _ := json.Marshal(cacheKey) + logger.Debugf("[memcached] cacheKey: %s", cacheKeyJSON) + limitiJSON, _ := json.Marshal(limits[i]) + logger.Debugf("[memcached] limits[i]: %s", limitiJSON) + logger.Debugf("[memcached] result: %d", result) + logger.Debugf("[memcached] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) + logger.Debugf("[memcached] int64(hitsAddend): %t", int64(hitsAddend)) + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], result, isOverLimitWithLocalCache[i], int64(hitsAddend)) } this.waitGroup.Add(1) go this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) + responseDescriptorStatusesJSON, _ := json.Marshal(responseDescriptorStatuses) + logger.Debugf("[memcached] responseDescriptorStatuses: %s", responseDescriptorStatusesJSON) + return responseDescriptorStatuses } diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index fa0fb919e..73cbccfc1 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -2,6 +2,7 @@ package memcached import ( "context" + "encoding/json" "math/rand" "strconv" "sync" @@ -37,14 +38,23 @@ func (this *windowedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + limitsJSON, _ := json.Marshal(limits) + logger.Debugf("[memcached] limits: %s", limitsJSON) + requestJSON, _ := json.Marshal(request) + 
logger.Debugf("[memcached] request: %s", requestJSON) + logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + logger.Debugf("[memcached] hitsAddend: %d", hitsAddend) + cacheKeysJSON, _ := json.Marshal(cacheKeys) + logger.Debugf("[memcached] cacheKeys: %s", cacheKeysJSON) + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -92,6 +102,14 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } } + cacheKeyJSON, _ := json.Marshal(cacheKey) + logger.Debugf("[memcached] cacheKey: %s", cacheKeyJSON) + limitiJSON, _ := json.Marshal(limits[i]) + logger.Debugf("[memcached] limits[i]: %s", limitiJSON) + logger.Debugf("[memcached] tat: %d", tat) + logger.Debugf("[memcached] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) + logger.Debugf("[memcached] int64(hitsAddend): %t", int64(hitsAddend)) + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], tat, isOverLimitWithLocalCache[i], int64(hitsAddend)) if responseDescriptorStatuses[i].Code == pb.RateLimitResponse_OVER_LIMIT { @@ -103,6 +121,9 @@ func (this *windowedRateLimitCacheImpl) DoLimit( arrivedAt := this.algorithm.GetArrivedAt() newTats[i] = this.algorithm.GetNewTat() + logger.Debugf("[memcached] arrivedAt: %d", arrivedAt) + logger.Debugf("[memcached] newTat: %d", newTats[i]) + expirationSeconds[i] = utils.NanosecondsToSeconds(newTats[i]-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { expirationSeconds[i] += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) diff --git a/src/redis/fixed_cache_impl.go 
b/src/redis/fixed_cache_impl.go index a016e7d45..fad6c4e6a 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "encoding/json" "math/rand" "github.com/coocood/freecache" @@ -35,14 +36,23 @@ func (this *fixedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + limitsJSON, _ := json.Marshal(limits) + logger.Debugf("[redis] limits: %s", limitsJSON) + requestJSON, _ := json.Marshal(request) + logger.Debugf("[redis] request: %s", requestJSON) + logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + logger.Debugf("[redis] hitsAddend: %d", hitsAddend) + cacheKeysJSON, _ := json.Marshal(cacheKeys) + logger.Debugf("[redis] cacheKeys: %s", cacheKeysJSON) + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline @@ -91,9 +101,20 @@ func (this *fixedRateLimitCacheImpl) DoLimit( len(request.Descriptors)) for i, cacheKey := range cacheKeys { - responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], int64(results[i]), isOverLimitWithLocalCache[i], int64(hitsAddend)) + cacheKeyJSON, _ := json.Marshal(cacheKey) + logger.Debugf("[redis] cacheKey: %s", cacheKeyJSON) + limitiJSON, _ := json.Marshal(limits[i]) + logger.Debugf("[redis] limits[i]: %s", limitiJSON) + logger.Debugf("[redis] results[i]: %d", results[i]) + logger.Debugf("[redis] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) + logger.Debugf("[redis] 
int64(hitsAddend): %t", int64(hitsAddend)) + + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], results[i], isOverLimitWithLocalCache[i], int64(hitsAddend)) } + responseDescriptorStatusesJSON, _ := json.Marshal(responseDescriptorStatuses) + logger.Debugf("[redis] responseDescriptorStatuses: %s", responseDescriptorStatusesJSON) + return responseDescriptorStatuses } diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 77206092a..00ff5cb19 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -1,6 +1,7 @@ package redis import ( + "encoding/json" "math/rand" "github.com/coocood/freecache" @@ -44,14 +45,23 @@ func (this *windowedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { + limitsJSON, _ := json.Marshal(limits) + logger.Debugf("[redis] limits: %s", limitsJSON) + requestJSON, _ := json.Marshal(request) + logger.Debugf("[redis] request: %s", requestJSON) + logger.Debugf("starting windowed cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. - hitsAddend := utils.MinInt64(1, int64(request.HitsAddend)) + hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. 
cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + logger.Debugf("[redis] hitsAddend: %d", hitsAddend) + cacheKeysJSON, _ := json.Marshal(cacheKeys) + logger.Debugf("[redis] cacheKeys: %s", cacheKeysJSON) + isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline @@ -99,11 +109,22 @@ func (this *windowedRateLimitCacheImpl) DoLimit( responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { + cacheKeyJSON, _ := json.Marshal(cacheKey) + logger.Debugf("[redis] cacheKey: %s", cacheKeyJSON) + limitiJSON, _ := json.Marshal(limits[i]) + logger.Debugf("[redis] limits[i]: %s", limitiJSON) + logger.Debugf("[redis] tats[i]: %d", tats[i]) + logger.Debugf("[redis] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) + logger.Debugf("[redis] int64(hitsAddend): %t", int64(hitsAddend)) + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], int64(tats[i]), isOverLimitWithLocalCache[i], int64(hitsAddend)) arrivedAt := this.algorithm.GetArrivedAt() newTat := this.algorithm.GetNewTat() + logger.Debugf("[redis] arrivedAt: %d", arrivedAt) + logger.Debugf("[redis] newTat: %d", newTat) + // Store new tat for initial tat of next requests expirationSeconds := utils.NanosecondsToSeconds(newTat-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { diff --git a/src/utils/utilities.go b/src/utils/utilities.go index ec673aa34..edbcc9645 100644 --- a/src/utils/utilities.go +++ b/src/utils/utilities.go @@ -27,6 +27,13 @@ func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { panic("should not get here") } +func MaxInt(a int, b int) int { + if a > b { + return a + } + return b +} + func MaxInt64(a int64, b int64) int64 { if a > b { return a From 510abf51d2a61cc2341cf804bcd8946f8dbb7278 Mon Sep 17 00:00:00 2001 
From: zufardhiyaulhaq Date: Thu, 21 Jan 2021 09:32:48 +0100 Subject: [PATCH 21/31] refactor redis & memcached unit tests Signed-off-by: zufardhiyaulhaq --- Makefile | 5 +- src/algorithm/fixed_window.go | 20 +- src/algorithm/rolling_window.go | 34 +- src/memcached/cache_impl.go | 6 - src/memcached/fixed_cache_impl.go | 32 +- src/memcached/windowed_cache_impl.go | 21 -- src/redis/cache_impl.go | 17 +- src/redis/fixed_cache_impl.go | 29 +- src/redis/windowed_cache_impl.go | 33 +- test/memcached/fixed_cache_impl_test.go | 120 ++++--- test/mocks/algorithm/ratelimit_algorithm.go | 103 ++++-- test/redis/bench_test.go | 7 +- test/redis/fixed_cache_impl_test.go | 311 ++++++---------- test/redis/windowed_cache_impl_test.go | 370 +++++++++++--------- 14 files changed, 477 insertions(+), 631 deletions(-) diff --git a/Makefile b/Makefile index 4f30d659b..168aa1a65 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ export GO111MODULE=on PROJECT = ratelimit -REGISTRY ?= zufardhiyaulhaq +REGISTRY ?= envoyproxy IMAGE := $(REGISTRY)/$(PROJECT) INTEGRATION_IMAGE := $(REGISTRY)/$(PROJECT)_integration MODULE = github.com/envoyproxy/ratelimit @@ -112,8 +112,7 @@ docker_tests: docker run $$(tty -s && echo "-it" || echo) $(INTEGRATION_IMAGE):$(VERSION) .PHONY: docker_image -# docker_image: docker_tests -docker_image: +docker_image: docker_tests docker build . 
-t $(IMAGE):$(VERSION) .PHONY: docker_push diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index f010d8d10..50512d839 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -1,7 +1,6 @@ package algorithm import ( - "encoding/json" "math" "github.com/coocood/freecache" @@ -19,14 +18,6 @@ type FixedWindowImpl struct { } func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { - - logger.Debugf("[fixed] key: %s", key) - logger.Debugf("[fixed] results: %d", results) - logger.Debugf("[fixed] hitsAddend: %d", hitsAddend) - logger.Debugf("[fixed] isOverLimitWithLocalCache: %t", isOverLimitWithLocalCache) - limitJSON, _ := json.Marshal(limit) - logger.Debugf("[fixed] key: %s", limitJSON) - if key == "" { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -46,10 +37,6 @@ func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config isOverLimit, limitRemaining, durationUntilReset := fw.IsOverLimit(limit, int64(results), hitsAddend) - logger.Debugf("[fixed] limitRemaining: %d", limitRemaining) - logger.Debugf("[fixed] isOverLimit: %t", isOverLimit) - logger.Debugf("[fixed] durationUntilReset: %d", durationUntilReset) - if !isOverLimit { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -60,7 +47,7 @@ func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config } else { if fw.localCache != nil { durationUntilReset = utils.MaxInt(1, durationUntilReset) - logger.Debugf("[fixed] duration until reset in local cache: %d", durationUntilReset) + err := fw.localCache.Set([]byte(key), []byte{}, durationUntilReset) if err != nil { logger.Errorf("Failing to set local cache key: %s", key) @@ -82,11 +69,6 @@ func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, h 
overLimitThreshold := int64(limit.Limit.RequestsPerUnit) nearLimitThreshold := int64(math.Floor(float64(float32(overLimitThreshold) * fw.nearLimitRatio))) - logger.Debugf("[fixed] limitAfterIncrease: %d", limitAfterIncrease) - logger.Debugf("[fixed] limitBeforeIncrease: %d", limitBeforeIncrease) - logger.Debugf("[fixed] overLimitThreshold: %d", overLimitThreshold) - logger.Debugf("[fixed] nearLimitThreshold: %d", nearLimitThreshold) - if limitAfterIncrease > overLimitThreshold { if limitBeforeIncrease >= overLimitThreshold { fw.PopulateStats(limit, 0, uint64(hitsAddend), 0) diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index a38280680..c7de26133 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -1,7 +1,6 @@ package algorithm import ( - "encoding/json" "math" "github.com/coocood/freecache" @@ -26,14 +25,6 @@ type RollingWindowImpl struct { } func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { - - logger.Debugf("[rolling] key: %s", key) - logger.Debugf("[rolling] results: %d", results) - logger.Debugf("[rolling] hitsAddend: %d", hitsAddend) - logger.Debugf("[rolling] isOverLimitWithLocalCache: %t", isOverLimitWithLocalCache) - limitJSON, _ := json.Marshal(limit) - logger.Debugf("[rolling] limit: %s", limitJSON) - if key == "" { return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -57,10 +48,6 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf isOverLimit, limitRemaining, durationUntilReset := rw.IsOverLimit(limit, int64(results), hitsAddend) - logger.Debugf("[rolling] limitRemaining: %d", limitRemaining) - logger.Debugf("[rolling] isOverLimit: %t", isOverLimit) - logger.Debugf("[rolling] durationUntilReset: %d", durationUntilReset) - if !isOverLimit { return 
&pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -71,7 +58,7 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf } else { if rw.localCache != nil { durationUntilReset = utils.MaxInt(1, durationUntilReset) - logger.Debugf("[rolling] duration until reset in local cache: %d", durationUntilReset) + err := rw.localCache.Set([]byte(key), []byte{}, durationUntilReset) if err != nil { logger.Errorf("Failing to set local cache key: %s", key) @@ -81,7 +68,7 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, - LimitRemaining: uint32(limitRemaining), + LimitRemaining: 0, DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(rw.tat - rw.arrivedAt)))), } } @@ -98,12 +85,6 @@ func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, period := utils.SecondsToNanoseconds(utils.UnitToDivider(limit.Limit.Unit)) quantity := int64(hitsAddend) - logger.Debugf("[rolling] rw.arrivedAt: %d", rw.arrivedAt) - logger.Debugf("[rolling] tat: %d", rw.tat) - logger.Debugf("[rolling] totalLimit: %d", totalLimit) - logger.Debugf("[rolling] period: %d", period) - logger.Debugf("[rolling] quantity: %d", quantity) - // GCRA computation // Emission interval is the cost of each request emissionInterval := period / totalLimit @@ -113,11 +94,6 @@ func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, allowAt := rw.newTat - period rw.diff = rw.arrivedAt - allowAt - logger.Debugf("[rolling] emissionInterval: %d", emissionInterval) - logger.Debugf("[rolling] rw.newTat: %d", rw.newTat) - logger.Debugf("[rolling] allowAt: %d", allowAt) - logger.Debugf("[rolling] diff: %d", rw.diff) - previousAllowAt := rw.tat - period previousLimitRemaining := int64(math.Ceil(float64((rw.arrivedAt - previousAllowAt) / emissionInterval))) previousLimitRemaining = 
utils.MaxInt64(previousLimitRemaining, 0) @@ -125,12 +101,6 @@ func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, limitRemaining := int64(math.Ceil(float64(rw.diff / emissionInterval))) hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) - logger.Debugf("[rolling] previousAllowAt: %d", previousAllowAt) - logger.Debugf("[rolling] previousLimitRemaining: %d", previousLimitRemaining) - logger.Debugf("[rolling] nearLimitWindow: %d", nearLimitWindow) - logger.Debugf("[rolling] limitRemaining: %d", limitRemaining) - logger.Debugf("[rolling] hitNearLimit: %d", hitNearLimit) - if rw.diff < 0 { rw.PopulateStats(limit, uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow)), uint64(quantity-previousLimitRemaining), 0) diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index bef5e71dc..4f50ffc87 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -31,11 +31,6 @@ import ( func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.TimeSource, jitterRand *rand.Rand, localCache *freecache.Cache, scope stats.Scope) (limiter.RateLimitCache, error) { if s.RateLimitAlgorithm == settings.FixedRateLimit { - ratelimitAlgorithm := algorithm.NewFixedWindowAlgorithm( - timeSource, - localCache, - s.NearLimitRatio, - ) return NewFixedRateLimitCacheImpl( memcache.New(s.MemcacheHostPort), timeSource, @@ -44,7 +39,6 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim localCache, scope, s.NearLimitRatio, - ratelimitAlgorithm, ), nil } if s.RateLimitAlgorithm == settings.WindowedRateLimit { diff --git a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go index ae0b60ef8..dc6d92d3c 100644 --- a/src/memcached/fixed_cache_impl.go +++ b/src/memcached/fixed_cache_impl.go @@ -2,7 +2,6 @@ package memcached import ( "context" - "encoding/json" "math/rand" "strconv" "sync" @@ -38,11 +37,6 @@ func (this 
*fixedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - limitsJSON, _ := json.Marshal(limits) - logger.Debugf("[memcached] limits: %s", limitsJSON) - requestJSON, _ := json.Marshal(request) - logger.Debugf("[memcached] request: %s", requestJSON) - logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. @@ -51,10 +45,6 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) - logger.Debugf("[memcached] hitsAddend: %d", hitsAddend) - cacheKeysJSON, _ := json.Marshal(cacheKeys) - logger.Debugf("[memcached] cacheKeys: %s", cacheKeysJSON) - isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -100,23 +90,13 @@ func (this *fixedRateLimitCacheImpl) DoLimit( } } - cacheKeyJSON, _ := json.Marshal(cacheKey) - logger.Debugf("[memcached] cacheKey: %s", cacheKeyJSON) - limitiJSON, _ := json.Marshal(limits[i]) - logger.Debugf("[memcached] limits[i]: %s", limitiJSON) - logger.Debugf("[memcached] result: %d", result) - logger.Debugf("[memcached] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) - logger.Debugf("[memcached] int64(hitsAddend): %t", int64(hitsAddend)) - - responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], result, isOverLimitWithLocalCache[i], int64(hitsAddend)) + resultAfterIncrease := result + hitsAddend + responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], resultAfterIncrease, isOverLimitWithLocalCache[i], int64(hitsAddend)) } this.waitGroup.Add(1) go this.increaseAsync(cacheKeys, isOverLimitWithLocalCache, limits, uint64(hitsAddend)) - responseDescriptorStatusesJSON, _ := 
json.Marshal(responseDescriptorStatuses) - logger.Debugf("[memcached] responseDescriptorStatuses: %s", responseDescriptorStatusesJSON) - return responseDescriptorStatuses } @@ -166,7 +146,7 @@ func (this *fixedRateLimitCacheImpl) Flush() { } func NewFixedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, timeSource: timeSource, @@ -175,6 +155,10 @@ func NewFixedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSourc expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm, + algorithm: algorithm.NewFixedWindowAlgorithm( + timeSource, + localCache, + nearLimitRatio, + ), } } diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index 73cbccfc1..031b29c23 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -2,7 +2,6 @@ package memcached import ( "context" - "encoding/json" "math/rand" "strconv" "sync" @@ -38,11 +37,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - limitsJSON, _ := json.Marshal(limits) - logger.Debugf("[memcached] limits: %s", limitsJSON) - requestJSON, _ := json.Marshal(request) - logger.Debugf("[memcached] request: %s", requestJSON) - logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. 
@@ -51,10 +45,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) - logger.Debugf("[memcached] hitsAddend: %d", hitsAddend) - cacheKeysJSON, _ := json.Marshal(cacheKeys) - logger.Debugf("[memcached] cacheKeys: %s", cacheKeysJSON) - isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -102,14 +92,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( } } - cacheKeyJSON, _ := json.Marshal(cacheKey) - logger.Debugf("[memcached] cacheKey: %s", cacheKeyJSON) - limitiJSON, _ := json.Marshal(limits[i]) - logger.Debugf("[memcached] limits[i]: %s", limitiJSON) - logger.Debugf("[memcached] tat: %d", tat) - logger.Debugf("[memcached] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) - logger.Debugf("[memcached] int64(hitsAddend): %t", int64(hitsAddend)) - responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], tat, isOverLimitWithLocalCache[i], int64(hitsAddend)) if responseDescriptorStatuses[i].Code == pb.RateLimitResponse_OVER_LIMIT { @@ -121,9 +103,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( arrivedAt := this.algorithm.GetArrivedAt() newTats[i] = this.algorithm.GetNewTat() - logger.Debugf("[memcached] arrivedAt: %d", arrivedAt) - logger.Debugf("[memcached] newTat: %d", newTats[i]) - expirationSeconds[i] = utils.NanosecondsToSeconds(newTats[i]-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { expirationSeconds[i] += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index a9df77ae6..74e1a25b8 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -6,7 +6,6 @@ import ( "github.com/coocood/freecache" - "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/limiter" 
"github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/server" @@ -25,11 +24,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca s.RedisPipelineWindow, s.RedisPipelineLimit) if s.RateLimitAlgorithm == settings.FixedRateLimit { - ratelimitAlgorithm := algorithm.NewFixedWindowAlgorithm( - timeSource, - localCache, - s.NearLimitRatio, - ) return NewFixedRateLimitCacheImpl( otherPool, perSecondPool, @@ -37,15 +31,9 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, expirationJitterMaxSeconds, localCache, - s.NearLimitRatio, - ratelimitAlgorithm), nil + s.NearLimitRatio), nil } if s.RateLimitAlgorithm == settings.WindowedRateLimit { - ratelimitAlgorithm := algorithm.NewRollingWindowAlgorithm( - timeSource, - localCache, - s.NearLimitRatio, - ) return NewWindowedRateLimitCacheImpl( otherPool, perSecondPool, @@ -53,8 +41,7 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca jitterRand, expirationJitterMaxSeconds, localCache, - s.NearLimitRatio, - ratelimitAlgorithm), nil + s.NearLimitRatio), nil } return nil, fmt.Errorf("Unknown rate limit algorithm. 
%s\n", s.RateLimitAlgorithm) } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index fad6c4e6a..826f0dbc8 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -1,7 +1,6 @@ package redis import ( - "encoding/json" "math/rand" "github.com/coocood/freecache" @@ -36,11 +35,6 @@ func (this *fixedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - limitsJSON, _ := json.Marshal(limits) - logger.Debugf("[redis] limits: %s", limitsJSON) - requestJSON, _ := json.Marshal(request) - logger.Debugf("[redis] request: %s", requestJSON) - logger.Debugf("starting cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. @@ -49,10 +43,6 @@ func (this *fixedRateLimitCacheImpl) DoLimit( // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) - logger.Debugf("[redis] hitsAddend: %d", hitsAddend) - cacheKeysJSON, _ := json.Marshal(cacheKeys) - logger.Debugf("[redis] cacheKeys: %s", cacheKeysJSON) - isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline @@ -101,20 +91,9 @@ func (this *fixedRateLimitCacheImpl) DoLimit( len(request.Descriptors)) for i, cacheKey := range cacheKeys { - cacheKeyJSON, _ := json.Marshal(cacheKey) - logger.Debugf("[redis] cacheKey: %s", cacheKeyJSON) - limitiJSON, _ := json.Marshal(limits[i]) - logger.Debugf("[redis] limits[i]: %s", limitiJSON) - logger.Debugf("[redis] results[i]: %d", results[i]) - logger.Debugf("[redis] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) - logger.Debugf("[redis] int64(hitsAddend): %t", int64(hitsAddend)) - responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], results[i], 
isOverLimitWithLocalCache[i], int64(hitsAddend)) } - responseDescriptorStatusesJSON, _ := json.Marshal(responseDescriptorStatuses) - logger.Debugf("[redis] responseDescriptorStatuses: %s", responseDescriptorStatusesJSON) - return responseDescriptorStatuses } @@ -125,7 +104,7 @@ func fixedPipelineAppend(client driver.Client, pipeline *driver.Pipeline, key st *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } -func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { +func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, @@ -135,6 +114,10 @@ func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Cli cacheKeyGenerator: utils.NewCacheKeyGenerator(), localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm, + algorithm: algorithm.NewFixedWindowAlgorithm( + timeSource, + localCache, + nearLimitRatio, + ), } } diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 00ff5cb19..00659f04e 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -1,7 +1,6 @@ package redis import ( - "encoding/json" "math/rand" "github.com/coocood/freecache" @@ -45,11 +44,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( request *pb.RateLimitRequest, limits []*config.RateLimit) []*pb.RateLimitResponse_DescriptorStatus { - limitsJSON, _ := json.Marshal(limits) - logger.Debugf("[redis] limits: %s", limitsJSON) - requestJSON, _ := json.Marshal(request) - 
logger.Debugf("[redis] request: %s", requestJSON) - logger.Debugf("starting windowed cache lookup") // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. @@ -58,10 +52,6 @@ func (this *windowedRateLimitCacheImpl) DoLimit( // First build a list of all cache keys that we are actually going to hit. cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) - logger.Debugf("[redis] hitsAddend: %d", hitsAddend) - cacheKeysJSON, _ := json.Marshal(cacheKeys) - logger.Debugf("[redis] cacheKeys: %s", cacheKeysJSON) - isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) var pipeline, perSecondPipeline driver.Pipeline @@ -109,22 +99,15 @@ func (this *windowedRateLimitCacheImpl) DoLimit( responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { - cacheKeyJSON, _ := json.Marshal(cacheKey) - logger.Debugf("[redis] cacheKey: %s", cacheKeyJSON) - limitiJSON, _ := json.Marshal(limits[i]) - logger.Debugf("[redis] limits[i]: %s", limitiJSON) - logger.Debugf("[redis] tats[i]: %d", tats[i]) - logger.Debugf("[redis] isOverLimitWithLocalCache[i]: %t", isOverLimitWithLocalCache[i]) - logger.Debugf("[redis] int64(hitsAddend): %t", int64(hitsAddend)) - responseDescriptorStatuses[i] = this.algorithm.GetResponseDescriptorStatus(cacheKey.Key, limits[i], int64(tats[i]), isOverLimitWithLocalCache[i], int64(hitsAddend)) + if cacheKey.Key == "" || isOverLimitWithLocalCache[i] { + continue + } + arrivedAt := this.algorithm.GetArrivedAt() newTat := this.algorithm.GetNewTat() - logger.Debugf("[redis] arrivedAt: %d", arrivedAt) - logger.Debugf("[redis] newTat: %d", newTat) - // Store new tat for initial tat of next requests expirationSeconds := utils.NanosecondsToSeconds(newTat-arrivedAt) + 1 if this.expirationJitterMaxSeconds > 0 { @@ -166,7 +149,7 @@ func 
windowedSetNewTatPipelineAppend(client driver.Client, pipeline *driver.Pipe *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } -func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, algorithm algorithm.RatelimitAlgorithm) limiter.RateLimitCache { +func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, @@ -176,6 +159,10 @@ func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver. cacheKeyGenerator: utils.NewCacheKeyGenerator(), localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm, + algorithm: algorithm.NewRollingWindowAlgorithm( + timeSource, + localCache, + nearLimitRatio, + ), } } diff --git a/test/memcached/fixed_cache_impl_test.go b/test/memcached/fixed_cache_impl_test.go index e5fbfe723..b3dfc06df 100644 --- a/test/memcached/fixed_cache_impl_test.go +++ b/test/memcached/fixed_cache_impl_test.go @@ -14,7 +14,6 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/src/utils" stats "github.com/lyft/gostats" @@ -36,17 +35,17 @@ func TestMemcached(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( - getMultiResult(map[string]int{"domain_key_value_1234": 4}), nil, + getMultiResult(map[string]int{"domain_key_value_1234": 0}), nil, ) - client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return(uint64(5), nil) - - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + client.EXPECT().Increment("domain_key_value_1234", uint64(1)) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -69,7 +68,7 @@ func TestMemcached(t *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[1].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), 
limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -99,8 +98,8 @@ func TestMemcached(t *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[1].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -132,7 +131,7 @@ func TestMemcachedGetError(t *testing.T) { limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -149,7 +148,7 @@ func TestMemcachedGetError(t *testing.T) { limits = []*config.RateLimit{config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, "key_value1", statsStore)} assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -205,7 +204,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8) - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := utils.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) @@ -221,7 +220,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -240,7 +239,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -259,7 +258,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -275,7 +274,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { 
client.EXPECT().Increment("domain_key4_value4_997200", uint64(1)).Times(0) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -298,6 +297,10 @@ func TestNearLimit(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} + // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key4_value4_997200"}).Return( @@ -305,14 +308,9 @@ func TestNearLimit(t *testing.T) { ) client.EXPECT().Increment("domain_key4_value4_997200", uint64(1)).Return(uint64(11), nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) - - limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} - assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -327,7 +325,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -343,7 +341,7 @@ func TestNearLimit(t *testing.T) { assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, 
LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -351,102 +349,102 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key5_value5_1234"}).Return( getMultiResult(map[string]int{"domain_key5_value5_1234": 2}), nil, ) client.EXPECT().Increment("domain_key5_value5_1234", uint64(3)).Return(uint64(5), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // All of it under limit, some over near limit + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) + limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", 
statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key6_value6_1234"}).Return( getMultiResult(map[string]int{"domain_key6_value6_1234": 5}), nil, ) client.EXPECT().Increment("domain_key6_value6_1234", uint64(2)).Return(uint64(7), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) - limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // All of it under limit, all of it over near limit + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key7_value7_1234"}).Return( getMultiResult(map[string]int{"domain_key7_value7_1234": 16}), nil, ) client.EXPECT().Increment("domain_key7_value7_1234", uint64(3)).Return(uint64(19), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: 
pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) // Some of it over limit, all of it over near limit + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key8_value8_1234"}).Return( getMultiResult(map[string]int{"domain_key8_value8_1234": 19}), nil, ) client.EXPECT().Increment("domain_key8_value8_1234", uint64(3)).Return(uint64(22), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // Some of it in all 
three places + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) + limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key9_value9_1234"}).Return( getMultiResult(map[string]int{"domain_key9_value9_1234": 15}), nil, ) client.EXPECT().Increment("domain_key9_value9_1234", uint64(7)).Return(uint64(22), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) - limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) // all of it over limit + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key10_value10_1234"}).Return( getMultiResult(map[string]int{"domain_key10_value10_1234": 27}), nil, ) client.EXPECT().Increment("domain_key10_value10_1234", uint64(3)).Return(uint64(30), nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 
3) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) @@ -466,6 +464,9 @@ func TestMemcacheWithJitter(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) jitterSource.EXPECT().Int63().Return(int64(100)) @@ -484,11 +485,8 @@ func TestMemcacheWithJitter(t *testing.T) { }, ).Return(nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, 
cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -507,6 +505,9 @@ func TestMemcacheAdd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + // Test a race condition with the initial add timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) @@ -525,17 +526,17 @@ func TestMemcacheAdd(t *testing.T) { client.EXPECT().Increment("domain_key_value_1234", uint64(1)).Return( uint64(2), nil) - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // A rate limit with 1-minute window + request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) + limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2", statsStore)} + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) 
client.EXPECT().GetMulti([]string{"domain_key2_value2_1200"}).Return(nil, nil) client.EXPECT().Increment("domain_key2_value2_1200", uint64(1)).Return( @@ -548,11 +549,8 @@ func TestMemcacheAdd(t *testing.T) { }, ).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key2", "value2"}}}, 1) - limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2", statsStore)} - assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) diff --git a/test/mocks/algorithm/ratelimit_algorithm.go b/test/mocks/algorithm/ratelimit_algorithm.go index 58e2233c9..c2d5dd035 100644 --- a/test/mocks/algorithm/ratelimit_algorithm.go +++ b/test/mocks/algorithm/ratelimit_algorithm.go @@ -5,10 +5,9 @@ package mock_algorithm import ( - envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" config "github.com/envoyproxy/ratelimit/src/config" - limiter "github.com/envoyproxy/ratelimit/src/limiter" - driver "github.com/envoyproxy/ratelimit/src/redis/driver" + utils "github.com/envoyproxy/ratelimit/src/utils" gomock "github.com/golang/mock/gomock" reflect "reflect" ) @@ -36,66 +35,100 @@ func (m *MockRatelimitAlgorithm) EXPECT() *MockRatelimitAlgorithmMockRecorder { return m.recorder } -// GenerateCacheKey mocks base method -func (m *MockRatelimitAlgorithm) 
GenerateCacheKey(domain string, descriptor *envoy_extensions_common_ratelimit_v3.RateLimitDescriptor, limit *config.RateLimit) limiter.CacheKey { +// IsOverLimit mocks base method +func (m *MockRatelimitAlgorithm) IsOverLimit(limit *config.RateLimit, results, hitsAddend int64) (bool, int64, int) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GenerateCacheKey", domain, descriptor, limit) - ret0, _ := ret[0].(limiter.CacheKey) + ret := m.ctrl.Call(m, "IsOverLimit", limit, results, hitsAddend) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(int64) + ret2, _ := ret[2].(int) + return ret0, ret1, ret2 +} + +// IsOverLimit indicates an expected call of IsOverLimit +func (mr *MockRatelimitAlgorithmMockRecorder) IsOverLimit(limit, results, hitsAddend interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsOverLimit", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).IsOverLimit), limit, results, hitsAddend) +} + +// IsOverLimitWithLocalCache mocks base method +func (m *MockRatelimitAlgorithm) IsOverLimitWithLocalCache(key string) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsOverLimitWithLocalCache", key) + ret0, _ := ret[0].(bool) return ret0 } -// GenerateCacheKey indicates an expected call of GenerateCacheKey -func (mr *MockRatelimitAlgorithmMockRecorder) GenerateCacheKey(domain, descriptor, limit interface{}) *gomock.Call { +// IsOverLimitWithLocalCache indicates an expected call of IsOverLimitWithLocalCache +func (mr *MockRatelimitAlgorithmMockRecorder) IsOverLimitWithLocalCache(key interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCacheKey", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GenerateCacheKey), domain, descriptor, limit) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsOverLimitWithLocalCache", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).IsOverLimitWithLocalCache), key) } -// AppendPipeline mocks base method -func (m 
*MockRatelimitAlgorithm) AppendPipeline(client driver.Client, pipeline driver.Pipeline, key string, hitsAddend uint32, result interface{}, expirationSeconds int64) driver.Pipeline { +// GetResponseDescriptorStatus mocks base method +func (m *MockRatelimitAlgorithm) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppendPipeline", client, pipeline, key, hitsAddend, result, expirationSeconds) - ret0, _ := ret[0].(driver.Pipeline) + ret := m.ctrl.Call(m, "GetResponseDescriptorStatus", key, limit, results, isOverLimitWithLocalCache, hitsAddend) + ret0, _ := ret[0].(*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus) return ret0 } -// AppendPipeline indicates an expected call of AppendPipeline -func (mr *MockRatelimitAlgorithmMockRecorder) AppendPipeline(client, pipeline, key, hitsAddend, result, expirationSeconds interface{}) *gomock.Call { +// GetResponseDescriptorStatus indicates an expected call of GetResponseDescriptorStatus +func (mr *MockRatelimitAlgorithmMockRecorder) GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendPipeline", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).AppendPipeline), client, pipeline, key, hitsAddend, result, expirationSeconds) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResponseDescriptorStatus", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GetResponseDescriptorStatus), key, limit, results, isOverLimitWithLocalCache, hitsAddend) } -// PopulateStats mocks base method -func (m *MockRatelimitAlgorithm) PopulateStats() { +// GetNewTat mocks base method +func (m *MockRatelimitAlgorithm) GetNewTat() int64 { m.ctrl.T.Helper() - m.ctrl.Call(m, "PopulateStats") + ret := m.ctrl.Call(m, 
"GetNewTat") + ret0, _ := ret[0].(int64) + return ret0 } -// PopulateStats indicates an expected call of PopulateStats -func (mr *MockRatelimitAlgorithmMockRecorder) PopulateStats() *gomock.Call { +// GetNewTat indicates an expected call of GetNewTat +func (mr *MockRatelimitAlgorithmMockRecorder) GetNewTat() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PopulateStats", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).PopulateStats)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNewTat", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GetNewTat)) } -// CalculateResetDuration mocks base method -func (m *MockRatelimitAlgorithm) CalculateResetDuration() { +// GetArrivedAt mocks base method +func (m *MockRatelimitAlgorithm) GetArrivedAt() int64 { m.ctrl.T.Helper() - m.ctrl.Call(m, "CalculateResetDuration") + ret := m.ctrl.Call(m, "GetArrivedAt") + ret0, _ := ret[0].(int64) + return ret0 } -// CalculateResetDuration indicates an expected call of CalculateResetDuration -func (mr *MockRatelimitAlgorithmMockRecorder) CalculateResetDuration() *gomock.Call { +// GetArrivedAt indicates an expected call of GetArrivedAt +func (mr *MockRatelimitAlgorithmMockRecorder) GetArrivedAt() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateResetDuration", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).CalculateResetDuration)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetArrivedAt", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GetArrivedAt)) } -// IsUnderLimit mocks base method -func (m *MockRatelimitAlgorithm) IsUnderLimit() { +// GenerateCacheKeys mocks base method +func (m *MockRatelimitAlgorithm) GenerateCacheKeys(request *envoy_service_ratelimit_v3.RateLimitRequest, limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { m.ctrl.T.Helper() - m.ctrl.Call(m, "IsUnderLimit") + ret := m.ctrl.Call(m, "GenerateCacheKeys", request, limits, hitsAddend) + 
ret0, _ := ret[0].([]utils.CacheKey) + return ret0 } -// IsUnderLimit indicates an expected call of IsUnderLimit -func (mr *MockRatelimitAlgorithmMockRecorder) IsUnderLimit() *gomock.Call { +// GenerateCacheKeys indicates an expected call of GenerateCacheKeys +func (mr *MockRatelimitAlgorithmMockRecorder) GenerateCacheKeys(request, limits, hitsAddend interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateCacheKeys", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).GenerateCacheKeys), request, limits, hitsAddend) +} + +// PopulateStats mocks base method +func (m *MockRatelimitAlgorithm) PopulateStats(limit *config.RateLimit, nearLimit, overLimit, overLimitWithLocalCache uint64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "PopulateStats", limit, nearLimit, overLimit, overLimitWithLocalCache) +} + +// PopulateStats indicates an expected call of PopulateStats +func (mr *MockRatelimitAlgorithmMockRecorder) PopulateStats(limit, nearLimit, overLimit, overLimitWithLocalCache interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnderLimit", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).IsUnderLimit)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PopulateStats", reflect.TypeOf((*MockRatelimitAlgorithm)(nil).PopulateStats), limit, nearLimit, overLimit, overLimitWithLocalCache) } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index ab5d1c686..ec7c1ebff 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -10,7 +10,6 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" - "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" @@ -50,11 +49,9 @@ func BenchmarkParallelDoLimit(b *testing.B) { var cache limiter.RateLimitCache timeSource 
:= utils.NewTimeSourceImpl() if rateLimitAlgorithm == settings.FixedRateLimit { - algorithmImpl := algorithm.NewFixedWindowAlgorithm(timeSource) - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) } else if rateLimitAlgorithm == settings.WindowedRateLimit { - algorithmImpl := algorithm.NewRollingWindowAlgorithm() - cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8, algorithmImpl) + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(utils.NewLockedSource(time.Now().Unix())), 10, nil, 0.8) } else { b.Fatalf("unknown rate limit type %s", rateLimitAlgorithm) } diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 3a85b0f2b..d70807b03 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -14,7 +14,6 @@ import ( redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" "github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" - mock_algorithm "github.com/envoyproxy/ratelimit/test/mocks/algorithm" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_driver "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" @@ -40,15 +39,6 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { client := mock_driver.NewMockClient(controller) perSecondClient := mock_driver.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - var cache limiter.RateLimitCache - if usePerSecondRedis { - cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, 
ratelimitAlgorithm) - } else { - cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) - } - statsStore := stats.NewStore(stats.NewNullSink(), false) - domain := "domain" var clientUsed *mock_driver.MockClient if usePerSecondRedis { @@ -57,24 +47,28 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client } + var cache limiter.RateLimitCache + if usePerSecondRedis { + cache = redis.NewFixedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } else { + cache = redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } + + statsStore := stats.NewStore(stats.NewNullSink(), false) + domain := "domain" + // Test 1 request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_1234", uint32(1), gomock.Any(), int64(1)). - SetArg(4, uint32(5)). 
- Return(redis_driver.Pipeline{}) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", int64(1)).SetArg(1, int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -92,25 +86,15 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} clientUsed = client - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ - Key: "domain_key2_value2_subkey2_subvalue2_1200", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key2_value2_subkey2_subvalue2_1200", uint32(1), gomock.Any(), int64(60)). - SetArg(4, uint32(11)). 
- Return(redis_driver.Pipeline{}) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", int64(1)).SetArg(1, int64(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[1].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) @@ -128,37 +112,25 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} clientUsed = client - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(4) - clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(5) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key3_value3_997200", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ - Key: "domain_key3_value3_subkey3_subvalue3_950400", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(11)). 
- Return(redis_driver.Pipeline{}) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_subkey3_subvalue3_950400", uint32(1), gomock.Any(), int64(86400)). - SetArg(4, uint32(13)). - Return(redis_driver.Pipeline{}) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", int64(1)).SetArg(1, int64(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", int64(1)).SetArg(1, int64(13)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[1].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[1].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - 
assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) } } @@ -206,31 +178,26 @@ func TestOverLimitWithLocalCache(t *testing.T) { client := mock_driver.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, ratelimitAlgorithm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := utils.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) domain := "domain" // Test Near Limit Stats. Under Near Limit Ratio request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(11)). 
- Return(redis_driver.Pipeline{}) + + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -241,20 +208,15 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) // Test Near Limit Stats. At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(13)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -265,20 +227,15 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test Over limit stats - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(16)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -289,14 +246,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: true, - }) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), 
limits[0].Stats.OverLimit.Value()) @@ -314,8 +268,7 @@ func TestNearLimit(t *testing.T) { client := mock_driver.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" @@ -323,40 +276,31 @@ func TestNearLimit(t *testing.T) { request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key4_value4", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(11)). 
- Return(redis_driver.Pipeline{}) + + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 4, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // Test Near Limit Stats. At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(13)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -364,20 +308,15 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. - timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_997200", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_997200", uint32(1), gomock.Any(), int64(3600)). - SetArg(4, uint32(16)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1000000)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", int64(1)).SetArg(1, int64(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -388,19 +327,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key5_value5_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key5_value5_1234", uint32(3), gomock.Any(), int64(1)). - SetArg(4, uint32(5)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", int64(3)).SetArg(1, int64(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 15, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -410,19 +344,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key6_value6_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key6_value6_1234", uint32(2), gomock.Any(), int64(1)). - SetArg(4, uint32(7)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", int64(2)).SetArg(1, int64(7)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -432,19 +361,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key7_value7_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key7_value7_1234", uint32(3), gomock.Any(), int64(1)). - SetArg(4, uint32(19)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", int64(3)).SetArg(1, int64(19)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -454,19 +378,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key8_value8_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key8_value8_1234", uint32(3), gomock.Any(), int64(1)). - SetArg(4, uint32(22)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", int64(3)).SetArg(1, int64(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -476,19 +395,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key9_value9_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key9_value9_1234", uint32(7), gomock.Any(), int64(1)). - SetArg(4, uint32(22)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", int64(7)).SetArg(1, int64(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(7), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -498,19 +412,14 @@ func TestNearLimit(t *testing.T) { request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key10_value10_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key10_value10_1234", uint32(3), gomock.Any(), int64(1)). - SetArg(4, uint32(30)). 
- Return(redis_driver.Pipeline{}) + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", int64(3)).SetArg(1, int64(30)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(3), limits[0].Stats.OverLimit.Value()) @@ -525,27 +434,23 @@ func TestRedisWithJitter(t *testing.T) { client := mock_driver.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) jitterSource := mock_limiter.NewMockJitterRandSource(controller) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, ratelimitAlgorithm) + cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8) statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(2) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], 
limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_1234", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_1234", uint32(1), gomock.Any(), int64(101)). - SetArg(4, uint32(5)). - Return(redis_driver.Pipeline{}) - jitterSource.EXPECT().Int63().Return(int64(100)) + + timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", int64(1)).SetArg(1, int64(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) + jitterSource.EXPECT().Int63().Return(int64(100)) + assert.Equal( - []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateReset(limits[0].Limit, timeSource)}}, + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 5, DurationUntilReset: utils.CalculateFixedReset(limits[0].Limit, timeSource)}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go index 94b83959c..6bb45c653 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -4,17 +4,16 @@ import ( "math/rand" "testing" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" - redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" + "github.com/envoyproxy/ratelimit/src/utils" 
"github.com/envoyproxy/ratelimit/test/common" - mock_algorithm "github.com/envoyproxy/ratelimit/test/mocks/algorithm" mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" redis_driver_mock "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" - "github.com/coocood/freecache" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/duration" stats "github.com/lyft/gostats" @@ -35,42 +34,38 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { client := redis_driver_mock.NewMockClient(controller) perSecondClient := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - var cache limiter.RateLimitCache - if usePerSecondRedis { - cache = redis.NewWindowedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) - } else { - cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) - } - statsStore := stats.NewStore(stats.NewNullSink(), false) - domain := "domain" + var clientUsed *redis_driver_mock.MockClient if usePerSecondRedis { clientUsed = perSecondClient + } else { clientUsed = client } + var cache limiter.RateLimitCache + if usePerSecondRedis { + cache = redis.NewWindowedRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } else { + cache = redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) + } + statsStore := stats.NewStore(stats.NewNullSink(), false) + // Test 1 request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + 
clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(1)). - SetArg(4, int64(0)). 
- Return(redis_driver.Pipeline{}) - assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, cache.DoLimit(nil, request, limits)) @@ -88,29 +83,26 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ nil, config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + clientUsed = client + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key2_value2_subkey2_subvalue2_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_0", int64(60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key2_value2_subkey2_subvalue2_0").DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ - Key: "domain_key2_value2_subkey2_subvalue2_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key2_value2_subkey2_subvalue2_0", gomock.Any(), gomock.Any(), int64(60)). - SetArg(4, int64(70e9)). 
- Return(redis_driver.Pipeline{}) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key2_value2_subkey2_subvalue2_0", int64(1e9+6e9)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_0", int64(7)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 69}}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 6}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) // Test 3 @@ -123,38 +115,37 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { limits = []*config.RateLimit{ config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(5) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(2) + clientUsed = client + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key3_value3_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_0", int64(3600)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_0").DoAndReturn(pipeAppend) + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), 
"SETNX", "domain_key3_value3_subkey3_subvalue3_0", int64(0)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_0", int64(24*3600)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key3_value3_subkey3_subvalue3_0").DoAndReturn(pipeAppend) clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key3_value3_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_0", gomock.Any(), gomock.Any(), int64(60*60)). - SetArg(4, int64(60*60*1e9)). - Return(redis_driver.Pipeline{}) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[1], limits[1]).Return(limiter.CacheKey{ - Key: "domain_key3_value3_subkey3_subvalue3_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key3_value3_subkey3_subvalue3_0", gomock.Any(), gomock.Any(), int64(60*60*24)). - SetArg(4, int64(60*60*24*1e9)). 
- Return(redis_driver.Pipeline{}) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key3_value3_0", int64(361*1e9)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_0", int64(361)).DoAndReturn(pipeAppend) + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key3_value3_subkey3_subvalue3_0", int64((24*360*1e9)+1e9)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_0", int64((24*360)+1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: (60 * 60) - 1}}, - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: (60 * 60 * 24) - 1}}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 360}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 24 * 360}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) - assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) - assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) } } @@ -165,8 +156,7 @@ 
func TestNearLimitWindowed(t *testing.T) { client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, ratelimitAlgorithm) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8) statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) @@ -174,19 +164,27 @@ func TestNearLimitWindowed(t *testing.T) { config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key4_value4", statsStore)} // Test Near Limit Stats. Under Near Limit Ratio - timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). - SetArg(4, int64(50e9)). 
- Return(redis_driver.Pipeline{}) + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 01 second + // tat = 01 second + + // newTat should be max(arriveAt,tat)+increment = 7 second + // expire should be (newtat-arriveat)+1 = 7 second + // DurationUntilReset should be newtat-arriveat = 6 second + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(1e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(50e9+6e9)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(50+6-50+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(1e9+6e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(6+1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( @@ -198,24 +196,32 @@ func TestNearLimitWindowed(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // Test Near Limit Stats. At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). 
- AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). - SetArg(4, int64(98e9)). - Return(redis_driver.Pipeline{}) + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 07 second + // tat = 54 second + + // newTat should be max(arriveAt,tat)+increment = 60 second + // expire should be (newtat-arriveat)+1 = 54 second + // DurationUntilReset should be newtat-arriveat = 53 second + timeSource.EXPECT().UnixNanoNow().Return(int64(7e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(54e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(98e9+6e9)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(98+6-50+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(54e9+6e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60-7+1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 54}}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 53}}}, cache.DoLimit(nil, 
request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -223,20 +229,32 @@ func TestNearLimitWindowed(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. - timeSource.EXPECT().UnixNanoNow().Return(int64(50e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key4_value4_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key4_value4_0", gomock.Any(), gomock.Any(), int64(60)). - SetArg(4, int64(110e9)). - Return(redis_driver.Pipeline{}) + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 04 second + // tat = 60 second + + // newTat should be max(arriveAt,tat)+increment = 66 second + // expire should be (newtat-arriveat)+1 = 7 second + // DurationUntilReset should be newtat-arriveat = 6 second + timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key4_value4_0").SetArg(1, int64(60e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key4_value4_0", int64(60e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_0", int64(60-4+1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) 
assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 110 - 50}}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 56}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) @@ -251,36 +269,43 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - ratelimitAlgorithm := mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, ratelimitAlgorithm) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) domain := "domain" - localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := utils.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} // Test Near Limit Stats. Under Near Limit Ratio - timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). 
- AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). - SetArg(4, int64(71*4*60*1e9)). - Return(redis_driver.Pipeline{}) + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 1 minute + // tat = 12 minite + + // newTat should be max(arriveAt,tat)+increment = 18 minute + // expire should be (newtat-arriveat)+1 second = 17 minute 1 second + // DurationUntilReset should be newtat-arriveat = 17 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(1 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(12*60*1e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(72*4*60*1e9)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(12*4*60+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(18*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64((17*60)+1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 3, DurationUntilReset: &duration.Duration{Seconds: 12 * 4 * 60}}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 7, DurationUntilReset: 
&duration.Duration{Seconds: 17 * 60}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -291,29 +316,32 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) // Test Near Limit Stats. At Near Limit Ratio, still OK - timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). - SetArg(4, int64(72*4*60*1e9)). - Return(redis_driver.Pipeline{}) + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 12 minute + // tat = 60 minute + + // newTat should be max(arriveAt,tat)+increment = 66 minute + // expire should be (newtat-arriveat)+1 second = 54 minute 1 second + // DurationUntilReset should be newtat-arriveat = 54 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(12 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(60*60*1e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(73*4*60*1e9)).DoAndReturn(pipeAppend) - client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", 
"domain_key_value_0", int64(13*4*60+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(66*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64((54*60)+1)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - - limits = []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} - assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 2, DurationUntilReset: &duration.Duration{Seconds: 13 * 4 * 60}}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 54 * 60}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) @@ -324,25 +352,32 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test Over limit stats - timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: false, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(60*60)). - SetArg(4, int64(75*4*60*1e9)). 
- Return(redis_driver.Pipeline{}) + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 2 minute + // tat = 72 minute + + // newTat should be max(arriveAt,tat)+increment = 78 minute (not used) + // expire should be (tat-arriveat)+1 second = 70 minute 1 second + // DurationUntilReset should be tat-arriveat = 70 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(2 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(72*60*1e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - - limits = []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(72*60*1e9)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64((70*60)+1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 15 * 4 * 60}}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 70 * 60}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(1), 
limits[0].Stats.OverLimit.Value()) @@ -353,19 +388,23 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - request = common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limits = []*config.RateLimit{ - config.NewRateLimit(15, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore)} + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 3 minute + // tat = 72 minute - timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 4 * 60 * 1e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: false, - }) + // newTat should be max(arriveAt,tat)+increment = 78 minute (not used) + // expire should be (tat-arriveat)+1 second = 69 minute 1 second + // DurationUntilReset should be secondsToReset-(arriveAt%secondsToReset) = 57 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(3 * 60 * 1e9)).MaxTimes(1) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ - {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 15 * 4 * 60}}}, + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 57 * 60}}}, cache.DoLimit(nil, request, limits)) assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) @@ -384,29 +423,38 @@ func TestRedisWindowedWithJitter(t *testing.T) { client := redis_driver_mock.NewMockClient(controller) timeSource := mock_limiter.NewMockTimeSource(controller) jitterSource := mock_limiter.NewMockJitterRandSource(controller) - ratelimitAlgorithm := 
mock_algorithm.NewMockRatelimitAlgorithm(controller) - cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, ratelimitAlgorithm) + cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8) statsStore := stats.NewStore(stats.NewNullSink(), false) - domain := "domain" request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + // periode = 1 second + // limit = 10 request/second + // emissionInterval = 1/10 second + // request = 1 + // increment = emissionInterval*request = 1/10 second + + // arriveAt = 1 second + // tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 1,1 second + // expire should be (tat-arriveat)+1 second = 1 second + // DurationUntilReset should be newTat-arriveat = 0.1 second + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - ratelimitAlgorithm.EXPECT().GenerateCacheKey(domain, request.Descriptors[0], limits[0]).Return(limiter.CacheKey{ - Key: "domain_key_value_0", - PerSecond: true, - }) - ratelimitAlgorithm.EXPECT(). - AppendPipeline(gomock.Any(), gomock.Any(), "domain_key_value_0", gomock.Any(), gomock.Any(), int64(1)). - SetArg(4, int64(0)). 
- Return(redis_driver.Pipeline{}) - jitterSource.EXPECT().Int63().Return(int64(100)) + + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key_value_0", int64(0)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "GET", "domain_key_value_0").SetArg(1, int64(1e9)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SET", "domain_key_value_0", int64(1e9+1e8)).DoAndReturn(pipeAppend) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_0", int64(101)).DoAndReturn(pipeAppend) client.EXPECT().PipeDo(gomock.Any()).Return(nil) + jitterSource.EXPECT().Int63().Return(int64(100)) + assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, cache.DoLimit(nil, request, limits)) From 307c27d2968ab6258e4ae5119e7804c53f4c0047 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Tue, 26 Jan 2021 11:41:38 +0100 Subject: [PATCH 22/31] add compile time check and fix readme Signed-off-by: zufardhiyaulhaq --- README.md | 3 +-- src/algorithm/fixed_window.go | 2 ++ src/algorithm/rolling_window.go | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b5ce7118a..f633b361d 100644 --- a/README.md +++ b/README.md @@ -190,8 +190,7 @@ For a limit of 60 requests per hour, there can only 60 requests in a single time Fixed window algorithm does not care when did the request arrive, all 60 can arrive at 01:01 or 01:50 and the limit will still reset at 02:00. 2. Rolling window -For a limit of 60 requests per hour. Initially it able to take a burst of 60 requests at once, then the limit restore by 1 each minute. 
-Requests are allowed as long as there's still some available limit. +For a limit of 60 requests per hour. Initially it is able to take a burst of 60 requests at once, then the limit is restored by 1 each minute. Requests are allowed as long as there's still some available limit. Configure rate limit algorithm with `RATE_LIMIT_ALGORITHM` environment variable. Use `FIXED_WINDOW` and `ROLLING_WINDOW` respectively. diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index 50512d839..9cbc3e9c2 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -10,6 +10,8 @@ import ( logger "github.com/sirupsen/logrus" ) +var _ RatelimitAlgorithm = (*FixedWindowImpl)(nil) + type FixedWindowImpl struct { timeSource utils.TimeSource cacheKeyGenerator utils.CacheKeyGenerator diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index c7de26133..4ed2a9ab3 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -13,6 +13,8 @@ import ( const DummyCacheKeyTime = 0 +var _ RatelimitAlgorithm = (*RollingWindowImpl)(nil) + type RollingWindowImpl struct { timeSource utils.TimeSource cacheKeyGenerator utils.CacheKeyGenerator From baf012b34202e757dd194c854e7b934d95ff396a Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Thu, 4 Feb 2021 06:26:36 +0100 Subject: [PATCH 23/31] refactor algorithm interfaces Signed-off-by: zufardhiyaulhaq --- src/algorithm/fixed_window.go | 5 +++-- src/algorithm/ratelimit_algorithm.go | 4 ++-- src/algorithm/rolling_window.go | 13 ++++++++----- src/memcached/windowed_cache_impl.go | 6 ++---- src/redis/windowed_cache_impl.go | 7 +++---- 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index b3514f61f..ec111ad21 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -113,10 +113,11 @@ func (fw *FixedWindowImpl) PopulateStats(limit *config.RateLimit, nearLimit 
uint limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) } -func (fw *FixedWindowImpl) GetNewTat() int64 { +func (fw *FixedWindowImpl) GetExpirationSeconds() int64 { return 0 } -func (fw *FixedWindowImpl) GetArrivedAt() int64 { + +func (fw *FixedWindowImpl) GetResultsAfterIncrease() int64 { return 0 } diff --git a/src/algorithm/ratelimit_algorithm.go b/src/algorithm/ratelimit_algorithm.go index 9223c51b8..e1d5fa9e9 100644 --- a/src/algorithm/ratelimit_algorithm.go +++ b/src/algorithm/ratelimit_algorithm.go @@ -11,8 +11,8 @@ type RatelimitAlgorithm interface { IsOverLimitWithLocalCache(key string) bool GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus - GetNewTat() int64 - GetArrivedAt() int64 + GetExpirationSeconds() int64 + GetResultsAfterIncrease() int64 GenerateCacheKeys(request *pb.RateLimitRequest, limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index 5321b78f6..2aad3bfcc 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -126,15 +126,18 @@ func (rw *RollingWindowImpl) IsOverLimitWithLocalCache(key string) bool { return false } -func (rw *RollingWindowImpl) GetNewTat() int64 { +func (rw *RollingWindowImpl) GetExpirationSeconds() int64 { if rw.diff < 0 { - return rw.tat + return utils.NanosecondsToSeconds(rw.tat-rw.arrivedAt) + 1 } - return rw.newTat + return utils.NanosecondsToSeconds(rw.newTat-rw.arrivedAt) + 1 } -func (rw *RollingWindowImpl) GetArrivedAt() int64 { - return rw.arrivedAt +func (rw *RollingWindowImpl) GetResultsAfterIncrease() int64 { + if rw.diff < 0 { + return rw.tat + } + return rw.newTat } func (rw *RollingWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index 2ca6ff3b4..b7ae5967f 
100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -99,10 +99,8 @@ func (this *windowedRateLimitCacheImpl) DoLimit( isOverLimit[i] = false } - arrivedAt := this.algorithm.GetArrivedAt() - newTats[i] = this.algorithm.GetNewTat() - - expirationSeconds[i] = utils.NanosecondsToSeconds(newTats[i]-arrivedAt) + 1 + newTats[i] = this.algorithm.GetResultsAfterIncrease() + expirationSeconds[i] = this.algorithm.GetExpirationSeconds() if this.expirationJitterMaxSeconds > 0 { expirationSeconds[i] += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 159ddb350..14d4fe87c 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -104,11 +104,10 @@ func (this *windowedRateLimitCacheImpl) DoLimit( continue } - arrivedAt := this.algorithm.GetArrivedAt() - newTat := this.algorithm.GetNewTat() - // Store new tat for initial tat of next requests - expirationSeconds := utils.NanosecondsToSeconds(newTat-arrivedAt) + 1 + newTat := this.algorithm.GetResultsAfterIncrease() + expirationSeconds := this.algorithm.GetExpirationSeconds() + if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } From 1face4c1c526face953799c94d7b3649f3c80391 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Tue, 9 Feb 2021 12:16:16 +0100 Subject: [PATCH 24/31] fix mocks & add memcached windowed test Signed-off-by: zufardhiyaulhaq --- test/memcached/windowed_cache_impl_test.go | 62 +++++++ test/mocks/limiter/limiter.go | 156 ------------------ test/mocks/limiter/rate_limit_cache.go | 62 +++++++ .../utils/{utils.go => jitter_rand_source.go} | 55 +----- test/mocks/utils/time.go | 61 +++++++ test/redis/fixed_cache_impl_test.go | 12 +- test/redis/windowed_cache_impl_test.go | 12 +- 7 files changed, 202 insertions(+), 218 deletions(-) create mode 100644 
test/memcached/windowed_cache_impl_test.go delete mode 100644 test/mocks/limiter/limiter.go create mode 100644 test/mocks/limiter/rate_limit_cache.go rename test/mocks/utils/{utils.go => jitter_rand_source.go} (50%) create mode 100644 test/mocks/utils/time.go diff --git a/test/memcached/windowed_cache_impl_test.go b/test/memcached/windowed_cache_impl_test.go new file mode 100644 index 000000000..b416931c6 --- /dev/null +++ b/test/memcached/windowed_cache_impl_test.go @@ -0,0 +1,62 @@ +package memcached_test + +import ( + "strconv" + "testing" + + "github.com/bradfitz/gomemcache/memcache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/memcached" + "github.com/envoyproxy/ratelimit/test/common" + mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" + mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/duration" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" +) + +func TestMemcachedWindowed(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_memcached.NewMockClient(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + + // Test 1 + // periode = 1 second + // limit = 10 request/second + // emissionInterval = 0.1 second + // request = 1 + // increment = emissionInterval*request = 0.1 second + + // arriveAt = 1 second + // tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 1.1 second + // DurationUntilReset should be newTat-arriveat = 0.1 minute + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key_value_0"}).Return( + getMultiResult(map[string]int{"domain_key_value_0": 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key_value_0", + Value: []byte(strconv.FormatInt(int64(1e9+1e8), 10)), + Expiration: int32(1), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) +} diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go deleted file mode 100644 index 927031404..000000000 --- a/test/mocks/limiter/limiter.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/ratelimit/src/limiter (interfaces: RateLimitCache) - -// Package mock_limiter is a generated GoMock package. 
-package mock_limiter - -import ( - context "context" - envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - config "github.com/envoyproxy/ratelimit/src/config" - gomock "github.com/golang/mock/gomock" - reflect "reflect" -) - -// MockRateLimitCache is a mock of RateLimitCache interface -type MockRateLimitCache struct { - ctrl *gomock.Controller - recorder *MockRateLimitCacheMockRecorder -} - -// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache -type MockRateLimitCacheMockRecorder struct { - mock *MockRateLimitCache -} - -// NewMockRateLimitCache creates a new mock instance -func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { - mock := &MockRateLimitCache{ctrl: ctrl} - mock.recorder = &MockRateLimitCacheMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { - return m.recorder -} - -// DoLimit mocks base method -func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *envoy_service_ratelimit_v3.RateLimitRequest, arg2 []*config.RateLimit) []*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) - ret0, _ := ret[0].([]*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus) - return ret0 -} - -// DoLimit indicates an expected call of DoLimit -func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) -} - -// Flush mocks base method -func (m *MockRateLimitCache) Flush() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Flush") -} - -// Flush indicates an expected call of Flush -func (mr *MockRateLimitCacheMockRecorder) Flush() *gomock.Call { - 
mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockRateLimitCache)(nil).Flush)) -} - -// MockTimeSource is a mock of TimeSource interface -type MockTimeSource struct { - ctrl *gomock.Controller - recorder *MockTimeSourceMockRecorder -} - -// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource -type MockTimeSourceMockRecorder struct { - mock *MockTimeSource -} - -// NewMockTimeSource creates a new mock instance -func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { - mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &MockTimeSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { - return m.recorder -} - -// UnixNanoNow mocks base method -func (m *MockTimeSource) UnixNanoNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNanoNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNanoNow indicates an expected call of UnixNanoNow -func (mr *MockTimeSourceMockRecorder) UnixNanoNow() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNanoNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNanoNow)) -} - -// UnixNow mocks base method -func (m *MockTimeSource) UnixNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNow indicates an expected call of UnixNow -func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) -} - -// MockJitterRandSource is a mock of JitterRandSource interface -type MockJitterRandSource struct { - ctrl *gomock.Controller - recorder *MockJitterRandSourceMockRecorder -} - -// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource 
-type MockJitterRandSourceMockRecorder struct { - mock *MockJitterRandSource -} - -// NewMockJitterRandSource creates a new mock instance -func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { - mock := &MockJitterRandSource{ctrl: ctrl} - mock.recorder = &MockJitterRandSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { - return m.recorder -} - -// Int63 mocks base method -func (m *MockJitterRandSource) Int63() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Int63") - ret0, _ := ret[0].(int64) - return ret0 -} - -// Int63 indicates an expected call of Int63 -func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) -} - -// Seed mocks base method -func (m *MockJitterRandSource) Seed(arg0 int64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Flush") -} diff --git a/test/mocks/limiter/rate_limit_cache.go b/test/mocks/limiter/rate_limit_cache.go new file mode 100644 index 000000000..5e9520b3b --- /dev/null +++ b/test/mocks/limiter/rate_limit_cache.go @@ -0,0 +1,62 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./src/limiter/rate_limit_cache.go + +// Package mock_limiter is a generated GoMock package. 
+package mock_limiter + +import ( + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + config "github.com/envoyproxy/ratelimit/src/config" + gomock "github.com/golang/mock/gomock" + context "golang.org/x/net/context" + reflect "reflect" +) + +// MockRateLimitCache is a mock of RateLimitCache interface +type MockRateLimitCache struct { + ctrl *gomock.Controller + recorder *MockRateLimitCacheMockRecorder +} + +// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache +type MockRateLimitCacheMockRecorder struct { + mock *MockRateLimitCache +} + +// NewMockRateLimitCache creates a new mock instance +func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { + mock := &MockRateLimitCache{ctrl: ctrl} + mock.recorder = &MockRateLimitCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { + return m.recorder +} + +// DoLimit mocks base method +func (m *MockRateLimitCache) DoLimit(ctx context.Context, request *envoy_service_ratelimit_v3.RateLimitRequest, limits []*config.RateLimit) []*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoLimit", ctx, request, limits) + ret0, _ := ret[0].([]*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus) + return ret0 +} + +// DoLimit indicates an expected call of DoLimit +func (mr *MockRateLimitCacheMockRecorder) DoLimit(ctx, request, limits interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), ctx, request, limits) +} + +// Flush mocks base method +func (m *MockRateLimitCache) Flush() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Flush") +} + +// Flush indicates an expected call of Flush +func (mr *MockRateLimitCacheMockRecorder) Flush() 
*gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockRateLimitCache)(nil).Flush)) +} diff --git a/test/mocks/utils/utils.go b/test/mocks/utils/jitter_rand_source.go similarity index 50% rename from test/mocks/utils/utils.go rename to test/mocks/utils/jitter_rand_source.go index 64e93d631..2289cf8b9 100644 --- a/test/mocks/utils/utils.go +++ b/test/mocks/utils/jitter_rand_source.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/ratelimit/src/utils (interfaces: TimeSource,JitterRandSource) +// Source: ./src/utils/jitter_rand_source.go // Package mock_utils is a generated GoMock package. package mock_utils @@ -9,51 +9,6 @@ import ( reflect "reflect" ) -// MockTimeSource is a mock of TimeSource interface -type MockTimeSource struct { - ctrl *gomock.Controller - recorder *MockTimeSourceMockRecorder -} - -// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource -type MockTimeSourceMockRecorder struct { - mock *MockTimeSource -} - -// NewMockTimeSource creates a new mock instance -func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { - mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &MockTimeSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { - return m.recorder -} - -// UnixNow mocks base method -func (m *MockTimeSource) UnixNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNow mocks base method -func (m *MockTimeSource) UnixNanoNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNanoNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNow indicates an expected call of UnixNow -func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) -} - // MockJitterRandSource is a mock of JitterRandSource interface type MockJitterRandSource struct { ctrl *gomock.Controller @@ -92,13 +47,13 @@ func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { } // Seed mocks base method -func (m *MockJitterRandSource) Seed(arg0 int64) { +func (m *MockJitterRandSource) Seed(seed int64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "Seed", arg0) + m.ctrl.Call(m, "Seed", seed) } // Seed indicates an expected call of Seed -func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { +func (mr *MockJitterRandSourceMockRecorder) Seed(seed interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), seed) } diff --git a/test/mocks/utils/time.go b/test/mocks/utils/time.go new file mode 100644 index 000000000..148bdc521 --- /dev/null +++ b/test/mocks/utils/time.go @@ -0,0 +1,61 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./src/utils/time.go + +// Package mock_utils is a generated GoMock package. 
+package mock_utils + +import ( + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockTimeSource is a mock of TimeSource interface +type MockTimeSource struct { + ctrl *gomock.Controller + recorder *MockTimeSourceMockRecorder +} + +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { + mock *MockTimeSource +} + +// NewMockTimeSource creates a new mock instance +func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { + mock := &MockTimeSource{ctrl: ctrl} + mock.recorder = &MockTimeSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder +} + +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) +} + +// UnixNanoNow mocks base method +func (m *MockTimeSource) UnixNanoNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNanoNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNanoNow indicates an expected call of UnixNanoNow +func (mr *MockTimeSourceMockRecorder) UnixNanoNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNanoNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNanoNow)) +} diff --git a/test/redis/fixed_cache_impl_test.go b/test/redis/fixed_cache_impl_test.go index 44dc7bdc9..031c311c3 100644 --- a/test/redis/fixed_cache_impl_test.go +++ b/test/redis/fixed_cache_impl_test.go @@ -14,8 +14,8 @@ import ( redis_driver "github.com/envoyproxy/ratelimit/src/redis/driver" 
"github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" - mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_driver "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" + mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "math/rand" "testing" @@ -38,7 +38,7 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { client := mock_driver.NewMockClient(controller) perSecondClient := mock_driver.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) var clientUsed *mock_driver.MockClient if usePerSecondRedis { @@ -176,7 +176,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() client := mock_driver.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "") sink := &common.TestStatSink{} @@ -267,7 +267,7 @@ func TestNearLimit(t *testing.T) { defer controller.Finish() client := mock_driver.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) cache := redis.NewFixedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" @@ -432,8 +432,8 @@ func TestRedisWithJitter(t *testing.T) { defer controller.Finish() client := mock_driver.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) - jitterSource := mock_limiter.NewMockJitterRandSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) + jitterSource := mock_utils.NewMockJitterRandSource(controller) cache := redis.NewFixedRateLimitCacheImpl(client, nil, 
timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go index 6cdff2c37..421a34e65 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -11,8 +11,8 @@ import ( "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" - mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" redis_driver_mock "github.com/envoyproxy/ratelimit/test/mocks/redis/driver" + mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/duration" @@ -33,7 +33,7 @@ func testRedisWindowed(usePerSecondRedis bool) func(*testing.T) { client := redis_driver_mock.NewMockClient(controller) perSecondClient := redis_driver_mock.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) var clientUsed *redis_driver_mock.MockClient if usePerSecondRedis { @@ -155,7 +155,7 @@ func TestNearLimitWindowed(t *testing.T) { defer controller.Finish() client := redis_driver_mock.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) domain := "domain" @@ -267,7 +267,7 @@ func TestWindowedOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() client := redis_driver_mock.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) localCache := freecache.NewCache(100) cache := redis.NewWindowedRateLimitCacheImpl(client, nil, 
timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8, "") sink := &common.TestStatSink{} @@ -421,8 +421,8 @@ func TestRedisWindowedWithJitter(t *testing.T) { defer controller.Finish() client := redis_driver_mock.NewMockClient(controller) - timeSource := mock_limiter.NewMockTimeSource(controller) - jitterSource := mock_limiter.NewMockJitterRandSource(controller) + timeSource := mock_utils.NewMockTimeSource(controller) + jitterSource := mock_utils.NewMockJitterRandSource(controller) cache := redis.NewWindowedRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") statsStore := stats.NewStore(stats.NewNullSink(), false) From 8ce4708f616d05408d2504bb3dc7611dd1490a2c Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Tue, 9 Feb 2021 12:28:58 +0100 Subject: [PATCH 25/31] fix format settings.go Signed-off-by: zufardhiyaulhaq --- src/settings/settings.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/settings/settings.go b/src/settings/settings.go index b92f406c7..2c2e6b915 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -64,7 +64,7 @@ type Settings struct { MemcacheHostPort string `envconfig:"MEMCACHE_HOST_PORT" default:""` // Algorithm settings - RateLimitAlgorithm string `envconfig:"RATE_LIMIT_ALGORITHM" default:"FIXED_WINDOW"` + RateLimitAlgorithm string `envconfig:"RATE_LIMIT_ALGORITHM" default:"FIXED_WINDOW"` } type Option func(*Settings) From d2d32b422d08cfab5a9349655535c927dd0384a2 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Tue, 9 Feb 2021 15:36:35 +0100 Subject: [PATCH 26/31] fix memcached windowed unit test Signed-off-by: zufardhiyaulhaq --- src/memcached/windowed_cache_impl.go | 2 +- test/memcached/fixed_cache_impl_test.go | 2 +- .../memcached/stats_collecting_client_test.go | 2 +- test/memcached/windowed_cache_impl_test.go | 56 +++++++++++++++++- test/mocks/memcached/{ => driver}/client.go | 57 ++++++++++--------- 5 files changed, 86 insertions(+), 33 deletions(-) 
rename test/mocks/memcached/{ => driver}/client.go (67%) diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index b7ae5967f..4ca153397 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -119,7 +119,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( func (this *windowedRateLimitCacheImpl) increaseAsync(isOverLimitWithLocalCache []bool, isOverLimit []bool, cacheKeys []utils.CacheKey, expirationSeconds []int64, newTats []int64) { defer this.waitGroup.Done() for i, cacheKey := range cacheKeys { - if cacheKey.Key == "" || isOverLimitWithLocalCache[i] || isOverLimit[i] { + if cacheKey.Key == "" || isOverLimitWithLocalCache[i] { continue } diff --git a/test/memcached/fixed_cache_impl_test.go b/test/memcached/fixed_cache_impl_test.go index e0a1a3757..f8e87fed7 100644 --- a/test/memcached/fixed_cache_impl_test.go +++ b/test/memcached/fixed_cache_impl_test.go @@ -19,7 +19,7 @@ import ( stats "github.com/lyft/gostats" "github.com/envoyproxy/ratelimit/test/common" - mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" + mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached/driver" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/test/memcached/stats_collecting_client_test.go b/test/memcached/stats_collecting_client_test.go index 1d27f1e35..d24f92f54 100644 --- a/test/memcached/stats_collecting_client_test.go +++ b/test/memcached/stats_collecting_client_test.go @@ -7,7 +7,7 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/envoyproxy/ratelimit/src/memcached" - mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" + mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached/driver" "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" diff --git 
a/test/memcached/windowed_cache_impl_test.go b/test/memcached/windowed_cache_impl_test.go index b416931c6..7a09369fa 100644 --- a/test/memcached/windowed_cache_impl_test.go +++ b/test/memcached/windowed_cache_impl_test.go @@ -9,7 +9,7 @@ import ( "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/memcached" "github.com/envoyproxy/ratelimit/test/common" - mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached" + mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached/driver" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/duration" @@ -27,7 +27,9 @@ func TestMemcachedWindowed(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") - // Test 1 + // test 1 + // test initial rate limit process + // periode = 1 second // limit = 10 request/second // emissionInterval = 0.1 second @@ -39,6 +41,8 @@ func TestMemcachedWindowed(t *testing.T) { // newTat should be max(arriveAt,tat)+increment = 1.1 second // DurationUntilReset should be newTat-arriveat = 0.1 minute + // expiration should be second(newTat-arriveat)+1 = 0.1 minute + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -59,4 +63,52 @@ func TestMemcachedWindowed(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + // test 2 + // test rate limit with multiple description + + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 1 second + 
// tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 7 second + // DurationUntilReset should be newTat-arriveat = 6 second + // expiration should be second(newTat-arriveat)+1 = 7 second + + request = common.NewRateLimitRequest( + "domain", + [][][2]string{ + {{"key2", "value2"}}, + {{"key2", "value2"}, {"subkey2", "subvalue2"}}, + }, 1) + limits = []*config.RateLimit{ + nil, + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key2_value2_subkey2_subvalue2", statsStore)} + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key2_value2_subkey2_subvalue2_0"}).Return( + getMultiResult(map[string]int{"domain_key2_value2_subkey2_subvalue2_0": 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key2_value2_subkey2_subvalue2_0", + Value: []byte(strconv.FormatInt(int64(1e9+6e9), 10)), + Expiration: int32(7), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 6}}}, + cache.DoLimit(nil, request, limits)) + + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + + cache.Flush() } diff --git a/test/mocks/memcached/client.go b/test/mocks/memcached/driver/client.go similarity index 67% rename from test/mocks/memcached/client.go rename to test/mocks/memcached/driver/client.go index 690900e59..3bc29d8d9 100644 --- a/test/mocks/memcached/client.go +++ b/test/mocks/memcached/driver/client.go @@ -1,13 +1,14 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/ratelimit/src/memcached (interfaces: Client) +// Source: ./src/memcached/driver/client.go // Package mock_memcached is a generated GoMock package. 
package mock_memcached import ( + reflect "reflect" + memcache "github.com/bradfitz/gomemcache/memcache" gomock "github.com/golang/mock/gomock" - reflect "reflect" ) // MockClient is a mock of Client interface @@ -33,60 +34,60 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } -// Add mocks base method -func (m *MockClient) Add(arg0 *memcache.Item) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Add", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Add indicates an expected call of Add -func (mr *MockClientMockRecorder) Add(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockClient)(nil).Add), arg0) -} - // GetMulti mocks base method -func (m *MockClient) GetMulti(arg0 []string) (map[string]*memcache.Item, error) { +func (m *MockClient) GetMulti(keys []string) (map[string]*memcache.Item, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMulti", arg0) + ret := m.ctrl.Call(m, "GetMulti", keys) ret0, _ := ret[0].(map[string]*memcache.Item) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMulti indicates an expected call of GetMulti -func (mr *MockClientMockRecorder) GetMulti(arg0 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) GetMulti(keys interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockClient)(nil).GetMulti), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMulti", reflect.TypeOf((*MockClient)(nil).GetMulti), keys) } // Increment mocks base method -func (m *MockClient) Increment(arg0 string, arg1 uint64) (uint64, error) { +func (m *MockClient) Increment(key string, delta uint64) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Increment", arg0, arg1) + ret := m.ctrl.Call(m, "Increment", key, delta) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // Increment indicates 
an expected call of Increment -func (mr *MockClientMockRecorder) Increment(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Increment(key, delta interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Increment", reflect.TypeOf((*MockClient)(nil).Increment), key, delta) +} + +// Add mocks base method +func (m *MockClient) Add(item *memcache.Item) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", item) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add +func (mr *MockClientMockRecorder) Add(item interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Increment", reflect.TypeOf((*MockClient)(nil).Increment), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockClient)(nil).Add), item) } // Set mocks base method -func (m *MockClient) Set(arg0 *memcache.Item) error { +func (m *MockClient) Set(item *memcache.Item) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Set", arg0) + ret := m.ctrl.Call(m, "Set", item) ret0, _ := ret[0].(error) return ret0 } // Set indicates an expected call of Set -func (mr *MockClientMockRecorder) Set(arg0 interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Set(item interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClient)(nil).Set), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClient)(nil).Set), item) } From bc25eb7eb0d1cb2a6a283f76a94c3ca06779f3c8 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 10 Feb 2021 05:44:57 +0100 Subject: [PATCH 27/31] add unit tests for windowed memcached Signed-off-by: zufardhiyaulhaq --- src/algorithm/rolling_window.go | 1 + src/memcached/cache_impl.go | 2 - src/memcached/fixed_cache_impl.go | 3 +- src/memcached/windowed_cache_impl.go | 3 +- 
test/memcached/fixed_cache_impl_test.go | 12 +- test/memcached/windowed_cache_impl_test.go | 393 ++++++++++++++++++++- test/redis/windowed_cache_impl_test.go | 4 +- 7 files changed, 403 insertions(+), 15 deletions(-) diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index 2aad3bfcc..3ec839aef 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -40,6 +40,7 @@ func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *conf secondsToReset := utils.UnitToDivider(limit.Limit.Unit) secondsToReset -= utils.NanosecondsToSeconds(rw.timeSource.UnixNanoNow()) % secondsToReset + return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, diff --git a/src/memcached/cache_impl.go b/src/memcached/cache_impl.go index 8f0de20d1..de7941d93 100644 --- a/src/memcached/cache_impl.go +++ b/src/memcached/cache_impl.go @@ -38,7 +38,6 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim jitterRand, s.ExpirationJitterMaxSeconds, localCache, - scope, s.NearLimitRatio, s.CacheKeyPrefix), nil } @@ -50,7 +49,6 @@ func NewRateLimitCacheImplFromSettings(s settings.Settings, timeSource utils.Tim jitterRand, s.ExpirationJitterMaxSeconds, localCache, - scope, s.NearLimitRatio, s.CacheKeyPrefix), nil } diff --git a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go index 734bffec5..94628fb89 100644 --- a/src/memcached/fixed_cache_impl.go +++ b/src/memcached/fixed_cache_impl.go @@ -14,7 +14,6 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached/driver" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" ) @@ -149,7 +148,7 @@ func (this *fixedRateLimitCacheImpl) Flush() { } func NewFixedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds 
int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &fixedRateLimitCacheImpl{ client: client, timeSource: timeSource, diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index 4ca153397..002de0c5f 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -14,7 +14,6 @@ import ( "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/memcached/driver" "github.com/envoyproxy/ratelimit/src/utils" - stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" ) @@ -141,7 +140,7 @@ func (this *windowedRateLimitCacheImpl) Flush() { } func NewWindowedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSource, jitterRand *rand.Rand, - expirationJitterMaxSeconds int64, localCache *freecache.Cache, scope stats.Scope, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { + expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) limiter.RateLimitCache { return &windowedRateLimitCacheImpl{ client: client, timeSource: timeSource, diff --git a/test/memcached/fixed_cache_impl_test.go b/test/memcached/fixed_cache_impl_test.go index f8e87fed7..d1f329d3c 100644 --- a/test/memcached/fixed_cache_impl_test.go +++ b/test/memcached/fixed_cache_impl_test.go @@ -33,7 +33,7 @@ func TestMemcached(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") request := 
common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -119,7 +119,7 @@ func TestMemcachedGetError(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3) client.EXPECT().GetMulti([]string{"domain_key_value_1234"}).Return( @@ -203,7 +203,7 @@ func TestOverLimitWithLocalCache(t *testing.T) { localCache := freecache.NewCache(100) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, localCache, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, localCache, 0.8, "") localCacheStats := utils.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. 
Under Near Limit Ratio @@ -295,7 +295,7 @@ func TestNearLimit(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) limits := []*config.RateLimit{ @@ -462,7 +462,7 @@ func TestMemcacheWithJitter(t *testing.T) { client := mock_memcached.NewMockClient(controller) jitterSource := mock_utils.NewMockJitterRandSource(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -503,7 +503,7 @@ func TestMemcacheAdd(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + cache := memcached.NewFixedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} diff --git a/test/memcached/windowed_cache_impl_test.go b/test/memcached/windowed_cache_impl_test.go index 7a09369fa..aad609780 100644 --- 
a/test/memcached/windowed_cache_impl_test.go +++ b/test/memcached/windowed_cache_impl_test.go @@ -1,13 +1,16 @@ package memcached_test import ( + "math/rand" "strconv" "testing" "github.com/bradfitz/gomemcache/memcache" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/memcached" + "github.com/envoyproxy/ratelimit/src/utils" "github.com/envoyproxy/ratelimit/test/common" mock_memcached "github.com/envoyproxy/ratelimit/test/mocks/memcached/driver" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" @@ -25,7 +28,7 @@ func TestMemcachedWindowed(t *testing.T) { timeSource := mock_utils.NewMockTimeSource(controller) client := mock_memcached.NewMockClient(controller) statsStore := stats.NewStore(stats.NewNullSink(), false) - cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, nil, statsStore, 0.8, "") + cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") // test 1 // test initial rate limit process @@ -110,5 +113,393 @@ func TestMemcachedWindowed(t *testing.T) { assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) + // test 3 + // test rate limit with multiple description and different limit configuration + + // periode = 1 hour = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute = 360 second + // request = 1 + // increment = emissionInterval*request = 6 minute = 360 second + + // arriveAt = 1 second + // tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 361 second + // DurationUntilReset should be newTat-arriveat = 360 second + // expiration should be second(newTat-arriveat)+1 = 361 second + + // periode = 1 day = 86400 second + // limit = 10 request/day + // emissionInterval = 8640 second + // request = 1 + // increment = emissionInterval*request = 8640 
second + + // arriveAt = 1 second + // tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 8641 second + // DurationUntilReset should be newTat-arriveat = 8640 second + // expiration should be second(newTat-arriveat)+1 = 8641 second + + request = common.NewRateLimitRequest( + "domain", + [][][2]string{ + {{"key3", "value3"}}, + {{"key3", "value3"}, {"subkey3", "subvalue3"}}, + }, 1) + limits = []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key3_value3", statsStore), + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_DAY, "key3_value3_subkey3_subvalue3", statsStore)} + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(2) + + client.EXPECT().GetMulti([]string{"domain_key3_value3_0", "domain_key3_value3_subkey3_subvalue3_0"}).Return( + getMultiResult(map[string]int{"domain_key3_value3_0": 1e9, "domain_key3_value3_subkey3_subvalue3_0": 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key3_value3_0", + Value: []byte(strconv.FormatInt(int64(1e9+360e9), 10)), + Expiration: int32(361), + }) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key3_value3_subkey3_subvalue3_0", + Value: []byte(strconv.FormatInt(int64(1e9+8640e9), 10)), + Expiration: int32(8641), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 360}}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 8640}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + assert.Equal(uint64(1), limits[1].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[1].Stats.OverLimit.Value()) + assert.Equal(uint64(0), 
limits[1].Stats.NearLimit.Value()) + + cache.Flush() +} + +func TestNearLimitWindowed(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_memcached.NewMockClient(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, nil, 0.8, "") + domain := "domain" + + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key4", "value4"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key4_value4", statsStore)} + + // Test Near Limit Stats. Under Near Limit Ratio + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 01 second + // tat = 01 second + + // newTat should be max(arriveAt,tat)+increment = 7 second + // expire should be (newtat-arriveat)+1 = 7 second + // DurationUntilReset should be newtat-arriveat = 6 second + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key4_value4_0"}).Return( + getMultiResult(map[string]int{"domain_key4_value4_0": 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key4_value4_0", + Value: []byte(strconv.FormatInt(int64(1e9+6e9), 10)), + Expiration: int32(7), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Seconds: 6}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + // Test Near Limit Stats. 
At Near Limit Ratio, still OK + // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 07 second + // tat = 54 second + + // newTat should be max(arriveAt,tat)+increment = 60 second + // expire should be (newtat-arriveat)+1 = 54 second + // DurationUntilReset should be newtat-arriveat = 53 second + timeSource.EXPECT().UnixNanoNow().Return(int64(7e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key4_value4_0"}).Return( + getMultiResult(map[string]int{"domain_key4_value4_0": 54e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key4_value4_0", + Value: []byte(strconv.FormatInt(int64(60e9), 10)), + Expiration: int32(54), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 53}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases + // when we are near limit, not after we have passed the limit. 
+ // periode = 1 minute = 60 second + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second + + // arriveAt = 04 second + // tat = 60 second + + // newTat should be max(arriveAt,tat)+increment = 66 second + // expire should be (tat-arriveat)+1 = 57 second + // DurationUntilReset should be tat-arriveat = 56 second + timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key4_value4_0"}).Return( + getMultiResult(map[string]int{"domain_key4_value4_0": 60e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key4_value4_0", + Value: []byte(strconv.FormatInt(int64(60e9), 10)), + Expiration: int32(57), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 56}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + cache.Flush() +} + +func TestWindowedOverLimitWithLocalCache(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_memcached.NewMockClient(controller) + sink := &common.TestStatSink{} + statsStore := stats.NewStore(sink, true) + localCache := freecache.NewCache(100) + localCacheStats := utils.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, nil, 0, localCache, 0.8, "") + domain := "domain" + + request := common.NewRateLimitRequest(domain, [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", 
statsStore)} + // Test Near Limit Stats. Under Near Limit Ratio + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 1 minute + // tat = 12 minute + + // newTat should be max(arriveAt,tat)+increment = 18 minute + // expire should be (newtat-arriveat)+1 second = 17 minute 1 second + // DurationUntilReset should be newtat-arriveat = 17 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(1 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key_value_0"}).Return( + getMultiResult(map[string]int{"domain_key_value_0": 12 * 60 * 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key_value_0", + Value: []byte(strconv.FormatInt(int64(18*60*1e9), 10)), + Expiration: int32((17 * 60) + 1), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 7, DurationUntilReset: &duration.Duration{Seconds: 17 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. + testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) + + // Test Near Limit Stats. 
At Near Limit Ratio, still OK + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 12 minute + // tat = 60 minute + + // newTat should be max(arriveAt,tat)+increment = 66 minute + // expire should be (newtat-arriveat)+1 second = 54 minute 1 second + // DurationUntilReset should be newtat-arriveat = 54 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(12 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key_value_0"}).Return( + getMultiResult(map[string]int{"domain_key_value_0": 60 * 60 * 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key_value_0", + Value: []byte(strconv.FormatInt(int64(66*60*1e9), 10)), + Expiration: int32((54 * 60) + 1), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 1, DurationUntilReset: &duration.Duration{Seconds: 54 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(2), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) + + // Test Over limit stats + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 2 minute + // tat = 72 minute + + // newTat should be max(arriveAt,tat)+increment = 78 minute (not used) + // expire should be (tat-arriveat)+1 second = 70 minute 1 second + // DurationUntilReset should be tat-arriveat = 70 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(2 * 60 * 1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key_value_0"}).Return( + getMultiResult(map[string]int{"domain_key_value_0": 72 * 60 * 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key_value_0", + Value: []byte(strconv.FormatInt(int64(72*60*1e9), 10)), + Expiration: int32((70 * 60) + 1), + }) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 70 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(3), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) + + // Test Over limit stats with local cache + // periode = 60 minute = 3600 second + // limit = 10 request/hour + // emissionInterval = 6 minute + // request = 1 + // increment = emissionInterval*request = 6 minute + + // arriveAt = 3 minute + // tat = 72 minute + + // newTat should be max(arriveAt,tat)+increment = 78 minute (not used) + // expire should be (tat-arriveat)+1 second = 69 minute 1 second + // DurationUntilReset should be secondsToReset-(arriveAt%secondsToReset) = 57 minute + timeSource.EXPECT().UnixNanoNow().Return(int64(3 * 60 * 1e9)).MaxTimes(1) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0, DurationUntilReset: &duration.Duration{Seconds: 57 * 60}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(4), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value()) + assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value()) + assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) + + // Check the local cache stats. 
+ testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1) + + cache.Flush() +} + +func TestRedisWindowedWithJitter(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + client := mock_memcached.NewMockClient(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + jitterSource := mock_utils.NewMockJitterRandSource(controller) + cache := memcached.NewWindowedRateLimitCacheImpl(client, timeSource, rand.New(jitterSource), 3600, nil, 0.8, "") + + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + // periode = 1 second + // limit = 10 request/second + // emissionInterval = 1/10 second + // request = 1 + // increment = emissionInterval*request = 1/10 second + + // arriveAt = 1 second + // tat = 1 second + + // newTat should be max(arriveAt,tat)+increment = 1,1 second + // expire should be (tat-arriveat)+1 second = 1 second + // DurationUntilReset should be newTat-arriveat = 0.1 second + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + client.EXPECT().GetMulti([]string{"domain_key_value_0"}).Return( + getMultiResult(map[string]int{"domain_key_value_0": 1e9}), nil, + ) + client.EXPECT().Set(&memcache.Item{ + Key: "domain_key_value_0", + Value: []byte(strconv.FormatInt(int64(1e9+1e8), 10)), + Expiration: int32(100 + 1), + }) + + jitterSource.EXPECT().Int63().Return(int64(100)) + + assert.Equal( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 9, DurationUntilReset: &duration.Duration{Nanos: 1e8}}}, + cache.DoLimit(nil, request, limits)) + assert.Equal(uint64(1), limits[0].Stats.TotalHits.Value()) + assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) + 
assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) + cache.Flush() } diff --git a/test/redis/windowed_cache_impl_test.go b/test/redis/windowed_cache_impl_test.go index 421a34e65..90caf9b0c 100644 --- a/test/redis/windowed_cache_impl_test.go +++ b/test/redis/windowed_cache_impl_test.go @@ -239,8 +239,8 @@ func TestNearLimitWindowed(t *testing.T) { // tat = 60 second // newTat should be max(arriveAt,tat)+increment = 66 second - // expire should be (newtat-arriveat)+1 = 7 second - // DurationUntilReset should be newtat-arriveat = 6 second + // expire should be (tat-arriveat)+1 = 57 second + // DurationUntilReset should be tat-arriveat = 56 second timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "SETNX", "domain_key4_value4_0", int64(0)).DoAndReturn(pipeAppend) From 10404f4147f47b585f84cc13f7bb2bf6eb0066af Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 10 Feb 2021 07:07:10 +0100 Subject: [PATCH 28/31] add fixed algorithm unit test Signed-off-by: zufardhiyaulhaq --- test/algorithm/fixed_algorithm_test.go | 149 +++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 test/algorithm/fixed_algorithm_test.go diff --git a/test/algorithm/fixed_algorithm_test.go b/test/algorithm/fixed_algorithm_test.go new file mode 100644 index 000000000..94e16eb63 --- /dev/null +++ b/test/algorithm/fixed_algorithm_test.go @@ -0,0 +1,149 @@ +package algorithm + +import ( + "testing" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/utils" + "github.com/envoyproxy/ratelimit/test/common" + mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" + "github.com/golang/mock/gomock" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" +) + +func TestIsOverLimit(t 
*testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + + var result int64 = 1 + var hitsAddend int64 = 1 + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset := algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(false, actualIsOverLimit) + assert.Equal(int64(9), actualLimitRemaining) + assert.Equal(1, actualDurationUntilReset) + + result = 10 + hitsAddend = 1 + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset = algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(false, actualIsOverLimit) + assert.Equal(int64(0), actualLimitRemaining) + assert.Equal(1, actualDurationUntilReset) + + result = 11 + hitsAddend = 1 + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset = algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(true, actualIsOverLimit) + assert.Equal(int64(0), actualLimitRemaining) + assert.Equal(1, actualDurationUntilReset) +} + +func TestIsOverLimitWithLocalCache(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + key := "key_value" + + timeSource := mock_utils.NewMockTimeSource(controller) + localCache := freecache.NewCache(100) + + algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, localCache, 0.8, "") + assert.Equal(false, algorithm.IsOverLimitWithLocalCache(key)) + + localCache.Set([]byte(key), []byte{}, 1) + assert.Equal(true, 
algorithm.IsOverLimitWithLocalCache(key)) +} + +func TestGenerateCacheKeys(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + + var hitsAddend int64 = 1 + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limit := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1) + + expectedResult := []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_1", PerSecond: true}}) + actualResult := algorithm.GenerateCacheKeys(request, limit, hitsAddend) + assert.Equal(expectedResult, actualResult) +} + +func TestPopulateStats(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1) + + algorithm.PopulateStats(limit, 1, 0, 0) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 1, 0) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 0, 1) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), 
limit.Stats.OverLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) +} + +func TestGetResponseDescriptorStatus(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + + key := "key_value" + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + var results int64 = 1 + var hitsAddend int64 = 1 + isOverLimitWithLocalCache := false + + timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(2) + + expectedResult := &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: 9, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, timeSource)} + + actualResult := algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + assert.Equal(expectedResult, actualResult) +} From 3ec9ccab1b73e20ceaa1999a0c83ae6f56a4fcae Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Wed, 10 Feb 2021 09:45:20 +0100 Subject: [PATCH 29/31] add rolling window algorithm unit test Signed-off-by: zufardhiyaulhaq --- ...algorithm_test.go => fixed_window_test.go} | 10 +- test/algorithm/rolling_window_test.go | 277 ++++++++++++++++++ 2 files changed, 282 insertions(+), 5 deletions(-) rename test/algorithm/{fixed_algorithm_test.go => fixed_window_test.go} (95%) create mode 100644 test/algorithm/rolling_window_test.go diff --git a/test/algorithm/fixed_algorithm_test.go b/test/algorithm/fixed_window_test.go similarity index 95% rename from test/algorithm/fixed_algorithm_test.go rename to test/algorithm/fixed_window_test.go index 94e16eb63..565a1b170 100644 --- a/test/algorithm/fixed_algorithm_test.go +++ b/test/algorithm/fixed_window_test.go @@ -15,7 +15,7 @@ import ( 
"github.com/stretchr/testify/assert" ) -func TestIsOverLimit(t *testing.T) { +func TestFixedIsOverLimit(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -55,7 +55,7 @@ func TestIsOverLimit(t *testing.T) { assert.Equal(1, actualDurationUntilReset) } -func TestIsOverLimitWithLocalCache(t *testing.T) { +func TestFixedIsOverLimitWithLocalCache(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -72,7 +72,7 @@ func TestIsOverLimitWithLocalCache(t *testing.T) { assert.Equal(true, algorithm.IsOverLimitWithLocalCache(key)) } -func TestGenerateCacheKeys(t *testing.T) { +func TestFixedGenerateCacheKeys(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -92,7 +92,7 @@ func TestGenerateCacheKeys(t *testing.T) { assert.Equal(expectedResult, actualResult) } -func TestPopulateStats(t *testing.T) { +func TestFixedPopulateStats(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -121,7 +121,7 @@ func TestPopulateStats(t *testing.T) { assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) } -func TestGetResponseDescriptorStatus(t *testing.T) { +func TestFixedGetResponseDescriptorStatus(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() diff --git a/test/algorithm/rolling_window_test.go b/test/algorithm/rolling_window_test.go new file mode 100644 index 000000000..a1b816b3b --- /dev/null +++ b/test/algorithm/rolling_window_test.go @@ -0,0 +1,277 @@ +package algorithm + +import ( + "testing" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/utils" + "github.com/envoyproxy/ratelimit/test/common" + mock_utils 
"github.com/envoyproxy/ratelimit/test/mocks/utils" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/duration" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" +) + +func TestRollingIsOverLimit(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + var result int64 = 1e9 + var hitsAddend int64 = 1 + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset := algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(false, actualIsOverLimit) + assert.Equal(int64(9), actualLimitRemaining) + assert.Equal(0, actualDurationUntilReset) + + result = 0 + hitsAddend = 1 + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1e9) + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset = algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(false, actualIsOverLimit) + assert.Equal(int64(9), actualLimitRemaining) + assert.Equal(0, actualDurationUntilReset) + + result = 3600e9 + hitsAddend = 1 + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(4e9) + actualIsOverLimit, actualLimitRemaining, actualDurationUntilReset = algorithm.IsOverLimit(limit, result, hitsAddend) + + assert.Equal(true, actualIsOverLimit) + assert.Equal(int64(0), actualLimitRemaining) + assert.Equal(359, actualDurationUntilReset) +} + +func TestRollingIsOverLimitWithLocalCache(t *testing.T) { + 
assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + key := "key_value" + + timeSource := mock_utils.NewMockTimeSource(controller) + localCache := freecache.NewCache(100) + + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, localCache, 0.8, "") + assert.Equal(false, algorithm.IsOverLimitWithLocalCache(key)) + + localCache.Set([]byte(key), []byte{}, 1) + assert.Equal(true, algorithm.IsOverLimitWithLocalCache(key)) +} + +func TestRollingGenerateCacheKeys(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + var hitsAddend int64 = 1 + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limit := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + expectedResult := []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_0", PerSecond: true}}) + actualResult := algorithm.GenerateCacheKeys(request, limit, hitsAddend) + assert.Equal(expectedResult, actualResult) +} + +func TestRollingPopulateStats(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + algorithm.PopulateStats(limit, 1, 0, 0) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(0), 
limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 1, 0) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 0, 1) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) +} + +func TestRollingGetResponseDescriptorStatus(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + key := "key_value" + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + var results int64 = 0 + var hitsAddend int64 = 1 + isOverLimitWithLocalCache := false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + expectedResult := &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: 9, + DurationUntilReset: &duration.Duration{Nanos: 1e8}} + + actualResult := algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + assert.Equal(expectedResult, actualResult) +} + +func TestRollingGetExpirationSeconds(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + key := "key_value" + limit := config.NewRateLimit(10, 
pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + var results int64 = 0 + var hitsAddend int64 = 1 + isOverLimitWithLocalCache := false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult := int64(1) + actualResult := algorithm.GetExpirationSeconds() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + results = 2e9 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9 + 4e6)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(1) + actualResult = algorithm.GetExpirationSeconds() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + results = 0 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(7) + actualResult = algorithm.GetExpirationSeconds() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + results = 60e9 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(57) + actualResult = algorithm.GetExpirationSeconds() + assert.Equal(expectedResult, actualResult) +} + +func TestRollingGetResultsAfterIncrease(t *testing.T) { + assert := assert.New(t) + 
controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + key := "key_value" + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + var results int64 = 0 + var hitsAddend int64 = 1 + isOverLimitWithLocalCache := false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult := int64(1e9 + 1e8) + actualResult := algorithm.GetResultsAfterIncrease() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + results = 2e9 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9 + 4e6)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(2e9) + actualResult = algorithm.GetResultsAfterIncrease() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + results = 0 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(7e9) + actualResult = algorithm.GetResultsAfterIncrease() + assert.Equal(expectedResult, actualResult) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + results = 60e9 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + 
timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) + algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + + expectedResult = int64(60e9) + actualResult = algorithm.GetResultsAfterIncrease() + assert.Equal(expectedResult, actualResult) +} From 73df311c946c9da9043d5fac88f656d272adddf3 Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Mon, 15 Feb 2021 17:28:05 +0100 Subject: [PATCH 30/31] Refactor fixed and rolling window Signed-off-by: zufardhiyaulhaq --- src/algorithm/base_window.go | 104 +++++++++++++++++++++++++++ src/algorithm/fixed_window.go | 86 ++++------------------ src/algorithm/ratelimit_algorithm.go | 11 +-- src/algorithm/rolling_window.go | 94 +++++------------------- src/memcached/fixed_cache_impl.go | 12 ++-- src/memcached/windowed_cache_impl.go | 14 ++-- src/redis/fixed_cache_impl.go | 12 ++-- src/redis/windowed_cache_impl.go | 14 ++-- 8 files changed, 164 insertions(+), 183 deletions(-) create mode 100644 src/algorithm/base_window.go diff --git a/src/algorithm/base_window.go b/src/algorithm/base_window.go new file mode 100644 index 000000000..8a156a325 --- /dev/null +++ b/src/algorithm/base_window.go @@ -0,0 +1,104 @@ +package algorithm + +import ( + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/utils" + logger "github.com/sirupsen/logrus" +) + +type WindowImpl struct { + algorithm RatelimitAlgorithm + cacheKeyGenerator utils.CacheKeyGenerator + localCache *freecache.Cache + timeSource utils.TimeSource +} + +func (w *WindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, + isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { + if key == "" { + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 0, + } + } 
+ + if isOverLimitWithLocalCache { + PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) + + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: 0, + DurationUntilReset: w.algorithm.CalculateSimpleReset(limit, w.timeSource), + } + } + + isOverLimit, limitRemaining, durationUntilReset := w.algorithm.IsOverLimit(limit, int64(results), hitsAddend) + + if !isOverLimit { + duration := w.algorithm.CalculateReset(true, limit, w.timeSource) + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: uint32(limitRemaining), + DurationUntilReset: duration, + } + } else { + if w.localCache != nil { + durationUntilReset = utils.MaxInt(1, durationUntilReset) + + err := w.localCache.Set([]byte(key), []byte{}, durationUntilReset) + if err != nil { + logger.Errorf("Failing to set local cache key: %s", key) + } + } + duration := w.algorithm.CalculateReset(false, limit, w.timeSource) + return &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limit.Limit, + LimitRemaining: 0, + DurationUntilReset: duration, + } + } +} + +func (w *WindowImpl) IsOverLimitWithLocalCache(key string) bool { + if w.localCache != nil { + _, err := w.localCache.Get([]byte(key)) + if err == nil { + return true + } + } + return false +} + +func (w *WindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, + limits []*config.RateLimit, hitsAddend int64, timestamp int64) []utils.CacheKey { + return w.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), timestamp) +} + +func PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { + limit.Stats.NearLimit.Add(nearLimit) + limit.Stats.OverLimit.Add(overLimit) + limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) +} + +func (w *WindowImpl) GetExpirationSeconds() int64 { + 
return w.algorithm.GetExpirationSeconds() +} + +func (w *WindowImpl) GetResultsAfterIncrease() int64 { + return w.algorithm.GetResultsAfterIncrease() +} + +func NewWindow(algorithm RatelimitAlgorithm, cacheKeyPrefix string, localCache *freecache.Cache, timeSource utils.TimeSource) *WindowImpl { + return &WindowImpl{ + algorithm: algorithm, + cacheKeyGenerator: utils.NewCacheKeyGenerator(cacheKeyPrefix), + localCache: localCache, + timeSource: timeSource, + } +} diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index ec111ad21..2ef390224 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -1,13 +1,12 @@ package algorithm import ( + "github.com/golang/protobuf/ptypes/duration" "math" "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" - logger "github.com/sirupsen/logrus" ) var _ RatelimitAlgorithm = (*FixedWindowImpl)(nil) @@ -19,52 +18,6 @@ type FixedWindowImpl struct { nearLimitRatio float32 } -func (fw *FixedWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { - if key == "" { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 0, - } - } - if isOverLimitWithLocalCache { - fw.PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limit.Limit, - LimitRemaining: 0, - DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), - } - } - - isOverLimit, limitRemaining, durationUntilReset := fw.IsOverLimit(limit, int64(results), hitsAddend) - - if !isOverLimit { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - 
CurrentLimit: limit.Limit, - LimitRemaining: uint32(limitRemaining), - DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), - } - } else { - if fw.localCache != nil { - durationUntilReset = utils.MaxInt(1, durationUntilReset) - - err := fw.localCache.Set([]byte(key), []byte{}, durationUntilReset) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", key) - } - } - - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limit.Limit, - LimitRemaining: uint32(limitRemaining), - DurationUntilReset: utils.CalculateFixedReset(limit.Limit, fw.timeSource), - } - } -} - func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) { limitAfterIncrease := results limitBeforeIncrease := limitAfterIncrease - int64(hitsAddend) @@ -73,18 +26,18 @@ func (fw *FixedWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, h if limitAfterIncrease > overLimitThreshold { if limitBeforeIncrease >= overLimitThreshold { - fw.PopulateStats(limit, 0, uint64(hitsAddend), 0) + PopulateStats(limit, 0, uint64(hitsAddend), 0) } else { - fw.PopulateStats(limit, uint64(overLimitThreshold-utils.MaxInt64(nearLimitThreshold, limitBeforeIncrease)), uint64(limitAfterIncrease-overLimitThreshold), 0) + PopulateStats(limit, uint64(overLimitThreshold-utils.MaxInt64(nearLimitThreshold, limitBeforeIncrease)), uint64(limitAfterIncrease-overLimitThreshold), 0) } return true, 0, int(utils.UnitToDivider(limit.Limit.Unit)) } else { if limitAfterIncrease > nearLimitThreshold { if limitBeforeIncrease >= nearLimitThreshold { - fw.PopulateStats(limit, uint64(hitsAddend), 0, 0) + PopulateStats(limit, uint64(hitsAddend), 0, 0) } else { - fw.PopulateStats(limit, uint64(limitAfterIncrease-nearLimitThreshold), 0, 0) + PopulateStats(limit, uint64(limitAfterIncrease-nearLimitThreshold), 0, 0) } } @@ -92,27 +45,6 @@ func (fw *FixedWindowImpl) IsOverLimit(limit 
*config.RateLimit, results int64, h } } -func (fw *FixedWindowImpl) IsOverLimitWithLocalCache(key string) bool { - if fw.localCache != nil { - _, err := fw.localCache.Get([]byte(key)) - if err == nil { - return true - } - } - return false -} - -func (fw *FixedWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { - return fw.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), fw.timeSource.UnixNow()) -} - -func (fw *FixedWindowImpl) PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { - limit.Stats.NearLimit.Add(nearLimit) - limit.Stats.OverLimit.Add(overLimit) - limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) -} - func (fw *FixedWindowImpl) GetExpirationSeconds() int64 { return 0 } @@ -129,3 +61,11 @@ func NewFixedWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache. nearLimitRatio: nearLimitRatio, } } + +func (c *FixedWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + return utils.CalculateFixedReset(limit.Limit, timeSource) +} + +func (c *FixedWindowImpl) CalculateReset(isOverLimit bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + return c.CalculateSimpleReset(limit, timeSource) +} diff --git a/src/algorithm/ratelimit_algorithm.go b/src/algorithm/ratelimit_algorithm.go index e1d5fa9e9..be2395981 100644 --- a/src/algorithm/ratelimit_algorithm.go +++ b/src/algorithm/ratelimit_algorithm.go @@ -1,20 +1,15 @@ package algorithm import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" + "github.com/golang/protobuf/ptypes/duration" ) type RatelimitAlgorithm interface { + CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration + CalculateReset(isOverLimit 
bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) - IsOverLimitWithLocalCache(key string) bool - - GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus GetExpirationSeconds() int64 GetResultsAfterIncrease() int64 - - GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey - PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) } diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index 3ec839aef..98feeb0e4 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -4,15 +4,11 @@ import ( "math" "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" "github.com/golang/protobuf/ptypes/duration" - logger "github.com/sirupsen/logrus" ) -const DummyCacheKeyTime = 0 - var _ RatelimitAlgorithm = (*RollingWindowImpl)(nil) type RollingWindowImpl struct { @@ -26,57 +22,6 @@ type RollingWindowImpl struct { diff int64 } -func (rw *RollingWindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateLimit, results int64, isOverLimitWithLocalCache bool, hitsAddend int64) *pb.RateLimitResponse_DescriptorStatus { - if key == "" { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: nil, - LimitRemaining: 0, - } - } - - if isOverLimitWithLocalCache { - rw.PopulateStats(limit, 0, uint64(hitsAddend), uint64(hitsAddend)) - - secondsToReset := utils.UnitToDivider(limit.Limit.Unit) - secondsToReset -= utils.NanosecondsToSeconds(rw.timeSource.UnixNanoNow()) % secondsToReset - - return 
&pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limit.Limit, - LimitRemaining: 0, - DurationUntilReset: &duration.Duration{Seconds: secondsToReset}, - } - } - - isOverLimit, limitRemaining, durationUntilReset := rw.IsOverLimit(limit, int64(results), hitsAddend) - - if !isOverLimit { - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limit.Limit, - LimitRemaining: uint32(limitRemaining), - DurationUntilReset: utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt), - } - } else { - if rw.localCache != nil { - durationUntilReset = utils.MaxInt(1, durationUntilReset) - - err := rw.localCache.Set([]byte(key), []byte{}, durationUntilReset) - if err != nil { - logger.Errorf("Failing to set local cache key: %s", key) - } - } - - return &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OVER_LIMIT, - CurrentLimit: limit.Limit, - LimitRemaining: 0, - DurationUntilReset: utils.NanosecondsToDuration(int64(math.Ceil(float64(rw.tat - rw.arrivedAt)))), - } - } -} - func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitsAddend int64) (bool, int64, int) { now := rw.timeSource.UnixNanoNow() @@ -105,28 +50,18 @@ func (rw *RollingWindowImpl) IsOverLimit(limit *config.RateLimit, results int64, hitNearLimit := quantity - (utils.MaxInt64(previousLimitRemaining, nearLimitWindow) - nearLimitWindow) if rw.diff < 0 { - rw.PopulateStats(limit, uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow)), uint64(quantity-previousLimitRemaining), 0) + PopulateStats(limit, uint64(utils.MinInt64(previousLimitRemaining, nearLimitWindow)), uint64(quantity-previousLimitRemaining), 0) return true, 0, int(utils.NanosecondsToSeconds(-rw.diff)) } else { if hitNearLimit > 0 { - rw.PopulateStats(limit, uint64(hitNearLimit), 0, 0) + PopulateStats(limit, uint64(hitNearLimit), 0, 0) } return false, limitRemaining, 0 } } -func (rw *RollingWindowImpl) 
IsOverLimitWithLocalCache(key string) bool { - if rw.localCache != nil { - _, err := rw.localCache.Get([]byte(key)) - if err == nil { - return true - } - } - return false -} - func (rw *RollingWindowImpl) GetExpirationSeconds() int64 { if rw.diff < 0 { return utils.NanosecondsToSeconds(rw.tat-rw.arrivedAt) + 1 @@ -141,17 +76,6 @@ func (rw *RollingWindowImpl) GetResultsAfterIncrease() int64 { return rw.newTat } -func (rw *RollingWindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, - limits []*config.RateLimit, hitsAddend int64) []utils.CacheKey { - return rw.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), DummyCacheKeyTime) -} - -func (rw *RollingWindowImpl) PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { - limit.Stats.NearLimit.Add(nearLimit) - limit.Stats.OverLimit.Add(overLimit) - limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) -} - func NewRollingWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *RollingWindowImpl { return &RollingWindowImpl{ timeSource: timeSource, @@ -160,3 +84,17 @@ func NewRollingWindowAlgorithm(timeSource utils.TimeSource, localCache *freecach nearLimitRatio: nearLimitRatio, } } + +func (rw *RollingWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + secondsToReset := utils.UnitToDivider(limit.Limit.Unit) + secondsToReset -= utils.NanosecondsToSeconds(timeSource.UnixNanoNow()) % secondsToReset + return &duration.Duration{Seconds: secondsToReset} +} + +func (rw *RollingWindowImpl) CalculateReset(isOverLimit bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + if isOverLimit { + return utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt) + } else { + return utils.NanosecondsToDuration(int64(math.Ceil(float64(rw.tat - rw.arrivedAt)))) + } +} diff --git 
a/src/memcached/fixed_cache_impl.go b/src/memcached/fixed_cache_impl.go index 94628fb89..ff2cb50cb 100644 --- a/src/memcached/fixed_cache_impl.go +++ b/src/memcached/fixed_cache_impl.go @@ -25,7 +25,7 @@ type fixedRateLimitCacheImpl struct { localCache *freecache.Cache waitGroup sync.WaitGroup nearLimitRatio float32 - algorithm algorithm.RatelimitAlgorithm + algorithm *algorithm.WindowImpl } var _ limiter.RateLimitCache = (*fixedRateLimitCacheImpl)(nil) @@ -41,7 +41,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend, this.timeSource.UnixNow()) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -156,11 +156,11 @@ func NewFixedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSourc expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm.NewFixedWindowAlgorithm( - timeSource, - localCache, - nearLimitRatio, + algorithm: algorithm.NewWindow( + algorithm.NewFixedWindowAlgorithm(timeSource, localCache, nearLimitRatio, cacheKeyPrefix), cacheKeyPrefix, + localCache, + timeSource, ), } } diff --git a/src/memcached/windowed_cache_impl.go b/src/memcached/windowed_cache_impl.go index 002de0c5f..cbc20088c 100644 --- a/src/memcached/windowed_cache_impl.go +++ b/src/memcached/windowed_cache_impl.go @@ -25,11 +25,13 @@ type windowedRateLimitCacheImpl struct { localCache *freecache.Cache waitGroup sync.WaitGroup nearLimitRatio float32 - algorithm algorithm.RatelimitAlgorithm + algorithm *algorithm.WindowImpl } var _ limiter.RateLimitCache = (*windowedRateLimitCacheImpl)(nil) +const DummyCacheKeyTime = 0 + func (this 
*windowedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -41,7 +43,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend, DummyCacheKeyTime) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) keysToGet := make([]string, 0, len(request.Descriptors)) @@ -148,11 +150,11 @@ func NewWindowedRateLimitCacheImpl(client driver.Client, timeSource utils.TimeSo expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm.NewRollingWindowAlgorithm( - timeSource, - localCache, - nearLimitRatio, + algorithm: algorithm.NewWindow( + algorithm.NewRollingWindowAlgorithm(timeSource, localCache, nearLimitRatio, cacheKeyPrefix), cacheKeyPrefix, + localCache, + timeSource, ), } } diff --git a/src/redis/fixed_cache_impl.go b/src/redis/fixed_cache_impl.go index d5de8fadd..d09e8940f 100644 --- a/src/redis/fixed_cache_impl.go +++ b/src/redis/fixed_cache_impl.go @@ -26,7 +26,7 @@ type fixedRateLimitCacheImpl struct { expirationJitterMaxSeconds int64 localCache *freecache.Cache nearLimitRatio float32 - algorithm algorithm.RatelimitAlgorithm + algorithm *algorithm.WindowImpl } func (this *fixedRateLimitCacheImpl) DoLimit( @@ -40,7 +40,7 @@ func (this *fixedRateLimitCacheImpl) DoLimit( hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. 
- cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend, this.timeSource.UnixNow()) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]int64, len(request.Descriptors)) @@ -112,11 +112,11 @@ func NewFixedRateLimitCacheImpl(client driver.Client, perSecondClient driver.Cli expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm.NewFixedWindowAlgorithm( - timeSource, - localCache, - nearLimitRatio, + algorithm: algorithm.NewWindow( + algorithm.NewFixedWindowAlgorithm(timeSource, localCache, nearLimitRatio, cacheKeyPrefix), cacheKeyPrefix, + localCache, + timeSource, ), } } diff --git a/src/redis/windowed_cache_impl.go b/src/redis/windowed_cache_impl.go index 14d4fe87c..1f9556869 100644 --- a/src/redis/windowed_cache_impl.go +++ b/src/redis/windowed_cache_impl.go @@ -35,9 +35,11 @@ type windowedRateLimitCacheImpl struct { expirationJitterMaxSeconds int64 localCache *freecache.Cache nearLimitRatio float32 - algorithm algorithm.RatelimitAlgorithm + algorithm *algorithm.WindowImpl } +const DummyCacheKeyTime = 0 + func (this *windowedRateLimitCacheImpl) DoLimit( ctx context.Context, request *pb.RateLimitRequest, @@ -49,7 +51,7 @@ func (this *windowedRateLimitCacheImpl) DoLimit( hitsAddend := utils.MaxInt64(1, int64(request.HitsAddend)) // First build a list of all cache keys that we are actually going to hit. - cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend) + cacheKeys := this.algorithm.GenerateCacheKeys(request, limits, hitsAddend, DummyCacheKeyTime) isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) tats := make([]int64, len(request.Descriptors)) @@ -156,11 +158,11 @@ func NewWindowedRateLimitCacheImpl(client driver.Client, perSecondClient driver. 
expirationJitterMaxSeconds: expirationJitterMaxSeconds, localCache: localCache, nearLimitRatio: nearLimitRatio, - algorithm: algorithm.NewRollingWindowAlgorithm( - timeSource, - localCache, - nearLimitRatio, + algorithm: algorithm.NewWindow( + algorithm.NewRollingWindowAlgorithm(timeSource, localCache, nearLimitRatio, cacheKeyPrefix), cacheKeyPrefix, + localCache, + timeSource, ), } } From a400046e3fd9408a9adba5ef0e10127331145118 Mon Sep 17 00:00:00 2001 From: zufardhiyaulhaq Date: Sat, 20 Feb 2021 07:54:05 +0100 Subject: [PATCH 31/31] refactor & add base window testing Signed-off-by: zufardhiyaulhaq --- README.md | 2 +- src/algorithm/base_window.go | 16 +-- src/algorithm/fixed_window.go | 19 ++-- src/algorithm/rolling_window.go | 20 ++-- test/algorithm/base_window_test.go | 157 ++++++++++++++++++++++++++ test/algorithm/fixed_window_test.go | 94 +++++---------- test/algorithm/rolling_window_test.go | 157 ++++++++++++-------------- 7 files changed, 284 insertions(+), 181 deletions(-) create mode 100644 test/algorithm/base_window_test.go diff --git a/README.md b/README.md index 3b3e4852d..9a6a19f31 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ For a limit of 60 requests per hour, there can only 60 requests in a single time Fixed window algorithm does not care when did the request arrive, all 60 can arrive at 01:01 or 01:50 and the limit will still reset at 02:00. 2. Rolling window -For a limit of 60 requests per hour. Initially it is able to take a burst of 60 requests at once, then the limit is restored by 1 each minute. Requests are allowed as long as there's still some available limit. +For a limit of 60 requests per hour. Initially rate limiter can take a burst of 60 requests at once, then the limit is restored by 1 each minute. Requests are allowed as long as there's still some available limit. Configure rate limit algorithm with `RATE_LIMIT_ALGORITHM` environment variable. Use `FIXED_WINDOW` and `ROLLING_WINDOW` respectively. 
diff --git a/src/algorithm/base_window.go b/src/algorithm/base_window.go index 8a156a325..bd9de5545 100644 --- a/src/algorithm/base_window.go +++ b/src/algorithm/base_window.go @@ -39,7 +39,7 @@ func (w *WindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateL isOverLimit, limitRemaining, durationUntilReset := w.algorithm.IsOverLimit(limit, int64(results), hitsAddend) if !isOverLimit { - duration := w.algorithm.CalculateReset(true, limit, w.timeSource) + duration := w.algorithm.CalculateReset(isOverLimit, limit, w.timeSource) return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, CurrentLimit: limit.Limit, @@ -55,7 +55,7 @@ func (w *WindowImpl) GetResponseDescriptorStatus(key string, limit *config.RateL logger.Errorf("Failing to set local cache key: %s", key) } } - duration := w.algorithm.CalculateReset(false, limit, w.timeSource) + duration := w.algorithm.CalculateReset(isOverLimit, limit, w.timeSource) return &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limit.Limit, @@ -80,12 +80,6 @@ func (w *WindowImpl) GenerateCacheKeys(request *pb.RateLimitRequest, return w.cacheKeyGenerator.GenerateCacheKeys(request, limits, uint32(hitsAddend), timestamp) } -func PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { - limit.Stats.NearLimit.Add(nearLimit) - limit.Stats.OverLimit.Add(overLimit) - limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) -} - func (w *WindowImpl) GetExpirationSeconds() int64 { return w.algorithm.GetExpirationSeconds() } @@ -94,6 +88,12 @@ func (w *WindowImpl) GetResultsAfterIncrease() int64 { return w.algorithm.GetResultsAfterIncrease() } +func PopulateStats(limit *config.RateLimit, nearLimit uint64, overLimit uint64, overLimitWithLocalCache uint64) { + limit.Stats.NearLimit.Add(nearLimit) + limit.Stats.OverLimit.Add(overLimit) + limit.Stats.OverLimitWithLocalCache.Add(overLimitWithLocalCache) +} + 
func NewWindow(algorithm RatelimitAlgorithm, cacheKeyPrefix string, localCache *freecache.Cache, timeSource utils.TimeSource) *WindowImpl { return &WindowImpl{ algorithm: algorithm, diff --git a/src/algorithm/fixed_window.go b/src/algorithm/fixed_window.go index 2ef390224..8b361f47e 100644 --- a/src/algorithm/fixed_window.go +++ b/src/algorithm/fixed_window.go @@ -1,9 +1,10 @@ package algorithm import ( - "github.com/golang/protobuf/ptypes/duration" "math" + "github.com/golang/protobuf/ptypes/duration" + "github.com/coocood/freecache" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/utils" @@ -53,6 +54,14 @@ func (fw *FixedWindowImpl) GetResultsAfterIncrease() int64 { return 0 } +func (fw *FixedWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + return utils.CalculateFixedReset(limit.Limit, timeSource) +} + +func (fw *FixedWindowImpl) CalculateReset(isOverLimit bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { + return fw.CalculateSimpleReset(limit, timeSource) +} + func NewFixedWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *FixedWindowImpl { return &FixedWindowImpl{ timeSource: timeSource, @@ -61,11 +70,3 @@ func NewFixedWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache. 
nearLimitRatio: nearLimitRatio, } } - -func (c *FixedWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { - return utils.CalculateFixedReset(limit.Limit, timeSource) -} - -func (c *FixedWindowImpl) CalculateReset(isOverLimit bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { - return c.CalculateSimpleReset(limit, timeSource) -} diff --git a/src/algorithm/rolling_window.go b/src/algorithm/rolling_window.go index 98feeb0e4..2c0c43ac4 100644 --- a/src/algorithm/rolling_window.go +++ b/src/algorithm/rolling_window.go @@ -76,15 +76,6 @@ func (rw *RollingWindowImpl) GetResultsAfterIncrease() int64 { return rw.newTat } -func NewRollingWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *RollingWindowImpl { - return &RollingWindowImpl{ - timeSource: timeSource, - cacheKeyGenerator: utils.NewCacheKeyGenerator(cacheKeyPrefix), - localCache: localCache, - nearLimitRatio: nearLimitRatio, - } -} - func (rw *RollingWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { secondsToReset := utils.UnitToDivider(limit.Limit.Unit) secondsToReset -= utils.NanosecondsToSeconds(timeSource.UnixNanoNow()) % secondsToReset @@ -92,9 +83,18 @@ func (rw *RollingWindowImpl) CalculateSimpleReset(limit *config.RateLimit, timeS } func (rw *RollingWindowImpl) CalculateReset(isOverLimit bool, limit *config.RateLimit, timeSource utils.TimeSource) *duration.Duration { - if isOverLimit { + if !isOverLimit { return utils.NanosecondsToDuration(rw.newTat - rw.arrivedAt) } else { return utils.NanosecondsToDuration(int64(math.Ceil(float64(rw.tat - rw.arrivedAt)))) } } + +func NewRollingWindowAlgorithm(timeSource utils.TimeSource, localCache *freecache.Cache, nearLimitRatio float32, cacheKeyPrefix string) *RollingWindowImpl { + return &RollingWindowImpl{ + timeSource: timeSource, + cacheKeyGenerator: 
utils.NewCacheKeyGenerator(cacheKeyPrefix), + localCache: localCache, + nearLimitRatio: nearLimitRatio, + } +} diff --git a/test/algorithm/base_window_test.go b/test/algorithm/base_window_test.go new file mode 100644 index 000000000..dee8c3b91 --- /dev/null +++ b/test/algorithm/base_window_test.go @@ -0,0 +1,157 @@ +package algorithm + +import ( + "testing" + + "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/algorithm" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/utils" + "github.com/envoyproxy/ratelimit/test/common" + mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" + "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/duration" + stats "github.com/lyft/gostats" + "github.com/stretchr/testify/assert" +) + +func TestGetResponseDescriptorStatus(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + + // Fixed Window algorithm + fixedAlgorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + baseAlgorithm := algorithm.NewWindow(fixedAlgorithm, "", nil, timeSource) + + key := "key_value" + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + var results int64 = 1 + var hitsAddend int64 = 1 + isOverLimitWithLocalCache := false + + timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(2) + + expectedResult := &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: 9, + DurationUntilReset: utils.CalculateFixedReset(limit.Limit, timeSource)} + + actualResult := baseAlgorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + assert.Equal(expectedResult, actualResult) + + // 
Rolling Window algorithm + rollingAlgorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + baseAlgorithm = algorithm.NewWindow(rollingAlgorithm, "", nil, timeSource) + + key = "key_value" + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + results = 0 + hitsAddend = 1 + isOverLimitWithLocalCache = false + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + expectedResult = &pb.RateLimitResponse_DescriptorStatus{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: limit.Limit, + LimitRemaining: 9, + DurationUntilReset: &duration.Duration{Nanos: 1e8}} + + actualResult = baseAlgorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + assert.Equal(expectedResult, actualResult) +} + +func TestIsOverLimitWithLocalCache(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + key := "key_value" + + timeSource := mock_utils.NewMockTimeSource(controller) + + // Fixed Window algorithm + fixedLocalCache := freecache.NewCache(100) + + fixedAlgorithm := algorithm.NewFixedWindowAlgorithm(timeSource, fixedLocalCache, 0.8, "") + baseAlgorithm := algorithm.NewWindow(fixedAlgorithm, "", fixedLocalCache, timeSource) + + assert.Equal(false, baseAlgorithm.IsOverLimitWithLocalCache(key)) + + fixedLocalCache.Set([]byte(key), []byte{}, 1) + assert.Equal(true, baseAlgorithm.IsOverLimitWithLocalCache(key)) + + // Rolling Window algorithm + rollingLocalCache := freecache.NewCache(100) + + rollingAlgorithm := algorithm.NewRollingWindowAlgorithm(timeSource, rollingLocalCache, 0.8, "") + baseAlgorithm = algorithm.NewWindow(rollingAlgorithm, "", rollingLocalCache, timeSource) + + assert.Equal(false, baseAlgorithm.IsOverLimitWithLocalCache(key)) + + rollingLocalCache.Set([]byte(key), []byte{}, 1) + assert.Equal(true, baseAlgorithm.IsOverLimitWithLocalCache(key)) +} + +func TestGenerateCacheKeys(t *testing.T) { + assert 
:= assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + timeSource := mock_utils.NewMockTimeSource(controller) + statsStore := stats.NewStore(stats.NewNullSink(), false) + + var hitsAddend int64 = 1 + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limit := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + // Fixed Window algorithm + fixedAlgorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") + baseAlgorithm := algorithm.NewWindow(fixedAlgorithm, "", nil, timeSource) + + timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1) + + expectedResult := []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_1", PerSecond: true}}) + actualResult := baseAlgorithm.GenerateCacheKeys(request, limit, hitsAddend, 1) + assert.Equal(expectedResult, actualResult) + + // Rolling Window algorithm + rollingAlgorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + baseAlgorithm = algorithm.NewWindow(rollingAlgorithm, "", nil, timeSource) + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + + expectedResult = []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_0", PerSecond: true}}) + actualResult = baseAlgorithm.GenerateCacheKeys(request, limit, hitsAddend, 0) + assert.Equal(expectedResult, actualResult) +} + +func TestPopulateStats(t *testing.T) { + assert := assert.New(t) + controller := gomock.NewController(t) + defer controller.Finish() + + statsStore := stats.NewStore(stats.NewNullSink(), false) + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + algorithm.PopulateStats(limit, 1, 0, 0) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 1, 0) + 
assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + + algorithm.PopulateStats(limit, 0, 0, 1) + assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) + assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) +} diff --git a/test/algorithm/fixed_window_test.go b/test/algorithm/fixed_window_test.go index 565a1b170..d3ce07dd1 100644 --- a/test/algorithm/fixed_window_test.go +++ b/test/algorithm/fixed_window_test.go @@ -3,14 +3,12 @@ package algorithm import ( "testing" - "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/utils" - "github.com/envoyproxy/ratelimit/test/common" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "github.com/golang/mock/gomock" + "github.com/golang/protobuf/ptypes/duration" stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" ) @@ -55,24 +53,7 @@ func TestFixedIsOverLimit(t *testing.T) { assert.Equal(1, actualDurationUntilReset) } -func TestFixedIsOverLimitWithLocalCache(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - - key := "key_value" - - timeSource := mock_utils.NewMockTimeSource(controller) - localCache := freecache.NewCache(100) - - algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, localCache, 0.8, "") - assert.Equal(false, algorithm.IsOverLimitWithLocalCache(key)) - - localCache.Set([]byte(key), []byte{}, 1) - assert.Equal(true, algorithm.IsOverLimitWithLocalCache(key)) -} - -func TestFixedGenerateCacheKeys(t *testing.T) { +func TestFixedCalculateSimpleReset(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer 
controller.Finish() @@ -81,47 +62,29 @@ func TestFixedGenerateCacheKeys(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") - var hitsAddend int64 = 1 - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limit := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1) - - expectedResult := []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_1", PerSecond: true}}) - actualResult := algorithm.GenerateCacheKeys(request, limit, hitsAddend) - assert.Equal(expectedResult, actualResult) -} - -func TestFixedPopulateStats(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() - - timeSource := mock_utils.NewMockTimeSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") - limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) - timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(1) + actualResetDuration := algorithm.CalculateSimpleReset(limit, timeSource) + expectedResetDuration := &duration.Duration{Seconds: 1} + assert.Equal(expectedResetDuration, actualResetDuration) + + timeSource.EXPECT().UnixNow().Return(int64(30)).MaxTimes(1) + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) - algorithm.PopulateStats(limit, 1, 0, 0) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + actualResetDuration = algorithm.CalculateSimpleReset(limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 30} + assert.Equal(expectedResetDuration, 
actualResetDuration) - algorithm.PopulateStats(limit, 0, 1, 0) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + timeSource.EXPECT().UnixNow().Return(int64(60)).MaxTimes(1) + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore) - algorithm.PopulateStats(limit, 0, 0, 1) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) - assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) + actualResetDuration = algorithm.CalculateSimpleReset(limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 59 * 60} + assert.Equal(expectedResetDuration, actualResetDuration) } -func TestFixedGetResponseDescriptorStatus(t *testing.T) { +func TestFixedCalculateReset(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -130,20 +93,17 @@ func TestFixedGetResponseDescriptorStatus(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) algorithm := algorithm.NewFixedWindowAlgorithm(timeSource, nil, 0.8, "") - key := "key_value" - limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) - var results int64 = 1 - var hitsAddend int64 = 1 - isOverLimitWithLocalCache := false + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + + timeSource.EXPECT().UnixNow().Return(int64(45)).MaxTimes(1) - timeSource.EXPECT().UnixNow().Return(int64(1)).MaxTimes(2) + actualResetDuration := algorithm.CalculateReset(true, limit, timeSource) + expectedResetDuration := &duration.Duration{Seconds: 15} + assert.Equal(expectedResetDuration, actualResetDuration) - expectedResult := &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limit.Limit, - LimitRemaining: 
9, - DurationUntilReset: utils.CalculateFixedReset(limit.Limit, timeSource)} + timeSource.EXPECT().UnixNow().Return(int64(45)).MaxTimes(1) - actualResult := algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) - assert.Equal(expectedResult, actualResult) + actualResetDuration = algorithm.CalculateReset(false, limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 15} + assert.Equal(expectedResetDuration, actualResetDuration) } diff --git a/test/algorithm/rolling_window_test.go b/test/algorithm/rolling_window_test.go index a1b816b3b..5259d1ae5 100644 --- a/test/algorithm/rolling_window_test.go +++ b/test/algorithm/rolling_window_test.go @@ -3,12 +3,9 @@ package algorithm import ( "testing" - "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/algorithm" "github.com/envoyproxy/ratelimit/src/config" - "github.com/envoyproxy/ratelimit/src/utils" - "github.com/envoyproxy/ratelimit/test/common" mock_utils "github.com/envoyproxy/ratelimit/test/mocks/utils" "github.com/golang/mock/gomock" "github.com/golang/protobuf/ptypes/duration" @@ -59,24 +56,38 @@ func TestRollingIsOverLimit(t *testing.T) { assert.Equal(359, actualDurationUntilReset) } -func TestRollingIsOverLimitWithLocalCache(t *testing.T) { +func TestRollingCalculateSimpleReset(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() - key := "key_value" - timeSource := mock_utils.NewMockTimeSource(controller) - localCache := freecache.NewCache(100) + statsStore := stats.NewStore(stats.NewNullSink(), false) + algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + + timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + + actualResetDuration := algorithm.CalculateSimpleReset(limit, 
timeSource) + expectedResetDuration := &duration.Duration{Seconds: 1} + assert.Equal(expectedResetDuration, actualResetDuration) + + timeSource.EXPECT().UnixNanoNow().Return(int64(30 * 1e9)).MaxTimes(1) + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) + + actualResetDuration = algorithm.CalculateSimpleReset(limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 30} + assert.Equal(expectedResetDuration, actualResetDuration) - algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, localCache, 0.8, "") - assert.Equal(false, algorithm.IsOverLimitWithLocalCache(key)) + timeSource.EXPECT().UnixNanoNow().Return(int64(60 * 1e9)).MaxTimes(1) + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore) - localCache.Set([]byte(key), []byte{}, 1) - assert.Equal(true, algorithm.IsOverLimitWithLocalCache(key)) + actualResetDuration = algorithm.CalculateSimpleReset(limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 59 * 60} + assert.Equal(expectedResetDuration, actualResetDuration) } -func TestRollingGenerateCacheKeys(t *testing.T) { +func TestRollingCalculateReset(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() @@ -85,71 +96,61 @@ func TestRollingGenerateCacheKeys(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") - var hitsAddend int64 = 1 - request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) - limit := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - expectedResult := []utils.CacheKey([]utils.CacheKey{{Key: "domain_key_value_0", PerSecond: true}}) - actualResult := algorithm.GenerateCacheKeys(request, limit, hitsAddend) - 
assert.Equal(expectedResult, actualResult) -} + var results int64 = 0 + var hitsAddend int64 = 1 + limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) -func TestRollingPopulateStats(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() + // populating tat, newTat, arriveAt + // that are required to execute CalculateReset - timeSource := mock_utils.NewMockTimeSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + // period = 1 minute = 60 seconds + // limit = 10 request/minute + // emissionInterval = 6 second + // request = 1 + // increment = emissionInterval*request = 6 second - limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) + // arriveAt = 1 second + // tat = 0 second - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + // newTat should be max(arriveAt,tat)+increment = 7 second + // DurationUntilReset should be newTat-arriveAt = 6 seconds - algorithm.PopulateStats(limit, 1, 0, 0) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + algorithm.IsOverLimit(limit, results, hitsAddend) + isOverLimit := false - algorithm.PopulateStats(limit, 0, 1, 0) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(1), limit.Stats.OverLimit.Value()) - assert.Equal(uint64(0), limit.Stats.OverLimitWithLocalCache.Value()) + actualResetDuration := algorithm.CalculateReset(isOverLimit, limit, timeSource) + expectedResetDuration := &duration.Duration{Seconds: 6} + assert.Equal(expectedResetDuration, actualResetDuration) - algorithm.PopulateStats(limit, 0, 0, 1) - assert.Equal(uint64(1), limit.Stats.NearLimit.Value()) - assert.Equal(uint64(1), limit.Stats.OverLimit.Value())
- assert.Equal(uint64(1), limit.Stats.OverLimitWithLocalCache.Value()) -} + timeSource.EXPECT().UnixNanoNow().Return(int64(3 * 60 * 1e9)).MaxTimes(1) -func TestRollingGetResponseDescriptorStatus(t *testing.T) { - assert := assert.New(t) - controller := gomock.NewController(t) - defer controller.Finish() + results = 72 * 60 * 1e9 + hitsAddend = 1 + limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_HOUR, "key_value", statsStore) - timeSource := mock_utils.NewMockTimeSource(controller) - statsStore := stats.NewStore(stats.NewNullSink(), false) - algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") + // populating tat, newTat, arriveAt + // that are required to execute CalculateReset - key := "key_value" - limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) - var results int64 = 0 - var hitsAddend int64 = 1 - isOverLimitWithLocalCache := false + // period = 1 hour = 3600 seconds + // limit = 10 request/hour + // emissionInterval = 6 minute = 360 second + // request = 1 + // increment = emissionInterval*request = 6 minute = 360 second - timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) + // arriveAt = 3 minute + // tat = 72 minute - expectedResult := &pb.RateLimitResponse_DescriptorStatus{ - Code: pb.RateLimitResponse_OK, - CurrentLimit: limit.Limit, - LimitRemaining: 9, - DurationUntilReset: &duration.Duration{Nanos: 1e8}} + // newTat should be max(arriveAt,tat)+increment = 78 minute (not used) + // DurationUntilReset should be tat-arriveAt = 69 minutes - actualResult := algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) - assert.Equal(expectedResult, actualResult) + algorithm.IsOverLimit(limit, results, hitsAddend) + isOverLimit = true + + actualResetDuration = algorithm.CalculateReset(isOverLimit, limit, timeSource) + expectedResetDuration = &duration.Duration{Seconds: 69 * 60} + assert.Equal(expectedResetDuration,
actualResetDuration) } func TestRollingGetExpirationSeconds(t *testing.T) { @@ -161,53 +162,45 @@ func TestRollingGetExpirationSeconds(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") - key := "key_value" limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) var results int64 = 0 var hitsAddend int64 = 1 - isOverLimitWithLocalCache := false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult := int64(1) actualResult := algorithm.GetExpirationSeconds() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) results = 2e9 hitsAddend = 1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9 + 4e6)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult = int64(1) actualResult = algorithm.GetExpirationSeconds() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) results = 0 hitsAddend = 1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult = int64(7) actualResult = algorithm.GetExpirationSeconds() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) results = 60e9 hitsAddend = 
1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult = int64(57) actualResult = algorithm.GetExpirationSeconds() @@ -223,53 +216,45 @@ func TestRollingGetResultsAfterIncrease(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) algorithm := algorithm.NewRollingWindowAlgorithm(timeSource, nil, 0.8, "") - key := "key_value" limit := config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) var results int64 = 0 var hitsAddend int64 = 1 - isOverLimitWithLocalCache := false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult := int64(1e9 + 1e8) actualResult := algorithm.GetResultsAfterIncrease() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore) results = 2e9 hitsAddend = 1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9 + 4e6)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult = int64(2e9) actualResult = algorithm.GetResultsAfterIncrease() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) results = 0 hitsAddend = 1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(1e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, 
hitsAddend) expectedResult = int64(7e9) actualResult = algorithm.GetResultsAfterIncrease() assert.Equal(expectedResult, actualResult) - key = "key_value" limit = config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key_value", statsStore) results = 60e9 hitsAddend = 1 - isOverLimitWithLocalCache = false timeSource.EXPECT().UnixNanoNow().Return(int64(4e9)).MaxTimes(1) - algorithm.GetResponseDescriptorStatus(key, limit, results, isOverLimitWithLocalCache, hitsAddend) + algorithm.IsOverLimit(limit, results, hitsAddend) expectedResult = int64(60e9) actualResult = algorithm.GetResultsAfterIncrease()