diff --git a/docker-compose-example.yml b/docker-compose-example.yml
index f96879ee4..9739feaaf 100644
--- a/docker-compose-example.yml
+++ b/docker-compose-example.yml
@@ -11,12 +11,19 @@ services:
 
   statsd:
     image: prom/statsd-exporter:v0.18.0
+    entrypoint: /bin/statsd_exporter
+    command:
+      - "--statsd.mapping-config=/etc/statsd-exporter/conf.yaml"
     expose:
       - 9125
+      - 9102
     ports:
       - 9125:9125
+      - 9102:9102 # Visit http://localhost:9102/metrics to see metrics in Prometheus format
     networks:
       - ratelimit-network
+    volumes:
+      - ./examples/prom-statsd-exporter/conf.yaml:/etc/statsd-exporter/conf.yaml
 
   ratelimit:
     image: envoyproxy/ratelimit:master
diff --git a/examples/envoy/mock.yaml b/examples/envoy/mock.yaml
index bd85fc3d2..87574494b 100644
--- a/examples/envoy/mock.yaml
+++ b/examples/envoy/mock.yaml
@@ -1,31 +1,32 @@
 static_resources:
   listeners:
-  - address:
-      socket_address:
-        address: 0.0.0.0
-        port_value: 9999
-    filter_chains:
-    - filters:
-      - name: envoy.http_connection_manager
-        config:
-          codec_type: auto
-          stat_prefix: ingress
-          route_config:
-            name: ingress
-            virtual_hosts:
-            - name: backend
-              domains:
-              - "*"
-              routes:
-              - match:
-                  prefix: "/"
-                direct_response:
-                  status: "200"
-                  body:
-                    inline_string: "Hello World"
-          http_filters:
-          - name: envoy.router
-            config: {}
+  - address:
+      socket_address:
+        address: 0.0.0.0
+        port_value: 9999
+    filter_chains:
+      - filters:
+          - name: envoy.filters.network.http_connection_manager
+            typed_config:
+              "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+              codec_type: AUTO
+              stat_prefix: ingress
+              route_config:
+                name: ingress
+                virtual_hosts:
+                  - name: backend
+                    domains:
+                      - "*"
+                    routes:
+                      - match:
+                          prefix: "/"
+                        direct_response:
+                          status: "200"
+                          body:
+                            inline_string: "Hello World"
+              http_filters:
+                - name: envoy.filters.http.router
+                  typed_config: {}
 admin:
   access_log_path: "/dev/null"
   address:
diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml
index bb45503f9..4c5605d1d 100644
--- a/examples/envoy/proxy.yaml
+++ b/examples/envoy/proxy.yaml
@@ -41,9 +41,10 @@
         port_value: 8888
     filter_chains:
       - filters:
-          - name: envoy.http_connection_manager
-            config:
-              codec_type: auto
+          - name: envoy.filters.network.http_connection_manager
+            typed_config:
+              "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
+              codec_type: AUTO
               stat_prefix: ingress
               http_filters:
                 - name: envoy.rate_limit
@@ -57,8 +58,8 @@
                       grpc_service:
                         envoy_grpc:
                           cluster_name: ratelimit
-                - name: envoy.router
-                  config: {}
+                - name: envoy.filters.http.router
+                  typed_config: {}
               route_config:
                 name: route
                 virtual_hosts:
@@ -72,8 +73,8 @@
                           cluster: mock
                           rate_limits:
                             - actions:
-                                - source_cluster: {}
-                                - destination_cluster: {}
+                                - source_cluster: {} # This action's value is populated from the "service-cluster" arg passed when starting Envoy; in this example it is "proxy" (see docker-compose-example.yml)
+                                - destination_cluster: {} # This action's value is populated from the "cluster" field set above -- "mock"
                       - match:
                           prefix: /header
                         route:
diff --git a/examples/prom-statsd-exporter/conf.yaml b/examples/prom-statsd-exporter/conf.yaml
new file mode 100644
index 000000000..0706548d8
--- /dev/null
+++ b/examples/prom-statsd-exporter/conf.yaml
@@ -0,0 +1,64 @@
+mappings: # Requires statsd exporter >= v0.6.0 since it uses the "drop" action.
+  - match:
+      "ratelimit.service.rate_limit.*.*.near_limit"
+    name: "ratelimit_service_rate_limit_near_limit"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+  - match:
+      "ratelimit.service.rate_limit.*.*.over_limit"
+    name: "ratelimit_service_rate_limit_over_limit"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+  - match:
+      "ratelimit.service.rate_limit.*.*.total_hits"
+    name: "ratelimit_service_rate_limit_total_hits"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+
+  - match:
+      "ratelimit.service.rate_limit.*.*.*.near_limit"
+    name: "ratelimit_service_rate_limit_near_limit"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+      key2: "$3"
+  - match:
+      "ratelimit.service.rate_limit.*.*.*.over_limit"
+    name: "ratelimit_service_rate_limit_over_limit"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+      key2: "$3"
+  - match:
+      "ratelimit.service.rate_limit.*.*.*.total_hits"
+    name: "ratelimit_service_rate_limit_total_hits"
+    timer_type: "histogram"
+    labels:
+      domain: "$1"
+      key1: "$2"
+      key2: "$3"
+
+  - match: "ratelimit.service.call.should_rate_limit.*"
+    name: "ratelimit_service_should_rate_limit_error"
+    match_metric_type: counter
+    labels:
+      err_type: "$1"
+
+  - match: "ratelimit.service.config_load_success"
+    name: "ratelimit_service_config_load_success"
+    match_metric_type: counter
+  - match: "ratelimit.service.config_load_error"
+    name: "ratelimit_service_config_load_error"
+    match_metric_type: counter
+  - match: "."
+    match_type: "regex"
+    action: "drop"
+    name: "dropped"
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 91b560120..b76c8ada6 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -14,7 +14,6 @@ import (
 	pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
 	"github.com/envoyproxy/ratelimit/src/service_cmd/runner"
-	"github.com/envoyproxy/ratelimit/src/utils"
 	"github.com/envoyproxy/ratelimit/test/common"
 	"github.com/golang/protobuf/ptypes/duration"
 	"github.com/stretchr/testify/assert"
@@ -22,18 +21,15 @@ import (
 	"google.golang.org/grpc"
 )
 
-func newDescriptorStatus(
-	status pb.RateLimitResponse_Code, requestsPerUnit uint32,
-	unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) *pb.RateLimitResponse_DescriptorStatus {
+func newDescriptorStatus(status pb.RateLimitResponse_Code, requestsPerUnit uint32, unit pb.RateLimitResponse_RateLimit_Unit, limitRemaining uint32, durRemaining *duration.Duration) *pb.RateLimitResponse_DescriptorStatus {
 
 	limit := &pb.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}
-	sec := utils.UnitToDivider(unit)
-	now := time.Now().Unix()
+
 	return &pb.RateLimitResponse_DescriptorStatus{
-		Code:               status,
-		CurrentLimit:       limit,
-		LimitRemaining:     limitRemaining,
-		DurationUntilReset: &duration.Duration{Seconds: sec - now%sec},
+		Code:               status,
+		CurrentLimit:       limit,
+		LimitRemaining:     limitRemaining,
+		DurationUntilReset: &duration.Duration{Seconds: durRemaining.GetSeconds()},
 	}
 }
@@ -298,12 +294,14 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 		response, err = c.ShouldRateLimit(
 			context.Background(),
 			common.NewRateLimitRequest("basic", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
+		durRemaining := response.GetStatuses()[0].DurationUntilReset
+
 		common.AssertProtoEqual(
 			assert,
 			&pb.RateLimitResponse{
 				OverallCode: pb.RateLimitResponse_OK,
 				Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-					newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}},
+					newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}},
 			response)
 		assert.NoError(err)
@@ -338,13 +336,14 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 			status = pb.RateLimitResponse_OVER_LIMIT
 			limitRemaining = 0
 		}
+		durRemaining = response.GetStatuses()[0].DurationUntilReset
 		common.AssertProtoEqual(
 			assert,
 			&pb.RateLimitResponse{
 				OverallCode: status,
 				Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-					newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}},
+					newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining, durRemaining)}},
 			response)
 		assert.NoError(err)
 		key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache)))
@@ -355,7 +354,6 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 		} else {
 			assert.Equal(0, int(key2OverlimitCounter.Value()))
 		}
-
 		key2LocalCacheOverLimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit_with_local_cache", getCacheKey("key2", enable_local_cache)))
 		if enable_local_cache && i >= 20 {
 			assert.Equal(i-20, int(key2LocalCacheOverLimitCounter.Value()))
@@ -402,14 +400,15 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 			status = pb.RateLimitResponse_OVER_LIMIT
 			limitRemaining2 = 0
 		}
-
+		durRemaining1 := response.GetStatuses()[0].DurationUntilReset
+		durRemaining2 := response.GetStatuses()[1].DurationUntilReset
 		common.AssertProtoEqual(
 			assert,
 			&pb.RateLimitResponse{
 				OverallCode: status,
 				Statuses: []*pb.RateLimitResponse_DescriptorStatus{
-					newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1),
-					newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}},
+					newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1, durRemaining1),
+					newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2, durRemaining2)}},
 			response)
 		assert.NoError(err)
@@ -465,8 +464,22 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 		} else {
 			assert.Equal(0, int(localCacheMissCounter.Value()))
 		}
-	}
+
+		// Test DurationUntilReset by hitting the same key twice
+		resp1, err := c.ShouldRateLimit(
+			context.Background(),
+			common.NewRateLimitRequest("another", [][][2]string{{{getCacheKey("key4", enable_local_cache), "durTest"}}}, 1))
+		assert.NoError(err)
+
+		time.Sleep(2 * time.Second) // Wait to allow the duration to tick down
+
+		resp2, err := c.ShouldRateLimit(
+			context.Background(),
+			common.NewRateLimitRequest("another", [][][2]string{{{getCacheKey("key4", enable_local_cache), "durTest"}}}, 1))
+		assert.NoError(err)
+
+		assert.Less(resp2.GetStatuses()[0].DurationUntilReset.GetSeconds(), resp1.GetStatuses()[0].DurationUntilReset.GetSeconds())
 	}
 }
@@ -669,12 +682,14 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size string) func(
 		response, err = c.ShouldRateLimit(
 			context.Background(),
 			common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
enable_local_cache), "foo"}}}, 1)) + + durRemaining := response.GetStatuses()[0].DurationUntilReset common.AssertProtoEqual( assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ - newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}}, + newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49, durRemaining)}}, response) assert.NoError(err) diff --git a/test/integration/runtime/current/ratelimit/config/another.yaml b/test/integration/runtime/current/ratelimit/config/another.yaml index 94f8dd96e..0281bb68d 100644 --- a/test/integration/runtime/current/ratelimit/config/another.yaml +++ b/test/integration/runtime/current/ratelimit/config/another.yaml @@ -19,3 +19,13 @@ descriptors: rate_limit: unit: hour requests_per_unit: 10 + + - key: key4 + rate_limit: + unit: day + requests_per_unit: 20 + + - key: key4_local + rate_limit: + unit: day + requests_per_unit: 20 diff --git a/test/integration/runtime/current/ratelimit/config/basic.yaml b/test/integration/runtime/current/ratelimit/config/basic.yaml index 843b98873..a941ce58d 100644 --- a/test/integration/runtime/current/ratelimit/config/basic.yaml +++ b/test/integration/runtime/current/ratelimit/config/basic.yaml @@ -13,4 +13,4 @@ descriptors: - key: one_per_minute rate_limit: unit: minute - requests_per_unit: 1 \ No newline at end of file + requests_per_unit: 1
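The tests above now read DurationUntilReset from the response instead of recomputing it from the unit boundary, since the service reports the time actually left in the current window. Below is a minimal sketch of a Go client that consumes the field; the endpoint address (localhost:8081), the domain "rl", and the descriptor entries are illustrative assumptions, not values taken from this patch.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"google.golang.org/grpc"
)

func main() {
	// Assumed address for the ratelimit gRPC endpoint started by docker-compose-example.yml.
	conn, err := grpc.Dial("localhost:8081", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewRateLimitServiceClient(conn)
	resp, err := client.ShouldRateLimit(context.Background(), &pb.RateLimitRequest{
		Domain: "rl", // hypothetical domain; use whatever your config defines
		Descriptors: []*pb_struct.RateLimitDescriptor{{
			Entries: []*pb_struct.RateLimitDescriptor_Entry{
				{Key: "source_cluster", Value: "proxy"},
				{Key: "destination_cluster", Value: "mock"},
			},
		}},
		HitsAddend: 1,
	})
	if err != nil {
		log.Fatalf("ShouldRateLimit: %v", err)
	}

	for _, s := range resp.GetStatuses() {
		// DurationUntilReset reflects the time left in the current window,
		// so an over-limit caller can back off for exactly that long.
		wait := time.Duration(s.GetDurationUntilReset().GetSeconds()) * time.Second
		if s.GetCode() == pb.RateLimitResponse_OVER_LIMIT {
			fmt.Printf("over limit; retry in %v\n", wait)
		} else {
			fmt.Printf("ok; %d requests remaining, window resets in %v\n", s.GetLimitRemaining(), wait)
		}
	}
}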
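To check the new statsd-to-Prometheus mapping end to end, scrape the exporter endpoint that docker-compose-example.yml now publishes on port 9102. A small sketch follows, assuming the compose stack is running locally; the metric names come from conf.yaml above, while the label values depend on the domain and descriptors you exercise.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Port 9102 is published by the statsd service in docker-compose-example.yml.
	resp, err := http.Get("http://localhost:9102/metrics")
	if err != nil {
		log.Fatalf("scrape: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("read: %v", err)
	}

	// Print only the ratelimit metrics produced by the mapping config, e.g.
	// ratelimit_service_rate_limit_total_hits{domain="rl",key1="..."} 42
	for _, line := range strings.Split(string(body), "\n") {
		if strings.HasPrefix(line, "ratelimit_service_") {
			fmt.Println(line)
		}
	}
}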