From 7459ac55d9b28811111174b390a72714d55df689 Mon Sep 17 00:00:00 2001 From: Steve Sloka Date: Mon, 9 Mar 2020 13:02:21 -0400 Subject: [PATCH 01/41] Enable go modules (#124) Signed-off-by: Steve Sloka Signed-off-by: Diego Erdody --- .gitignore | 5 + .travis.yml | 2 +- Dockerfile | 20 ++-- Makefile | 31 +++--- README.md | 13 ++- docker-compose.yml | 8 +- glide.lock | 142 -------------------------- glide.yaml | 41 -------- go.mod | 33 ++++++ go.sum | 119 +++++++++++++++++++++ script/install-glide | 23 ----- src/config_check_cmd/main.go | 2 +- src/redis/cache.go | 2 +- src/redis/cache_impl.go | 4 +- src/redis/driver_impl.go | 2 +- src/server/server_impl.go | 4 +- src/service/ratelimit.go | 6 +- src/service/ratelimit_legacy.go | 4 +- src/service_cmd/main.go | 2 +- src/service_cmd/runner/runner.go | 14 +-- test/common/common.go | 2 +- test/config/config_test.go | 2 +- test/integration/integration_test.go | 8 +- test/mocks/config/config.go | 4 +- test/mocks/mocks.go | 4 +- test/mocks/redis/redis.go | 6 +- test/redis/cache_impl_test.go | 8 +- test/server/health_test.go | 2 +- test/service/ratelimit_legacy_test.go | 10 +- test/service/ratelimit_test.go | 16 +-- 30 files changed, 241 insertions(+), 298 deletions(-) delete mode 100644 glide.lock delete mode 100644 glide.yaml create mode 100644 go.mod create mode 100644 go.sum delete mode 100755 script/install-glide diff --git a/.gitignore b/.gitignore index f5010830b..7897ead09 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,8 @@ cover.out bin/ .idea/ vendor +cert.pem +key.pem +private.pem +redis-per-second.conf +redis.conf diff --git a/.travis.yml b/.travis.yml index 02bdf2e5b..e6485c571 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ sudo: required language: go -go: "1.11" +go: "1.14" services: redis-server before_install: sudo apt-get update -y && sudo apt-get install stunnel4 -y install: make bootstrap bootstrap_redis_tls diff --git a/Dockerfile b/Dockerfile index 57173a93a..706cefaa8 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,16 @@ -FROM golang:1.10.4 AS build -WORKDIR /go/src/github.com/lyft/ratelimit +FROM golang:1.14 AS build +WORKDIR /ratelimit + +ENV GOPROXY=https://proxy.golang.org +COPY go.mod go.sum /ratelimit/ +RUN go mod download COPY src src COPY script script -COPY vendor vendor -COPY glide.yaml glide.yaml -COPY glide.lock glide.lock COPY proto proto -RUN script/install-glide -RUN glide install - -RUN CGO_ENABLED=0 GOOS=linux go build -o /usr/local/bin/ratelimit -ldflags="-w -s" -v github.com/lyft/ratelimit/src/service_cmd +RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.8 AS final +FROM alpine:3.11 AS final RUN apk --no-cache add ca-certificates -COPY --from=build /usr/local/bin/ratelimit /bin/ratelimit +COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/Makefile b/Makefile index 2110f1075..ac0850aee 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,12 @@ -ifeq ("$(GOPATH)","") -$(error GOPATH must be set) -endif +export GO111MODULE=on +MODULE = github.com/envoyproxy/ratelimit SHELL := /bin/bash -GOREPO := ${GOPATH}/src/github.com/lyft/ratelimit .PHONY: bootstrap bootstrap: - script/install-glide - glide install + go get github.com/golang/mock/mockgen@v1.4.1 -.PHONY: bootstrap_tests -bootstrap_tests: - cd ./vendor/github.com/golang/mock/mockgen && go install define REDIS_STUNNEL cert = private.pem pid = /var/run/stunnel.pid @@ -33,6 +27,7 @@ redis.conf: echo "$$REDIS_STUNNEL" >> $@ redis-per-second.conf: echo "$$REDIS_PER_SECOND_STUNNEL" >> $@ + .PHONY: bootstrap_redis_tls bootstrap_redis_tls: redis.conf redis-per-second.conf openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ @@ -51,27 +46,27 @@ docs_format: .PHONY: fix_format fix_format: script/docs_fix_format - go fmt $(shell glide nv) + go fmt $(MODULE)/... 
.PHONY: check_format check_format: docs_format - @gofmt -l $(shell glide nv | sed 's/\.\.\.//g') | tee /dev/stderr | read && echo "Files failed gofmt" && exit 1 || true + @gofmt -l $(shell go list -f '{{.Dir}}' ./...) | tee /dev/stderr | read && echo "Files failed gofmt" && exit 1 || true .PHONY: compile compile: - mkdir -p ${GOREPO}/bin - cd ${GOREPO}/src/service_cmd && go build -o ratelimit ./ && mv ./ratelimit ${GOREPO}/bin - cd ${GOREPO}/src/client_cmd && go build -o ratelimit_client ./ && mv ./ratelimit_client ${GOREPO}/bin - cd ${GOREPO}/src/config_check_cmd && go build -o ratelimit_config_check ./ && mv ./ratelimit_config_check ${GOREPO}/bin + mkdir -p ./bin + go build -mod=readonly -o ./bin/ratelimit $(MODULE)/src/service_cmd + go build -mod=readonly -o ./bin/ratelimit_client $(MODULE)/src/client_cmd + go build -mod=readonly -o ./bin/ratelimit_config_check $(MODULE)/src/config_check_cmd .PHONY: tests_unit tests_unit: compile - go test -race ./... + go test -race $(MODULE)/... .PHONY: tests tests: compile - go test -race -tags=integration ./... + go test -race -tags=integration $(MODULE)/... .PHONY: docker docker: tests - docker build . -t lyft/ratelimit:`git rev-parse HEAD` + docker build . -t envoyproxy/ratelimit:`git rev-parse HEAD` diff --git a/README.md b/README.md index 67290c441..fdc749199 100644 --- a/README.md +++ b/README.md @@ -40,10 +40,10 @@ decision is then returned to the caller. Envoy's data-plane-api defines a ratelimit service proto [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). 
Logically the data-plane-api [rls](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) -is equivalent to the [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) +is equivalent to the [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) defined in this repo. However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the -legacy ratelimit (ones based in the [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) +legacy ratelimit (ones based in the [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) defined in this repo) requests to the data-plane-api definitions. Therefore, the ratelimit service will upgrade the requests, process them internally as it would process a data-plane-api ratelimit request, and then downgrade the response to send back to the client. This means that, @@ -54,7 +54,7 @@ for a slight performance hit for clients using the legacy proto, ratelimit is ba 1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). -3. `v2.0.0` deletes support for the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). This version will be tagged by the end of 2018Q3 (~September 2018) +3. 
`v2.0.0` deletes support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). This version will be tagged by the end of 2018Q3 (~September 2018) to give time to community members running ratelimit off of `master`. @@ -97,7 +97,6 @@ go [here](https://golang.org/doc/install). The docker-compose setup has three containers: redis, ratelimit-build, and ratelimit. In order to run the docker-compose setup from the root of the repo, run ```bash -glide install docker-compose up ``` @@ -308,7 +307,7 @@ descriptors: The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors a designated path, and watches for symlink swaps to files in the directory tree to reload configuration files. -The path to watch can be configured via the [settings](https://github.com/lyft/ratelimit/blob/master/src/settings/settings.go) +The path to watch can be configured via the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go) package with the following environment variables: ``` @@ -324,7 +323,7 @@ For more information on how runtime works you can read its [README](https://gith # Request Fields For information on the fields of a Ratelimit gRPC request please read the information -on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/lyft/ratelimit/blob/master/proto/ratelimit/ratelimit.proto) +on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/ratelimit/blob/master/proto/ratelimit/ratelimit.proto) # Statistics @@ -377,7 +376,7 @@ You can specify the debug port with the `DEBUG_PORT` environment variable. 
It de # Local Cache Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys, and thus avoids reading the -redis cache again for the already over-the-limit keys. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/lyft/ratelimit/blob/master/src/settings/settings.go). +redis cache again for the already over-the-limit keys. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go). If `LocalCacheSizeInBytes` is 0, local cache is disabled. # Redis diff --git a/docker-compose.yml b/docker-compose.yml index d2af278c2..c4ab4443f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,11 +12,11 @@ services: # minimal container that builds the ratelimit service binary and exits. ratelimit-build: - image: golang:1.10-alpine - working_dir: /go/src/github.com/lyft/ratelimit - command: go build -o /usr/local/bin/ratelimit /go/src/github.com/lyft/ratelimit/src/service_cmd/main.go + image: golang:1.14-alpine + working_dir: /go/src/github.com/envoyproxy/ratelimit + command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: - - .:/go/src/github.com/lyft/ratelimit + - .:/go/src/github.com/envoyproxy/ratelimit - binary:/usr/local/bin/ ratelimit: diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 2d7ff379f..000000000 --- a/glide.lock +++ /dev/null @@ -1,142 +0,0 @@ -hash: 8cc8fb031b0204aa915eaa8947e364aa1ce4362a3a9eede8086ffa97ced02b4c -updated: 2019-12-19T15:33:02.225239-08:00 -imports: -- name: github.com/cespare/xxhash - version: d7df74196a9e781ede915320c11c378c1b2f3a1f -- name: github.com/coocood/freecache - version: 3c79a0a23c1940ab4479332fb3e0127265650ce3 -- name: github.com/envoyproxy/go-control-plane - version: 0ad6fa1cf0b9b6ca8f3617a7188a568e81f40b87 - subpackages: - - envoy/api/v2/core 
- - envoy/api/v2/ratelimit - - envoy/service/ratelimit/v2 - - envoy/type -- name: github.com/envoyproxy/protoc-gen-validate - version: ff6f7a9bc2e5fe006509b9f8c7594c41a953d50f -- name: github.com/fsnotify/fsnotify - version: 629574ca2a5df945712d3079857300b5e4da0236 -- name: github.com/gogo/protobuf - version: 5628607bb4c51c3157aacc3a50f0ab707582b805 - subpackages: - - gogoproto - - proto - - protoc-gen-gogo/descriptor - - protoc-gen-gogofast - - sortkeys - - types -- name: github.com/golang/mock - version: 41e7e9a91aa20115266b326233308d17079ea51c - subpackages: - - gomock -- name: github.com/golang/protobuf - version: b5d812f8a3706043e23a9cd5babf2e5423744d30 - subpackages: - - jsonpb - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/struct - - ptypes/timestamp -- name: github.com/google/protobuf - version: 6973c3a5041636c1d8dc5f7f6c8c1f3c15bc63d6 -- name: github.com/gorilla/mux - version: 49c01487a141b49f8ffe06277f3dca3ee80a55fa -- name: github.com/kavu/go_reuseport - version: 3d6c1e425f717ee59152524e73b904b67705eeb8 -- name: github.com/kelseyhightower/envconfig - version: ac12b1f15efba734211a556d8b125110dc538016 -- name: github.com/lyft/goruntime - version: a0d6acf20fcfd48f53e623ed62b87ffb7fe17038 - subpackages: - - loader - - snapshot - - snapshot/entry -- name: github.com/lyft/gostats - version: 943f43ede7b2dbf1d7162587689cb484d49ecd15 -- name: github.com/lyft/protoc-gen-validate - version: f9d2b11e44149635b23a002693b76512b01ae515 - subpackages: - - validate -- name: github.com/mediocregopher/radix.v2 - version: b67df6e626f993b64b3ca9f4b8630900e61002e3 - subpackages: - - pool - - redis -- name: github.com/sirupsen/logrus - version: d682213848ed68c0a260ca37d6dd5ace8423f5ba -- name: github.com/stretchr/testify - version: f390dcf405f7b83c997eac1b06768bb9f44dec18 - subpackages: - - assert -- name: golang.org/x/crypto - version: becbf705a91575484002d598f87d74f0002801e7 - subpackages: - - ssh/terminal -- name: 
golang.org/x/net - version: c0dbc17a35534bf2e581d7a942408dc936316da4 - subpackages: - - context - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/timeseries - - trace -- name: golang.org/x/sys - version: acbc56fc7007d2a01796d5bde54f39e3b3e95945 - subpackages: - - unix - - windows -- name: golang.org/x/text - version: cbf43d21aaebfdfeb81d91a5f444d13a3046e686 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/genproto - version: b31c10ee225f87dbb9f5f878ead9d64f34f5cbbb - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 41344da2231b913fa3d983840a57a6b1b7b631a1 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - channelz - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - grpclb/grpc_lb_v1/messages - - grpclog - - health - - health/grpc_health_v1 - - internal - - keepalive - - metadata - - naming - - peer - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/yaml.v2 - version: 1f64d6156d11335c3f22d9330b0ad14fc1e789ce -testImports: -- name: github.com/davecgh/go-spew - version: 2df174808ee097f90d259e432cc04442cf60be21 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index 82fbcdc97..000000000 --- a/glide.yaml +++ /dev/null @@ -1,41 +0,0 @@ -package: github.com/lyft/ratelimit -import: -- package: github.com/golang/mock - version: master - subpackages: - - gomock -- package: github.com/kelseyhightower/envconfig - version: 1.1.0 -- package: github.com/lyft/gostats - version: v0.2.6 -- package: github.com/lyft/goruntime - version: v0.2.1 -- package: github.com/mediocregopher/radix.v2 - version: master - subpackages: - - pool - - redis -- package: github.com/sirupsen/logrus - version: ^1.0 -- package: 
golang.org/x/net - version: master - subpackages: - - context -- package: gopkg.in/yaml.v2 - version: master -- package: github.com/stretchr/testify - version: v1.1.3 -- package: google.golang.org/grpc - version: v1.12.0 -- package: github.com/kavu/go_reuseport - version: v1.2.0 -- package: github.com/envoyproxy/go-control-plane - version: v0.6.9 -- package: github.com/envoyproxy/protoc-gen-validate - version: v0.0.14 -- package: github.com/google/protobuf - version: v3.7.1 -- package: github.com/golang/protobuf/proto - version: v1.3.1 -- package: github.com/coocood/freecache - version: v1.1.0 diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..b05bd81ef --- /dev/null +++ b/go.mod @@ -0,0 +1,33 @@ +module github.com/envoyproxy/ratelimit + +go 1.14 + +require ( + github.com/cespare/xxhash v1.1.0 // indirect + github.com/coocood/freecache v1.1.0 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/envoyproxy/go-control-plane v0.6.9 + github.com/gogo/protobuf v1.3.1 // indirect + github.com/golang/mock v1.4.1 + github.com/golang/protobuf v1.3.2 + github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 + github.com/kavu/go_reuseport v1.2.0 + github.com/kelseyhightower/envconfig v1.1.0 + github.com/lyft/goruntime v0.2.1 + github.com/lyft/gostats v0.2.6 + github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect + github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 + github.com/onsi/ginkgo v1.12.0 // indirect + github.com/onsi/gomega v1.9.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/sirupsen/logrus v1.0.4 + github.com/stretchr/testify v1.1.3 + golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 // indirect + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect + google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect + google.golang.org/grpc v1.19.0 + gopkg.in/airbrake/gobrake.v2 
v2.0.9 // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gopkg.in/yaml.v2 v2.2.7 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..3f0fe1148 --- /dev/null +++ b/go.sum @@ -0,0 +1,119 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= +github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.6.9 h1:deEH9W8ZAUGNbCdX+9iNzBOGrAOrnpJGoy0PcTqk/tE= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.2-0.20191213205753-41e7e9a91aa2 h1:hBrdHEwxv/6aUIcg0N6NHSRO9Y7jK4Jmu2XP8jUPI+o= +github.com/golang/mock v1.3.2-0.20191213205753-41e7e9a91aa2/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= +github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i2E= +github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= +github.com/kelseyhightower/envconfig v1.1.0 h1:4htXR8ameS6KBfrNBoqEgpg0IK2D6rozN9ATOPwRfM0= +github.com/kelseyhightower/envconfig v1.1.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/lyft/goruntime v0.2.1 h1:7DebA8oMVuoQ5TQ0j1xR/X2xRagbGrm0e2SoMdt5tRs= +github.com/lyft/goruntime v0.2.1/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= +github.com/lyft/gostats v0.2.6 h1:m4XmqpBamBXaFjp76h2Ao4TrNpsIVODNClDrH0YTbjM= +github.com/lyft/gostats v0.2.6/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= 
+github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 h1:kLCSHuk3X+SI8Up26wM71id7jz77B3zCZDp01UWMVbM= +github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 h1:ViNuGS149jgnttqhc6XQNPwdupEMBXqCx9wtlW7P3sA= +github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9/go.mod h1:fLRUbhbSd5Px2yKUaGYYPltlyxi1guJz1vCmo1RQL50= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.0.4 h1:gzbtLsZC3Ic5PptoRG+kQj4L60qjK7H7XszrU163JNQ= +github.com/sirupsen/logrus v1.0.4/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/testify v1.1.3 h1:76sIvNG1I8oBerx/MvuVHh5HBWBW7oxfsi3snKIsz5w= +github.com/stretchr/testify v1.1.3/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 h1:aJ0ex187qoXrJHPo8ZasVTASQB7llQP6YeNzgDALPRk= +golang.org/x/crypto 
v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb h1:MsKWO3hK1h941VWsQ8dKJqIdb3r3XP9/cDw8n/B95SM= +golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8= +google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/script/install-glide b/script/install-glide deleted file mode 100755 index 
8b450e880..000000000 --- a/script/install-glide +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -e - -which glide > /dev/null 2>&1 && exit 0 - -if test "Darwin" == "$(uname)" - then brew install glide -fi - -which glide > /dev/null 2>&1 || { - mkdir -p ./glide - - curl -L https://github.com/Masterminds/glide/releases/download/v0.12.2/glide-v0.12.2-linux-amd64.tar.gz | tar xz -C ./glide --strip-components=1 - chmod 755 -R ./glide - - if which sudo >/dev/null; - then sudo mv ./glide/glide /usr/local/bin/ - else - mv ./glide/glide /usr/local/bin/ - fi -} - -which glide > /dev/null 2>&1 \ No newline at end of file diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index d2defbfab..f9f3c7426 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -7,8 +7,8 @@ import ( "os" "path/filepath" + "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { diff --git a/src/redis/cache.go b/src/redis/cache.go index e5617c552..b090cc995 100644 --- a/src/redis/cache.go +++ b/src/redis/cache.go @@ -2,7 +2,7 @@ package redis import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - "github.com/lyft/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/config" "golang.org/x/net/context" ) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index b3a34f49a..5cc7b63e3 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -11,8 +11,8 @@ import ( "github.com/coocood/freecache" pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - "github.com/lyft/ratelimit/src/assert" - "github.com/lyft/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git 
a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 5405bc8c5..dbcf787ee 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -4,8 +4,8 @@ import ( "crypto/tls" "net" + "github.com/envoyproxy/ratelimit/src/assert" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/assert" "github.com/mediocregopher/radix.v2/pool" "github.com/mediocregopher/radix.v2/redis" logger "github.com/sirupsen/logrus" diff --git a/src/server/server_impl.go b/src/server/server_impl.go index d685f847c..2b27f06cd 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -8,7 +8,7 @@ import ( "net/http/pprof" "sort" - "github.com/lyft/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/redis" "os" "os/signal" @@ -17,11 +17,11 @@ import ( "net" "github.com/coocood/freecache" + "github.com/envoyproxy/ratelimit/src/settings" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/health" diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index d9817721f..3982a39ed 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -5,11 +5,11 @@ import ( "sync" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/assert" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index a8218279b..e4654783a 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -2,9 +2,9 @@ 
package ratelimit import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" "github.com/golang/protobuf/jsonpb" "github.com/lyft/gostats" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" "golang.org/x/net/context" ) @@ -12,7 +12,7 @@ type RateLimitLegacyServiceServer interface { pb_legacy.RateLimitServiceServer } -// legacyService is used to implement ratelimit.proto (https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) +// legacyService is used to implement ratelimit.proto (https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. type legacyService struct { s *service diff --git a/src/service_cmd/main.go b/src/service_cmd/main.go index 5df1ab032..a53b362b6 100644 --- a/src/service_cmd/main.go +++ b/src/service_cmd/main.go @@ -1,6 +1,6 @@ package main -import "github.com/lyft/ratelimit/src/service_cmd/runner" +import "github.com/envoyproxy/ratelimit/src/service_cmd/runner" func main() { runner := runner.NewRunner() diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 21c07d47d..ee9da4b39 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -11,13 +11,13 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" + pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" - "github.com/lyft/ratelimit/src/server" - ratelimit "github.com/lyft/ratelimit/src/service" - "github.com/lyft/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/config" + 
"github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/server" + ratelimit "github.com/envoyproxy/ratelimit/src/service" + "github.com/envoyproxy/ratelimit/src/settings" logger "github.com/sirupsen/logrus" ) @@ -78,7 +78,7 @@ func (runner *Runner) Run() { // Ratelimit is compatible with two proto definitions // 1. data-plane-api rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) - // 2. ratelimit.proto defined in this repository: https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto + // 2. ratelimit.proto defined in this repository: https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto pb_legacy.RegisterRateLimitServiceServer(srv.GrpcServer(), service.GetLegacyService()) // (1) is the current definition, and (2) is the legacy definition. 
diff --git a/test/common/common.go b/test/common/common.go index ba3e79045..e3796f5de 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -5,7 +5,7 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" + pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" ) type TestStatSink struct { diff --git a/test/config/config_test.go b/test/config/config_test.go index be5ff3473..791cc1098 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -6,8 +6,8 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" "github.com/stretchr/testify/assert" ) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 697cf896c..b33683c8e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -11,9 +11,9 @@ import ( "time" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" - "github.com/lyft/ratelimit/src/service_cmd/runner" - "github.com/lyft/ratelimit/test/common" + pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + "github.com/envoyproxy/ratelimit/src/service_cmd/runner" + "github.com/envoyproxy/ratelimit/test/common" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" @@ -41,7 +41,7 @@ func newDescriptorStatusLegacy( } } -// TODO: Once adding the ability of stopping the server in the runner (https://github.com/lyft/ratelimit/issues/119), +// TODO: Once adding the ability of stopping the server in the runner (https://github.com/envoyproxy/ratelimit/issues/119), // stop the server 
at the end of each test, thus we can reuse the grpc port among these integration tests. func TestBasicConfig(t *testing.T) { t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index b58f42687..e4ddc17b8 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/lyft/ratelimit/src/config (interfaces: RateLimitConfig,RateLimitConfigLoader) +// Source: github.com/envoyproxy/ratelimit/src/config (interfaces: RateLimitConfig,RateLimitConfigLoader) // Package mock_config is a generated GoMock package. package mock_config @@ -7,9 +7,9 @@ package mock_config import ( context "context" ratelimit "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" gostats "github.com/lyft/gostats" - config "github.com/lyft/ratelimit/src/config" reflect "reflect" ) diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 9b224c88b..efe80f841 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -2,5 +2,5 @@ package mocks //go:generate mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace //go:generate mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace -//go:generate mockgen -destination ./config/config.go github.com/lyft/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate mockgen -destination ./redis/redis.go github.com/lyft/ratelimit/src/redis RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource +//go:generate mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader +//go:generate mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis 
RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index ad8f4ec96..df24212a2 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -1,13 +1,13 @@ // Automatically generated by MockGen. DO NOT EDIT! -// Source: github.com/lyft/ratelimit/src/redis (interfaces: RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource) +// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource) package mock_redis import ( ratelimit "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + config "github.com/envoyproxy/ratelimit/src/config" + redis "github.com/envoyproxy/ratelimit/src/redis" gomock "github.com/golang/mock/gomock" - config "github.com/lyft/ratelimit/src/config" - redis "github.com/lyft/ratelimit/src/redis" context "golang.org/x/net/context" ) diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 57581f38d..d4f94fc97 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -6,15 +6,15 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" "math/rand" + "github.com/envoyproxy/ratelimit/test/common" + mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" "github.com/golang/mock/gomock" - "github.com/lyft/ratelimit/test/common" - mock_redis "github.com/lyft/ratelimit/test/mocks/redis" "github.com/stretchr/testify/assert" ) diff --git a/test/server/health_test.go b/test/server/health_test.go index 21030ddde..a79e3642e 100644 --- a/test/server/health_test.go +++ b/test/server/health_test.go @@ -8,7 +8,7 @@ import ( "syscall" "testing" - 
"github.com/lyft/ratelimit/src/server" + "github.com/envoyproxy/ratelimit/src/server" "google.golang.org/grpc" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index f0510f3d2..ad7e6b942 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -5,14 +5,14 @@ import ( pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/service" + "github.com/envoyproxy/ratelimit/test/common" "github.com/golang/mock/gomock" "github.com/golang/protobuf/jsonpb" "github.com/lyft/gostats" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" - "github.com/lyft/ratelimit/src/service" - "github.com/lyft/ratelimit/test/common" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index df5de3eb4..57fa0a65d 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -5,16 +5,16 @@ import ( "testing" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/service" + "github.com/envoyproxy/ratelimit/test/common" + "github.com/envoyproxy/ratelimit/test/mocks/config" + "github.com/envoyproxy/ratelimit/test/mocks/redis" + "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" + "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" "github.com/golang/mock/gomock" "github.com/lyft/gostats" - 
"github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" - "github.com/lyft/ratelimit/src/service" - "github.com/lyft/ratelimit/test/common" - "github.com/lyft/ratelimit/test/mocks/config" - "github.com/lyft/ratelimit/test/mocks/redis" - "github.com/lyft/ratelimit/test/mocks/runtime/loader" - "github.com/lyft/ratelimit/test/mocks/runtime/snapshot" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) From 395e9e24a5589f2128f5edfccb786996cab5dd52 Mon Sep 17 00:00:00 2001 From: Steve Sloka Date: Wed, 11 Mar 2020 17:09:47 -0400 Subject: [PATCH 02/41] CI: Github Actions (#127) Signed-off-by: Steve Sloka Signed-off-by: Diego Erdody --- .github/workflows/master.yaml | 35 ++++++++++++++++++++++++++++++ .github/workflows/pullrequest.yaml | 29 +++++++++++++++++++++++++ .github/workflows/release.yaml | 34 +++++++++++++++++++++++++++++ .travis.yml | 13 ----------- Makefile | 17 +++++++++++---- 5 files changed, 111 insertions(+), 17 deletions(-) create mode 100644 .github/workflows/master.yaml create mode 100644 .github/workflows/pullrequest.yaml create mode 100644 .github/workflows/release.yaml delete mode 100644 .travis.yml diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml new file mode 100644 index 000000000..5d323a3d7 --- /dev/null +++ b/.github/workflows/master.yaml @@ -0,0 +1,35 @@ +name: Build and push :master image + +on: + push: + branches: + - master + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: deps + run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y + + - name: build and push docker image + run: | + redis-server --port 6380 & + redis-server --port 6381 --requirepass password123 & + redis-server --port 6382 --requirepass password123 & + redis-server --port 6384 --requirepass password123 & + redis-server --port 6385 
--requirepass password123 & + echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin + make bootstrap bootstrap_redis_tls docker_push + env: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + VERSION: master \ No newline at end of file diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml new file mode 100644 index 000000000..2e22c6d8a --- /dev/null +++ b/.github/workflows/pullrequest.yaml @@ -0,0 +1,29 @@ +name: CI Build and Test for PR + +on: + pull_request: + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: deps + run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y + + - name: build and test + run: | + redis-server --port 6380 & + redis-server --port 6381 --requirepass password123 & + redis-server --port 6382 --requirepass password123 & + redis-server --port 6384 --requirepass password123 & + redis-server --port 6385 --requirepass password123 & + make bootstrap bootstrap_redis_tls tests_unit tests \ No newline at end of file diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..28569b0a5 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,34 @@ +name: Build and push :release image + +on: + push: + tags: + - 'v*' + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: deps + run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y + + - name: build and push docker image + run: | + redis-server --port 6380 & + redis-server --port 6381 --requirepass password123 & + redis-server --port 6382 --requirepass password123 & + redis-server --port 6384 
--requirepass password123 & + redis-server --port 6385 --requirepass password123 & + echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin + make bootstrap bootstrap_redis_tls docker_push + env: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index e6485c571..000000000 --- a/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: required -language: go -go: "1.14" -services: redis-server -before_install: sudo apt-get update -y && sudo apt-get install stunnel4 -y -install: make bootstrap bootstrap_redis_tls -before_script: -- redis-server --port 6380 & -- redis-server --port 6381 --requirepass password123 & -- redis-server --port 6382 --requirepass password123 & -- redis-server --port 6384 --requirepass password123 & -- redis-server --port 6385 --requirepass password123 & -script: make check_format tests diff --git a/Makefile b/Makefile index ac0850aee..553f6d5e7 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,10 @@ export GO111MODULE=on +PROJECT = ratelimit +REGISTRY ?= envoyproxy +IMAGE := $(REGISTRY)/$(PROJECT) MODULE = github.com/envoyproxy/ratelimit - +GIT_REF = $(shell git describe --tags || git rev-parse --short=8 --verify HEAD) +VERSION ?= $(GIT_REF) SHELL := /bin/bash .PHONY: bootstrap @@ -67,6 +71,11 @@ tests_unit: compile tests: compile go test -race -tags=integration $(MODULE)/... -.PHONY: docker -docker: tests - docker build . -t envoyproxy/ratelimit:`git rev-parse HEAD` +.PHONY: docker_image +docker_image: tests + docker build . 
-t $(IMAGE):$(VERSION) + +.PHONY: docker_push +docker_push: docker_image + docker push $(IMAGE):$(VERSION) + From 3343f5717a9628ee95b31228d470e4d9eb84e378 Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 26 May 2020 10:47:22 -0700 Subject: [PATCH 03/41] community: update contributing guide (#139) Fixes https://github.com/envoyproxy/ratelimit/issues/138 Signed-off-by: Matt Klein Signed-off-by: Diego Erdody --- CODE_OF_CONDUCT.md | 4 ++- CONTRIBUTING.md | 85 ++++++++++++++++++++++++++++++++++++++++++---- DCO | 37 ++++++++++++++++++++ 3 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 DCO diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index afcbd8c21..91f111f23 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1 +1,3 @@ -This project is governed by [Lyft's code of conduct](https://github.com/lyft/code-of-conduct). All contributors and participants agree to abide by its terms. \ No newline at end of file +## Community Code of Conduct + +ratelimit follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85d4e24e2..5d641c988 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,12 +8,85 @@ We welcome contributions from the community. Here are some guidelines. * Fork the repo and create your PR. * Tests will automatically run for you. -* When all of the tests are passing, tag @lyft/network-team and we will review it and - merge once our CLA has been signed (see below). +* When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and + we will review it and merge. * Party time. -# CLA +# DCO: Sign your work -* We require a CLA for code contributions, so before we can accept a pull request we need - to have a signed CLA. Please visit our [CLA service](https://oss.lyft.com/cla) and follow - the instructions to sign the CLA. 
+The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](https://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. 
+ +If you want this to be automatic you can set up some aliases: + +```bash +git config --add alias.amend "commit -s --amend" +git config --add alias.c "commit -s" +``` + +## Fixing DCO + +If your PR fails the DCO check, it's necessary to fix the entire commit history in the PR. Best +practice is to [squash](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) +the commit history to a single commit, append the DCO sign-off as described above, and [force +push](https://git-scm.com/docs/git-push#git-push---force). For example, if you have 2 commits in +your history: + +```bash +git rebase -i HEAD^^ +(interactive squash + DCO append) +git push origin -f +``` + +Note, that in general rewriting history in this way is a hindrance to the review process and this +should only be done to correct a DCO mistake. diff --git a/DCO b/DCO new file mode 100644 index 000000000..8201f9921 --- /dev/null +++ b/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
From 48b45a7db13f42264a45a858db2586be63f8613e Mon Sep 17 00:00:00 2001 From: dblackdblack Date: Tue, 26 May 2020 13:53:06 -0600 Subject: [PATCH 04/41] add http 1 `/json` endpoint (#136) Signed-off-by: David Black Signed-off-by: Diego Erdody --- README.md | 25 ++++++++++++++ docker-compose.yml | 9 +++++ src/server/server.go | 2 ++ src/server/server_impl.go | 33 +++++++++++++++++++ src/service_cmd/runner/runner.go | 2 ++ test/integration/integration_test.go | 27 +++++++++++++++ .../current/ratelimit/config/basic.yaml | 5 +++ 7 files changed, 103 insertions(+) diff --git a/README.md b/README.md index fdc749199..59744a159 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,8 @@ - [Loading Configuration](#loading-configuration) - [Request Fields](#request-fields) - [Statistics](#statistics) +- [HTTP Port](#http-port) + - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) - [Local Cache](#local-cache) - [Redis](#redis) @@ -360,6 +362,29 @@ ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.over_lim ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.total_hits: 0 ``` +# HTTP Port + +The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: +1. /healthcheck → return a 200 if this service is healthy +1. /json → HTTP 1.1 endpoint for interacting with ratelimit service + +## /json endpoint + +Takes an HTTP POST with a JSON body of the form e.g. +```json +{ + "domain": "dummy", + "descriptors": [ + {"entries": [ + {"key": "one_per_day", + "value": "something"} + ]} + ] +} +``` +The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more +ratelimits were exceeded. Endpoint does not currently return detailed information on which limits were exceeded. + # Debug Port The debug port can be used to interact with the running process. 
diff --git a/docker-compose.yml b/docker-compose.yml index c4ab4443f..51360d361 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,6 +19,14 @@ services: - .:/go/src/github.com/envoyproxy/ratelimit - binary:/usr/local/bin/ + ratelimit-client-build: + image: golang:1.14-alpine + working_dir: /go/src/github.com/envoyproxy/ratelimit + command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go + volumes: + - .:/go/src/github.com/envoyproxy/ratelimit + - binary:/usr/local/bin/ + ratelimit: image: alpine:3.6 command: /usr/local/bin/ratelimit @@ -29,6 +37,7 @@ services: depends_on: - redis - ratelimit-build + - ratelimit-client-build networks: - ratelimit-network volumes: diff --git a/src/server/server.go b/src/server/server.go index 820085744..38520092a 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -1,6 +1,7 @@ package server import ( + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "net/http" "github.com/lyft/goruntime/loader" @@ -25,6 +26,7 @@ type Server interface { * Add an HTTP endpoint to the local debug port. */ AddDebugHttpEndpoint(path string, help string, handler http.HandlerFunc) + AddJsonHandler(pb.RateLimitServiceServer) /** * Returns the embedded gRPC server to be used for registering gRPC endpoints. 
diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 2b27f06cd..b0349594e 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -1,6 +1,7 @@ package server import ( + "encoding/json" "expvar" "fmt" "io" @@ -17,6 +18,7 @@ import ( "net" "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/settings" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" @@ -52,6 +54,37 @@ func (server *server) AddDebugHttpEndpoint(path string, help string, handler htt server.debugListener.endpoints[path] = help } +// add an http/1 handler at the /json endpoint which allows this ratelimit service to work with +// clients that cannot use the gRPC interface (e.g. lua) +// example usage from cURL with domain "dummy" and descriptor "perday": +// echo '{"domain": "dummy", "descriptors": [{"entries": [{"key": "perday"}]}]}' | curl -vvvXPOST --data @/dev/stdin localhost:8080/json +func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { + handler := func(writer http.ResponseWriter, request *http.Request) { + var req pb.RateLimitRequest + + if err := json.NewDecoder(request.Body).Decode(&req); err != nil { + logger.Warnf("error: %s", err.Error()) + http.Error(writer, err.Error(), http.StatusBadRequest) + return + } + + resp, err := svc.ShouldRateLimit(nil, &req) + if err != nil { + logger.Warnf("error: %s", err.Error()) + http.Error(writer, err.Error(), http.StatusBadRequest) + return + } + logger.Debugf("resp:%s", resp) + if resp.OverallCode == pb.RateLimitResponse_OVER_LIMIT { + http.Error(writer, "over limit", http.StatusTooManyRequests) + } else if resp.OverallCode == pb.RateLimitResponse_UNKNOWN { + http.Error(writer, "unknown", http.StatusInternalServerError) + } + + } + server.router.HandleFunc("/json", handler) +} + func (server *server) GrpcServer() *grpc.Server { return server.grpcServer } diff --git 
a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index ee9da4b39..a6a5d0d8f 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -75,6 +75,8 @@ func (runner *Runner) Run() { io.WriteString(writer, service.GetCurrentConfig().Dump()) }) + srv.AddJsonHandler(service) + // Ratelimit is compatible with two proto definitions // 1. data-plane-api rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index b33683c8e..067a9a312 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -3,8 +3,10 @@ package integration_test import ( + "bytes" "fmt" "math/rand" + "net/http" "os" "strconv" "testing" @@ -344,6 +346,7 @@ func TestBasicConfigLegacy(t *testing.T) { assert := assert.New(t) conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure()) + assert.NoError(err) defer conn.Close() c := pb_legacy.NewRateLimitServiceClient(conn) @@ -358,6 +361,30 @@ func TestBasicConfigLegacy(t *testing.T) { response) assert.NoError(err) + json_body := []byte(`{ + "domain": "basic", + "descriptors": [ + { + "entries": [ + { + "key": "one_per_minute" + } + ] + } + ] + }`) + http_resp, _ := http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) + assert.Equal(http_resp.StatusCode, 200) + http_resp.Body.Close() + + http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) + assert.Equal(http_resp.StatusCode, 429) + http_resp.Body.Close() + + invalid_json := []byte(`{"unclosed quote: []}`) + http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(invalid_json)) + assert.Equal(http_resp.StatusCode, 400) + response, err = c.ShouldRateLimit( context.Background(), 
common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1)) diff --git a/test/integration/runtime/current/ratelimit/config/basic.yaml b/test/integration/runtime/current/ratelimit/config/basic.yaml index 41cd5a31a..843b98873 100644 --- a/test/integration/runtime/current/ratelimit/config/basic.yaml +++ b/test/integration/runtime/current/ratelimit/config/basic.yaml @@ -9,3 +9,8 @@ descriptors: rate_limit: unit: second requests_per_unit: 50 + + - key: one_per_minute + rate_limit: + unit: minute + requests_per_unit: 1 \ No newline at end of file From 6dcfe7064ebe8a6db7a6d27b45ae13011a5f114a Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Wed, 27 May 2020 12:01:13 -0700 Subject: [PATCH 05/41] Use mockgen version from go.mod instead of from "make bootstrap" (#143) Even though the Makefile wants to encourage using mockgen@1.4.1, it seems like the mocks have been generated using a pre-1.0 version of mockgen. Using "go run github.com/golang/mock/mockgen" as a go:generate command instead of just "mockgen" avoids the need to pre-install into the developer's $PATH and uses the go.mod-specified version Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- Makefile | 3 +-- test/mocks/mocks.go | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 553f6d5e7..038300a2a 100644 --- a/Makefile +++ b/Makefile @@ -8,8 +8,7 @@ VERSION ?= $(GIT_REF) SHELL := /bin/bash .PHONY: bootstrap -bootstrap: - go get github.com/golang/mock/mockgen@v1.4.1 +bootstrap: ; define REDIS_STUNNEL cert = private.pem diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index efe80f841..e0ce8288a 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -1,6 +1,6 @@ package mocks -//go:generate mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace -//go:generate mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace -//go:generate mockgen -destination 
./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace +//go:generate go run github.com/golang/mock/mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace +//go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader +//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource From dd2dcc70b2df30981a9e546c0ea6414f601b2feb Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Wed, 27 May 2020 13:53:31 -0700 Subject: [PATCH 06/41] Upgrade gostats dependency from 0.2.6 to 0.4.0 (#141) My interest is the UDP protocol support which appeared in gotstats 0.3.10 There's a breaking change as of https://github.com/lyft/gostats/releases/tag/v0.3.0 which is that gostats no longer publishes stats as expvars. 
Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- go.mod | 2 +- go.sum | 6 ++---- test/integration/integration_test.go | 1 + 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index b05bd81ef..64a950080 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.1.0 github.com/lyft/goruntime v0.2.1 - github.com/lyft/gostats v0.2.6 + github.com/lyft/gostats v0.4.0 github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 github.com/onsi/ginkgo v1.12.0 // indirect diff --git a/go.sum b/go.sum index 3f0fe1148..9b715351e 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.2-0.20191213205753-41e7e9a91aa2 h1:hBrdHEwxv/6aUIcg0N6NHSRO9Y7jK4Jmu2XP8jUPI+o= -github.com/golang/mock v1.3.2-0.20191213205753-41e7e9a91aa2/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -37,8 +35,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/lyft/goruntime v0.2.1 h1:7DebA8oMVuoQ5TQ0j1xR/X2xRagbGrm0e2SoMdt5tRs= github.com/lyft/goruntime v0.2.1/go.mod 
h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= -github.com/lyft/gostats v0.2.6 h1:m4XmqpBamBXaFjp76h2Ao4TrNpsIVODNClDrH0YTbjM= -github.com/lyft/gostats v0.2.6/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= +github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= +github.com/lyft/gostats v0.4.0/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 h1:kLCSHuk3X+SI8Up26wM71id7jz77B3zCZDp01UWMVbM= github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 h1:ViNuGS149jgnttqhc6XQNPwdupEMBXqCx9wtlW7P3sA= diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 067a9a312..60a25257b 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -115,6 +115,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp") os.Setenv("REDIS_SOCKET_TYPE", "tcp") os.Setenv("LOCAL_CACHE_SIZE_IN_BYTES", local_cache_size) + os.Setenv("USE_STATSD", "false") local_cache_size_val, _ := strconv.Atoi(local_cache_size) enable_local_cache := local_cache_size_val > 0 From 265c5968979bc01c1087c9b00065bb069749724b Mon Sep 17 00:00:00 2001 From: Tong Cai Date: Fri, 29 May 2020 05:18:06 +0800 Subject: [PATCH 07/41] Upgrade radix (#137) Signed-off-by: Tong Cai Signed-off-by: Diego Erdody --- README.md | 7 + go.mod | 11 +- go.sum | 34 +++ src/redis/cache_impl.go | 67 ++--- src/redis/driver.go | 39 +-- src/redis/driver_impl.go | 123 ++++----- src/service_cmd/runner/runner.go | 10 +- src/settings/settings.go | 52 ++-- test/mocks/config/config.go | 10 +- test/mocks/mocks.go | 2 +- test/mocks/redis/redis.go | 241 ++++++++---------- test/mocks/runtime/loader/loader.go | 4 + test/mocks/runtime/snapshot/snapshot.go | 16 ++ 
test/redis/bench_test.go | 93 +++++++ test/redis/cache_impl_test.go | 320 ++++++++++++------------ 15 files changed, 566 insertions(+), 463 deletions(-) create mode 100644 test/redis/bench_test.go diff --git a/README.md b/README.md index 59744a159..0640cb3c1 100644 --- a/README.md +++ b/README.md @@ -416,6 +416,13 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. +Ratelimit use [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238) to send requests to redis. Pipelining can be configured using the following environment variables: + +1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. +If window is zero then implicit pipelining will be disabled. +1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets maximum number of commands that can be pipelined before flushing. +If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. 
+ ## One Redis Instance To configure one Redis instance use the following environment variables: diff --git a/go.mod b/go.mod index 64a950080..f679a373e 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/envoyproxy/ratelimit go 1.14 require ( + github.com/alicebob/miniredis/v2 v2.11.4 github.com/cespare/xxhash v1.1.0 // indirect github.com/coocood/freecache v1.1.0 - github.com/davecgh/go-spew v1.1.1 // indirect github.com/envoyproxy/go-control-plane v0.6.9 github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/mock v1.4.1 @@ -16,18 +16,19 @@ require ( github.com/lyft/goruntime v0.2.1 github.com/lyft/gostats v0.4.0 github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect - github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 + github.com/mediocregopher/radix/v3 v3.5.1 github.com/onsi/ginkgo v1.12.0 // indirect github.com/onsi/gomega v1.9.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.0.4 - github.com/stretchr/testify v1.1.3 + github.com/stretchr/objx v0.2.0 // indirect + github.com/stretchr/testify v1.5.1 golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 // indirect golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect google.golang.org/grpc v1.19.0 gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect - gopkg.in/yaml.v2 v2.2.7 + gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index 9b715351e..e0747b697 100644 --- a/go.sum +++ b/go.sum @@ -2,11 +2,20 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/OneOfOne/xxhash v1.2.2 
h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= +github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.6.9 h1:deEH9W8ZAUGNbCdX+9iNzBOGrAOrnpJGoy0PcTqk/tE= @@ -23,6 +32,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 
h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= @@ -41,6 +51,10 @@ github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 h1:kLCS github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 h1:ViNuGS149jgnttqhc6XQNPwdupEMBXqCx9wtlW7P3sA= github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9/go.mod h1:fLRUbhbSd5Px2yKUaGYYPltlyxi1guJz1vCmo1RQL50= +github.com/mediocregopher/radix/v3 v3.4.2 h1:galbPBjIwmyREgwGCfQEN4X8lxbJnKBYurgz+VfcStA= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/mediocregopher/radix/v3 v3.5.1 h1:IOYgQUMA380N4khaL5eNT4v/P2LnHa8b0wnVdwZMFsY= +github.com/mediocregopher/radix/v3 v3.5.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= @@ -53,8 +67,19 @@ github.com/sirupsen/logrus v1.0.4 h1:gzbtLsZC3Ic5PptoRG+kQj4L60qjK7H7XszrU163JNQ github.com/sirupsen/logrus v1.0.4/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.1.3 h1:76sIvNG1I8oBerx/MvuVHh5HBWBW7oxfsi3snKIsz5w= github.com/stretchr/testify v1.1.3/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 h1:aJ0ex187qoXrJHPo8ZasVTASQB7llQP6YeNzgDALPRk= golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -72,9 +97,13 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -92,6 +121,8 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86J golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -109,9 +140,12 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNj gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 5cc7b63e3..4e37d10f0 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -18,12 +18,12 @@ import ( ) type rateLimitCacheImpl struct { - pool Pool - // Optional Pool for a dedicated cache of per second limits. - // If this pool is nil, then the Cache will use the pool for all - // limits regardless of unit. If this pool is not nil, then it + client Client + // Optional Client for a dedicated cache of per second limits. + // If this client is nil, then the Cache will use the client for all + // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. 
- perSecondPool Pool + perSecondClient Client timeSource TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 @@ -105,16 +105,14 @@ type cacheKey struct { perSecond bool } -func pipelineAppend(conn Connection, key string, hitsAddend uint32, expirationSeconds int64) { - conn.PipeAppend("INCRBY", key, hitsAddend) - conn.PipeAppend("EXPIRE", key, expirationSeconds) -} - -func pipelineFetch(conn Connection) uint32 { - ret := uint32(conn.PipeResponse().Int()) - // Pop off EXPIRE response and check for error. - conn.PipeResponse() - return ret +func pipelineAppend(client Client, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) (err error) { + if err = client.DoCmd(result, "INCRBY", key, hitsAddend); err != nil { + return + } + if err = client.DoCmd(nil, "EXPIRE", key, expirationSeconds); err != nil { + return + } + return } func (this *rateLimitCacheImpl) DoLimit( @@ -124,17 +122,6 @@ func (this *rateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") - conn := this.pool.Get() - defer this.pool.Put(conn) - - // Optional connection for per second limits. If the cache has a perSecondPool setup, - // then use a connection from the pool for per second limits. - var perSecondConn Connection = nil - if this.perSecondPool != nil { - perSecondConn = this.perSecondPool.Get() - defer this.perSecondPool.Put(perSecondConn) - } - // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. hitsAddend := max(1, request.HitsAddend) @@ -154,6 +141,8 @@ func (this *rateLimitCacheImpl) DoLimit( } isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) + results := make([]uint32, len(request.Descriptors)) + var err error // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { @@ -179,12 +168,17 @@ func (this *rateLimitCacheImpl) DoLimit( } // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. 
- if perSecondConn != nil && cacheKey.perSecond { - pipelineAppend(perSecondConn, cacheKey.key, hitsAddend, expirationSeconds) + if this.perSecondClient != nil && cacheKey.perSecond { + if err = pipelineAppend(this.perSecondClient, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + break + } } else { - pipelineAppend(conn, cacheKey.key, hitsAddend, expirationSeconds) + if err = pipelineAppend(this.client, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + break + } } } + checkError(err) // Now fetch the pipeline. responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, @@ -212,14 +206,7 @@ func (this *rateLimitCacheImpl) DoLimit( continue } - var limitAfterIncrease uint32 - // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. - if this.perSecondPool != nil && cacheKey.perSecond { - limitAfterIncrease = pipelineFetch(perSecondConn) - } else { - limitAfterIncrease = pipelineFetch(conn) - } - + limitAfterIncrease := results[i] limitBeforeIncrease := limitAfterIncrease - hitsAddend overLimitThreshold := limits[i].Limit.RequestsPerUnit // The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio. 
@@ -288,10 +275,10 @@ func (this *rateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func NewRateLimitCacheImpl(pool Pool, perSecondPool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache { +func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache { return &rateLimitCacheImpl{ - pool: pool, - perSecondPool: perSecondPool, + client: client, + perSecondClient: perSecondClient, timeSource: timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, diff --git a/src/redis/driver.go b/src/redis/driver.go index 1f4bea32f..0f672df7b 100644 --- a/src/redis/driver.go +++ b/src/redis/driver.go @@ -7,33 +7,20 @@ func (e RedisError) Error() string { return string(e) } -// Interface for a redis connection pool. -type Pool interface { - // Get a connection from the pool. Call Put() on the connection when done. - // Throws RedisError if a connection can not be obtained. - Get() Connection - - // Put a connection back into the pool. - // @param c supplies the connection to put back. - Put(c Connection) -} - -// Interface for a redis connection. -type Connection interface { - // Append a command onto the pipeline queue. - // @param command supplies the command to append. +// Interface for a redis client. +type Client interface { + // DoCmd is used to perform a redis command and retrieve a result. + // + // @param rcv supplies receiver for the result. + // @param cmd supplies the command to append. + // @param key supplies the key to append. // @param args supplies the additional arguments. - PipeAppend(command string, args ...interface{}) + DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error - // Execute the pipeline queue and wait for a response. - // @return a response object. 
- // Throws a RedisError if there was an error fetching the response. - PipeResponse() Response -} + // Once Close() is called all future method calls on the Client will return + // an error + Close() error -// Interface for a redis response. -type Response interface { - // @return the response as an integer. - // Throws a RedisError if the response is not convertable to an integer. - Int() int64 + // NumActiveConns return number of active connections, used in testing. + NumActiveConns() int } diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index dbcf787ee..47d0d5853 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -2,12 +2,13 @@ package redis import ( "crypto/tls" - "net" + "fmt" + "time" + + "github.com/mediocregopher/radix/v3/trace" - "github.com/envoyproxy/ratelimit/src/assert" stats "github.com/lyft/gostats" - "github.com/mediocregopher/radix.v2/pool" - "github.com/mediocregopher/radix.v2/redis" + "github.com/mediocregopher/radix/v3" logger "github.com/sirupsen/logrus" ) @@ -25,18 +26,22 @@ func newPoolStats(scope stats.Scope) poolStats { return ret } -type poolImpl struct { - pool *pool.Pool - stats poolStats -} - -type connectionImpl struct { - client *redis.Client - pending uint +func poolTrace(ps *poolStats) trace.PoolTrace { + return trace.PoolTrace{ + ConnCreated: func(_ trace.PoolConnCreated) { + ps.connectionTotal.Add(1) + ps.connectionActive.Add(1) + }, + ConnClosed: func(_ trace.PoolConnClosed) { + ps.connectionActive.Sub(1) + ps.connectionClose.Add(1) + }, + } } -type responseImpl struct { - response *redis.Resp +type clientImpl struct { + client radix.Client + stats poolStats } func checkError(err error) { @@ -45,78 +50,60 @@ func checkError(err error) { } } -func (this *poolImpl) Get() Connection { - client, err := this.pool.Get() - checkError(err) - this.stats.connectionActive.Inc() - this.stats.connectionTotal.Inc() - return &connectionImpl{client, 0} -} +func NewClientImpl(scope stats.Scope, useTls bool, 
auth string, url string, poolSize int, + pipelineWindow time.Duration, pipelineLimit int) Client { + logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) -func (this *poolImpl) Put(c Connection) { - impl := c.(*connectionImpl) - this.stats.connectionActive.Dec() - if impl.pending == 0 { - this.pool.Put(impl.client) - } else { - // radix does not appear to track if we attempt to put a connection back with pipelined - // responses that have not been flushed. If we are in this state, just kill the connection - // and don't put it back in the pool. - impl.client.Close() - this.stats.connectionClose.Inc() - } -} + df := func(network, addr string) (radix.Conn, error) { + var dialOpts []radix.DialOpt -func NewPoolImpl(scope stats.Scope, useTls bool, auth string, url string, poolSize int) Pool { - logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) - df := func(network, addr string) (*redis.Client, error) { - var conn net.Conn var err error if useTls { - conn, err = tls.Dial("tcp", addr, &tls.Config{}) - } else { - conn, err = net.Dial("tcp", addr) + dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) } - if err != nil { - return nil, err - } - client, err := redis.NewClient(conn) if err != nil { return nil, err } if auth != "" { logger.Warnf("enabling authentication to redis on %s", url) - if err = client.Cmd("AUTH", auth).Err; err != nil { - client.Close() - return nil, err - } + + dialOpts = append(dialOpts, radix.DialAuthPass(auth)) } - return client, nil + + return radix.Dial(network, addr, dialOpts...) 
} - pool, err := pool.NewCustom("tcp", url, poolSize, df) + + stats := newPoolStats(scope) + + // TODO: support sentinel and redis cluster + pool, err := radix.NewPool("tcp", url, poolSize, radix.PoolConnFunc(df), + radix.PoolPipelineWindow(pipelineWindow, pipelineLimit), + radix.PoolWithTrace(poolTrace(&stats)), + ) checkError(err) - return &poolImpl{ - pool: pool, - stats: newPoolStats(scope)} -} -func (this *connectionImpl) PipeAppend(cmd string, args ...interface{}) { - this.client.PipeAppend(cmd, args...) - this.pending++ + // Check if connection is good + var pingResponse string + checkError(pool.Do(radix.Cmd(&pingResponse, "PING"))) + if pingResponse != "PONG" { + checkError(fmt.Errorf("connecting redis error: %s", pingResponse)) + } + + return &clientImpl{ + client: pool, + stats: stats, + } } -func (this *connectionImpl) PipeResponse() Response { - assert.Assert(this.pending > 0) - this.pending-- +func (c *clientImpl) DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error { + return c.client.Do(radix.FlatCmd(rcv, cmd, key, args...)) +} - resp := this.client.PipeResp() - checkError(resp.Err) - return &responseImpl{resp} +func (c *clientImpl) Close() error { + return c.client.Close() } -func (this *responseImpl) Int() int64 { - i, err := this.response.Int64() - checkError(err) - return i +func (c *clientImpl) NumActiveConns() int { + return int(c.stats.connectionActive.Value()) } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index a6a5d0d8f..471caadcc 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -49,12 +49,14 @@ func (runner *Runner) Run() { srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) - var perSecondPool redis.Pool + var perSecondPool redis.Client if s.RedisPerSecond { - perSecondPool = redis.NewPoolImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondUrl, 
s.RedisPerSecondPoolSize) + perSecondPool = redis.NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } - var otherPool redis.Pool - otherPool = redis.NewPoolImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize) + var otherPool redis.Client + otherPool = redis.NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, + s.RedisPipelineWindow, s.RedisPipelineLimit) service := ratelimit.NewService( srv.Runtime(), diff --git a/src/settings/settings.go b/src/settings/settings.go index 8d1335bbb..53ab13472 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -1,6 +1,8 @@ package settings import ( + "time" + "github.com/kelseyhightower/envconfig" "google.golang.org/grpc" ) @@ -9,29 +11,33 @@ type Settings struct { // runtime options GrpcUnaryInterceptor grpc.ServerOption // env config - Port int `envconfig:"PORT" default:"8080"` - GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` - DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` - UseStatsd bool `envconfig:"USE_STATSD" default:"true"` - StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` - StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` - RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` - RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` - RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` - LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` - RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` - RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` - RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` - RedisAuth string `envconfig:"REDIS_AUTH" default:""` - RedisTls bool `envconfig:"REDIS_TLS" default:"false"` - 
RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` - RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` - RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` - RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` - RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` - RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` - ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` - LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` + Port int `envconfig:"PORT" default:"8080"` + GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` + DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` + UseStatsd bool `envconfig:"USE_STATSD" default:"true"` + StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` + StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` + RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` + RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` + RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` + LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` + RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` + RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` + RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` + RedisAuth string `envconfig:"REDIS_AUTH" default:""` + RedisTls bool `envconfig:"REDIS_TLS" default:"false"` + RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"75µs"` + RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"8"` + RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` + RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` + RedisPerSecondUrl string 
`envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` + RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` + RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` + RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` + RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"75µs"` + RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"8"` + ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` + LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` } type Option func(*Settings) diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index e4ddc17b8..044b55ec9 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -9,7 +9,7 @@ import ( ratelimit "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" - gostats "github.com/lyft/gostats" + stats "github.com/lyft/gostats" reflect "reflect" ) @@ -38,6 +38,7 @@ func (m *MockRateLimitConfig) EXPECT() *MockRateLimitConfigMockRecorder { // Dump mocks base method func (m *MockRateLimitConfig) Dump() string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Dump") ret0, _ := ret[0].(string) return ret0 @@ -45,11 +46,13 @@ func (m *MockRateLimitConfig) Dump() string { // Dump indicates an expected call of Dump func (mr *MockRateLimitConfigMockRecorder) Dump() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dump", reflect.TypeOf((*MockRateLimitConfig)(nil).Dump)) } // GetLimit mocks base method func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 *ratelimit.RateLimitDescriptor) *config.RateLimit { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLimit", arg0, arg1, arg2) ret0, _ := 
ret[0].(*config.RateLimit) return ret0 @@ -57,6 +60,7 @@ func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 * // GetLimit indicates an expected call of GetLimit func (mr *MockRateLimitConfigMockRecorder) GetLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLimit", reflect.TypeOf((*MockRateLimitConfig)(nil).GetLimit), arg0, arg1, arg2) } @@ -84,7 +88,8 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor } // Load mocks base method -func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 gostats.Scope) config.RateLimitConfig { +func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Scope) config.RateLimitConfig { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load", arg0, arg1) ret0, _ := ret[0].(config.RateLimitConfig) return ret0 @@ -92,5 +97,6 @@ func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, ar // Load indicates an expected call of Load func (mr *MockRateLimitConfigLoaderMockRecorder) Load(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockRateLimitConfigLoader)(nil).Load), arg0, arg1) } diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index e0ce8288a..98d5a4a97 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -3,4 +3,4 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace //go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate go run github.com/golang/mock/mockgen 
-destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Client,TimeSource,JitterRandSource diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index df24212a2..be5f5a34f 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -1,227 +1,206 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource) +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: RateLimitCache,Client,TimeSource,JitterRandSource) +// Package mock_redis is a generated GoMock package. package mock_redis import ( - ratelimit "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + context "context" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" config "github.com/envoyproxy/ratelimit/src/config" - redis "github.com/envoyproxy/ratelimit/src/redis" gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" + reflect "reflect" ) -// Mock of RateLimitCache interface +// MockRateLimitCache is a mock of RateLimitCache interface type MockRateLimitCache struct { ctrl *gomock.Controller - recorder *_MockRateLimitCacheRecorder + recorder *MockRateLimitCacheMockRecorder } -// Recorder for MockRateLimitCache (not exported) -type _MockRateLimitCacheRecorder struct { +// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache +type MockRateLimitCacheMockRecorder struct { mock *MockRateLimitCache } +// NewMockRateLimitCache creates a new mock instance func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { mock := &MockRateLimitCache{ctrl: ctrl} - mock.recorder = 
&_MockRateLimitCacheRecorder{mock} + mock.recorder = &MockRateLimitCacheMockRecorder{mock} return mock } -func (_m *MockRateLimitCache) EXPECT() *_MockRateLimitCacheRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { + return m.recorder } -func (_m *MockRateLimitCache) DoLimit(_param0 context.Context, _param1 *ratelimit.RateLimitRequest, _param2 []*config.RateLimit) []*ratelimit.RateLimitResponse_DescriptorStatus { - ret := _m.ctrl.Call(_m, "DoLimit", _param0, _param1, _param2) - ret0, _ := ret[0].([]*ratelimit.RateLimitResponse_DescriptorStatus) +// DoLimit mocks base method +func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) + ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) return ret0 } -func (_mr *_MockRateLimitCacheRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DoLimit", arg0, arg1, arg2) +// DoLimit indicates an expected call of DoLimit +func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) } -// Mock of Pool interface -type MockPool struct { +// MockClient is a mock of Client interface +type MockClient struct { ctrl *gomock.Controller - recorder *_MockPoolRecorder + recorder *MockClientMockRecorder } -// Recorder for MockPool (not exported) -type _MockPoolRecorder struct { - mock *MockPool +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient } -func NewMockPool(ctrl *gomock.Controller) *MockPool { - mock := 
&MockPool{ctrl: ctrl} - mock.recorder = &_MockPoolRecorder{mock} +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} return mock } -func (_m *MockPool) EXPECT() *_MockPoolRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder } -func (_m *MockPool) Get() redis.Connection { - ret := _m.ctrl.Call(_m, "Get") - ret0, _ := ret[0].(redis.Connection) +// Close mocks base method +func (m *MockClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) return ret0 } -func (_mr *_MockPoolRecorder) Get() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Get") +// Close indicates an expected call of Close +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) } -func (_m *MockPool) Put(_param0 redis.Connection) { - _m.ctrl.Call(_m, "Put", _param0) -} - -func (_mr *_MockPoolRecorder) Put(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Put", arg0) -} - -// Mock of Connection interface -type MockConnection struct { - ctrl *gomock.Controller - recorder *_MockConnectionRecorder -} - -// Recorder for MockConnection (not exported) -type _MockConnectionRecorder struct { - mock *MockConnection -} - -func NewMockConnection(ctrl *gomock.Controller) *MockConnection { - mock := &MockConnection{ctrl: ctrl} - mock.recorder = &_MockConnectionRecorder{mock} - return mock -} - -func (_m *MockConnection) EXPECT() *_MockConnectionRecorder { - return _m.recorder -} - -func (_m *MockConnection) PipeAppend(_param0 string, _param1 ...interface{}) { - _s := []interface{}{_param0} - for _, _x := range _param1 { - _s = 
append(_s, _x) +// DoCmd mocks base method +func (m *MockClient) DoCmd(arg0 interface{}, arg1, arg2 string, arg3 ...interface{}) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) } - _m.ctrl.Call(_m, "PipeAppend", _s...) -} - -func (_mr *_MockConnectionRecorder) PipeAppend(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0}, arg1...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "PipeAppend", _s...) -} - -func (_m *MockConnection) PipeResponse() redis.Response { - ret := _m.ctrl.Call(_m, "PipeResponse") - ret0, _ := ret[0].(redis.Response) + ret := m.ctrl.Call(m, "DoCmd", varargs...) + ret0, _ := ret[0].(error) return ret0 } -func (_mr *_MockConnectionRecorder) PipeResponse() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "PipeResponse") -} - -// Mock of Response interface -type MockResponse struct { - ctrl *gomock.Controller - recorder *_MockResponseRecorder -} - -// Recorder for MockResponse (not exported) -type _MockResponseRecorder struct { - mock *MockResponse -} - -func NewMockResponse(ctrl *gomock.Controller) *MockResponse { - mock := &MockResponse{ctrl: ctrl} - mock.recorder = &_MockResponseRecorder{mock} - return mock +// DoCmd indicates an expected call of DoCmd +func (mr *MockClientMockRecorder) DoCmd(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) 
} -func (_m *MockResponse) EXPECT() *_MockResponseRecorder { - return _m.recorder -} - -func (_m *MockResponse) Int() int64 { - ret := _m.ctrl.Call(_m, "Int") - ret0, _ := ret[0].(int64) +// NumActiveConns mocks base method +func (m *MockClient) NumActiveConns() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumActiveConns") + ret0, _ := ret[0].(int) return ret0 } -func (_mr *_MockResponseRecorder) Int() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Int") +// NumActiveConns indicates an expected call of NumActiveConns +func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } -// Mock of TimeSource interface +// MockTimeSource is a mock of TimeSource interface type MockTimeSource struct { ctrl *gomock.Controller - recorder *_MockTimeSourceRecorder + recorder *MockTimeSourceMockRecorder } -// Recorder for MockTimeSource (not exported) -type _MockTimeSourceRecorder struct { +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { mock *MockTimeSource } +// NewMockTimeSource creates a new mock instance func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &_MockTimeSourceRecorder{mock} + mock.recorder = &MockTimeSourceMockRecorder{mock} return mock } -func (_m *MockTimeSource) EXPECT() *_MockTimeSourceRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder } -func (_m *MockTimeSource) UnixNow() int64 { - ret := _m.ctrl.Call(_m, "UnixNow") +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") ret0, _ := ret[0].(int64) return ret0 } -func (_mr 
*_MockTimeSourceRecorder) UnixNow() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "UnixNow") +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) } -// Mock of JitterRandSource interface +// MockJitterRandSource is a mock of JitterRandSource interface type MockJitterRandSource struct { ctrl *gomock.Controller - recorder *_MockJitterRandSourceRecorder + recorder *MockJitterRandSourceMockRecorder } -// Recorder for MockJitterRandSource (not exported) -type _MockJitterRandSourceRecorder struct { +// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource +type MockJitterRandSourceMockRecorder struct { mock *MockJitterRandSource } +// NewMockJitterRandSource creates a new mock instance func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { mock := &MockJitterRandSource{ctrl: ctrl} - mock.recorder = &_MockJitterRandSourceRecorder{mock} + mock.recorder = &MockJitterRandSourceMockRecorder{mock} return mock } -func (_m *MockJitterRandSource) EXPECT() *_MockJitterRandSourceRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { + return m.recorder } -func (_m *MockJitterRandSource) Int63() int64 { - ret := _m.ctrl.Call(_m, "Int63") +// Int63 mocks base method +func (m *MockJitterRandSource) Int63() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Int63") ret0, _ := ret[0].(int64) return ret0 } -func (_mr *_MockJitterRandSourceRecorder) Int63() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Int63") +// Int63 indicates an expected call of Int63 +func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { + mr.mock.ctrl.T.Helper() + return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) } -func (_m *MockJitterRandSource) Seed(_param0 int64) { - _m.ctrl.Call(_m, "Seed", _param0) +// Seed mocks base method +func (m *MockJitterRandSource) Seed(arg0 int64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Seed", arg0) } -func (_mr *_MockJitterRandSourceRecorder) Seed(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Seed", arg0) +// Seed indicates an expected call of Seed +func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) } diff --git a/test/mocks/runtime/loader/loader.go b/test/mocks/runtime/loader/loader.go index 16bbc1099..da00c6498 100644 --- a/test/mocks/runtime/loader/loader.go +++ b/test/mocks/runtime/loader/loader.go @@ -35,16 +35,19 @@ func (m *MockIFace) EXPECT() *MockIFaceMockRecorder { // AddUpdateCallback mocks base method func (m *MockIFace) AddUpdateCallback(arg0 chan<- int) { + m.ctrl.T.Helper() m.ctrl.Call(m, "AddUpdateCallback", arg0) } // AddUpdateCallback indicates an expected call of AddUpdateCallback func (mr *MockIFaceMockRecorder) AddUpdateCallback(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUpdateCallback", reflect.TypeOf((*MockIFace)(nil).AddUpdateCallback), arg0) } // Snapshot mocks base method func (m *MockIFace) Snapshot() snapshot.IFace { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot") ret0, _ := ret[0].(snapshot.IFace) return ret0 @@ -52,5 +55,6 @@ func (m *MockIFace) Snapshot() snapshot.IFace { // Snapshot indicates an expected call of Snapshot func (mr *MockIFaceMockRecorder) Snapshot() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockIFace)(nil).Snapshot)) } diff 
--git a/test/mocks/runtime/snapshot/snapshot.go b/test/mocks/runtime/snapshot/snapshot.go index 432e34693..a56fe5a5b 100644 --- a/test/mocks/runtime/snapshot/snapshot.go +++ b/test/mocks/runtime/snapshot/snapshot.go @@ -36,6 +36,7 @@ func (m *MockIFace) EXPECT() *MockIFaceMockRecorder { // Entries mocks base method func (m *MockIFace) Entries() map[string]*entry.Entry { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Entries") ret0, _ := ret[0].(map[string]*entry.Entry) return ret0 @@ -43,11 +44,13 @@ func (m *MockIFace) Entries() map[string]*entry.Entry { // Entries indicates an expected call of Entries func (mr *MockIFaceMockRecorder) Entries() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Entries", reflect.TypeOf((*MockIFace)(nil).Entries)) } // FeatureEnabled mocks base method func (m *MockIFace) FeatureEnabled(arg0 string, arg1 uint64) bool { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FeatureEnabled", arg0, arg1) ret0, _ := ret[0].(bool) return ret0 @@ -55,11 +58,13 @@ func (m *MockIFace) FeatureEnabled(arg0 string, arg1 uint64) bool { // FeatureEnabled indicates an expected call of FeatureEnabled func (mr *MockIFaceMockRecorder) FeatureEnabled(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeatureEnabled", reflect.TypeOf((*MockIFace)(nil).FeatureEnabled), arg0, arg1) } // FeatureEnabledForID mocks base method func (m *MockIFace) FeatureEnabledForID(arg0 string, arg1 uint64, arg2 uint32) bool { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FeatureEnabledForID", arg0, arg1, arg2) ret0, _ := ret[0].(bool) return ret0 @@ -67,11 +72,13 @@ func (m *MockIFace) FeatureEnabledForID(arg0 string, arg1 uint64, arg2 uint32) b // FeatureEnabledForID indicates an expected call of FeatureEnabledForID func (mr *MockIFaceMockRecorder) FeatureEnabledForID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeatureEnabledForID", reflect.TypeOf((*MockIFace)(nil).FeatureEnabledForID), arg0, arg1, arg2) } // Get mocks base method func (m *MockIFace) Get(arg0 string) string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(string) return ret0 @@ -79,11 +86,13 @@ func (m *MockIFace) Get(arg0 string) string { // Get indicates an expected call of Get func (mr *MockIFaceMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockIFace)(nil).Get), arg0) } // GetInteger mocks base method func (m *MockIFace) GetInteger(arg0 string, arg1 uint64) uint64 { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInteger", arg0, arg1) ret0, _ := ret[0].(uint64) return ret0 @@ -91,11 +100,13 @@ func (m *MockIFace) GetInteger(arg0 string, arg1 uint64) uint64 { // GetInteger indicates an expected call of GetInteger func (mr *MockIFaceMockRecorder) GetInteger(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInteger", reflect.TypeOf((*MockIFace)(nil).GetInteger), arg0, arg1) } // GetModified mocks base method func (m *MockIFace) GetModified(arg0 string) time.Time { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetModified", arg0) ret0, _ := ret[0].(time.Time) return ret0 @@ -103,11 +114,13 @@ func (m *MockIFace) GetModified(arg0 string) time.Time { // GetModified indicates an expected call of GetModified func (mr *MockIFaceMockRecorder) GetModified(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModified", reflect.TypeOf((*MockIFace)(nil).GetModified), arg0) } // Keys mocks base method func (m *MockIFace) Keys() []string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Keys") ret0, _ := ret[0].([]string) return ret0 @@ -115,15 +128,18 @@ func (m *MockIFace) Keys() []string { // Keys indicates an 
expected call of Keys func (mr *MockIFaceMockRecorder) Keys() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keys", reflect.TypeOf((*MockIFace)(nil).Keys)) } // SetEntry mocks base method func (m *MockIFace) SetEntry(arg0 string, arg1 *entry.Entry) { + m.ctrl.T.Helper() m.ctrl.Call(m, "SetEntry", arg0, arg1) } // SetEntry indicates an expected call of SetEntry func (mr *MockIFaceMockRecorder) SetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEntry", reflect.TypeOf((*MockIFace)(nil).SetEntry), arg0, arg1) } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go new file mode 100644 index 000000000..3463f25a0 --- /dev/null +++ b/test/redis/bench_test.go @@ -0,0 +1,93 @@ +package redis_test + +import ( + "context" + "runtime" + "testing" + "time" + + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/redis" + stats "github.com/lyft/gostats" + + "math/rand" + + "github.com/envoyproxy/ratelimit/test/common" +) + +func BenchmarkParallelDoLimit(b *testing.B) { + b.Skip("Skip benchmark") + + b.ReportAllocs() + + // See https://github.com/mediocregopher/radix/blob/v3.5.1/bench/bench_test.go#L176 + parallel := runtime.GOMAXPROCS(0) + poolSize := parallel * runtime.GOMAXPROCS(0) + + do := func(b *testing.B, fn func() error) { + b.ResetTimer() + b.SetParallelism(parallel) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := fn(); err != nil { + b.Fatal(err) + } + } + }) + } + + mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { + return func(b *testing.B) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + client := redis.NewClientImpl(statsStore, false, "", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + defer client.Close() + + cache := 
redis.NewRateLimitCacheImpl(client, nil, redis.NewTimeSourceImpl(), rand.New(redis.NewLockedSource(time.Now().Unix())), 10, nil) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + // wait for the pool to fill up + for { + time.Sleep(50 * time.Millisecond) + if client.NumActiveConns() >= poolSize { + break + } + } + + b.ResetTimer() + + do(b, func() error { + cache.DoLimit(context.Background(), request, limits) + return nil + }) + } + } + + b.Run("no pipeline", mkDoLimitBench(0, 0)) + + b.Run("pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1)) + b.Run("pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1)) + b.Run("pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1)) + b.Run("pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1)) + + b.Run("pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2)) + b.Run("pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2)) + b.Run("pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2)) + b.Run("pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2)) + + b.Run("pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4)) + b.Run("pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4)) + b.Run("pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4)) + b.Run("pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4)) + + b.Run("pipeline 35us 8", mkDoLimitBench(35*time.Microsecond, 8)) + b.Run("pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8)) + b.Run("pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8)) + b.Run("pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8)) + + b.Run("pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16)) + b.Run("pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16)) + b.Run("pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16)) + b.Run("pipeline 300us 16", 
mkDoLimitBench(300*time.Microsecond, 16)) +} diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index d4f94fc97..e0807689b 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -2,6 +2,7 @@ package redis_test import ( "testing" + "time" "github.com/coocood/freecache" @@ -12,6 +13,7 @@ import ( "math/rand" + "github.com/alicebob/miniredis/v2" "github.com/envoyproxy/ratelimit/test/common" mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" "github.com/golang/mock/gomock" @@ -29,40 +31,27 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) - perSecondPool := mock_redis.NewMockPool(controller) + client := mock_redis.NewMockClient(controller) + perSecondClient := mock_redis.NewMockClient(controller) timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - perSecondConnection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) var cache redis.RateLimitCache if usePerSecondRedis { - cache = redis.NewRateLimitCacheImpl(pool, perSecondPool, timeSource, rand.New(rand.NewSource(1)), 0, nil) + cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil) } else { - cache = redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) + cache = redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) } statsStore := stats.NewStore(stats.NewNullSink(), false) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - var connUsed *mock_redis.MockConnection + var clientUsed *mock_redis.MockClient if usePerSecondRedis { - connUsed = perSecondConnection + clientUsed = 
perSecondClient } else { - connUsed = connection + clientUsed = client } - connUsed.EXPECT().PipeAppend("INCRBY", "domain_key_value_1234", uint32(1)) - connUsed.EXPECT().PipeAppend("EXPIRE", "domain_key_value_1234", int64(1)) - connUsed.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) - connUsed.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) + + clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(0, uint32(5)) + clientUsed.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -74,21 +63,11 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) + clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)) - connection.EXPECT().PipeAppend( + clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(0, uint32(11)) + clientUsed.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) request = common.NewRateLimitRequest( "domain", @@ -107,27 +86,14 @@ func testRedis(usePerSecondRedis bool) 
func(*testing.T) { assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) + clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key3_value3_997200", uint32(1)) - connection.EXPECT().PipeAppend( + clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(0, uint32(11)) + clientUsed.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key3_value3_997200", int64(3600)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)) - connection.EXPECT().PipeAppend( + clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(0, uint32(13)) + clientUsed.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) request = common.NewRateLimitRequest( "domain", @@ -193,26 +159,19 @@ func TestOverLimitWithLocalCache(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) + client := mock_redis.NewMockClient(controller) timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) localCache := freecache.NewCache(100) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 
0, localCache) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. Under Near Limit Ratio - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(11)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -232,15 +191,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) // Test Near Limit Stats. 
At Near Limit Ratio, still OK - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(13)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -255,15 +209,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test Over limit stats - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(16)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(16)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -278,14 +227,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + 
client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) - connection.EXPECT().PipeResponse().Times(0) - response.EXPECT().Int().Times(0) - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}}, @@ -304,23 +249,16 @@ func TestNearLimit(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) + client := mock_redis.NewMockClient(controller) timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) // Test Near Limit Stats. Under Near Limit Ratio - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(11)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -336,15 +274,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // Test Near Limit Stats. 
At Near Limit Ratio, still OK - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(13)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -356,15 +289,10 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(16)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(16)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -376,14 +304,9 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key5_value5_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key5_value5_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) 
- connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(0, uint32(5)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} @@ -396,14 +319,9 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // All of it under limit, some over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key6_value6_1234", uint32(2)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key6_value6_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(7)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(0, uint32(7)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} @@ -416,14 +334,9 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // All of it under limit, all of it over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key7_value7_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key7_value7_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(19)) - connection.EXPECT().PipeResponse() 
- pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(0, uint32(19)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} @@ -436,14 +349,9 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) // Some of it over limit, all of it over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key8_value8_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key8_value8_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(22)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(0, uint32(22)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} @@ -456,14 +364,9 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // Some of it in all three places - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key9_value9_1234", uint32(7)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key9_value9_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(22)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + 
client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(0, uint32(22)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} @@ -476,14 +379,9 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) // all of it over limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key10_value10_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key10_value10_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(30)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(0, uint32(30)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} @@ -501,23 +399,16 @@ func TestRedisWithJitter(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) + client := mock_redis.NewMockClient(controller) timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) jitterSource := mock_redis.NewMockJitterRandSource(controller) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(jitterSource), 3600, nil) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, 
rand.New(jitterSource), 3600, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) jitterSource.EXPECT().Int63().Return(int64(100)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key_value_1234", uint32(1)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key_value_1234", int64(101)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(0, uint32(5)) + client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -529,3 +420,106 @@ func TestRedisWithJitter(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) } + +func mustNewRedisServer() *miniredis.Miniredis { + srv, err := miniredis.Run() + if err != nil { + panic(err) + } + + return srv +} + +func TestNewClientImpl(t *testing.T) { + redisAuth := "123" + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(auth, addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, auth, addr, 1, 1*time.Millisecond, 1) + } + + t.Run("connection refused", func(t *testing.T) { + assert.PanicsWithError(t, "dial tcp 127.0.0.1:12345: connect: connection refused", func() { + // It's possible there is a redis server listening on 6379 in ci environment, so + // use a random port. 
+ mkRedisClient("", "localhost:12345") + }) + }) + + t.Run("ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClient("", redisSrv.Addr()) + }) + assert.NotNil(t, client) + }) + + t.Run("auth fail", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + redisSrv.RequireAuth(redisAuth) + + assert.PanicsWithError(t, "NOAUTH Authentication required.", func() { + mkRedisClient("", redisSrv.Addr()) + }) + }) + + t.Run("auth pass", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + redisSrv.RequireAuth(redisAuth) + + assert.NotPanics(t, func() { + mkRedisClient(redisAuth, redisSrv.Addr()) + }) + }) +} + +func TestDoCmd(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, "", addr, 1, 0, 0) + } + + t.Run("SETGET ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res string + + assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) + assert.Nil(t, client.DoCmd(&res, "GET", "foo")) + assert.Equal(t, "bar", res) + }) + + t.Run("INCRBY ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res uint32 + hits := uint32(1) + + assert.Nil(t, client.DoCmd(&res, "INCRBY", "a", hits)) + assert.Equal(t, hits, res) + assert.Nil(t, client.DoCmd(&res, "INCRBY", "a", hits)) + assert.Equal(t, uint32(2), res) + }) + + t.Run("connection broken", func(t *testing.T) { + redisSrv := mustNewRedisServer() + client := mkRedisClient(redisSrv.Addr()) + + assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) + + redisSrv.Close() + assert.EqualError(t, client.DoCmd(nil, "GET", "foo"), "EOF") + }) +} From 
adc8a04e7ba6071369a66cb0d5041ca83e8c5220 Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Fri, 29 May 2020 07:45:05 -0700 Subject: [PATCH 08/41] cache_impl_test.go: fix failing test with ipv6 (#144) A newly-added test in #137 checks the exact text of an error message which seems to vary when the network is tcp4 vs tcp6. This change relaxes the assertion to look for "connection refused" in a panic without making assumptions about what an IP address looks like. Example failure: --- FAIL: TestNewClientImpl (0.00s) --- FAIL: TestNewClientImpl/connection_refused (0.00s) cache_impl_test.go:442: Error Trace: cache_impl_test.go:442 Error: func (assert.PanicTestFunc)(0x1724110) should panic with error message: "dial tcp 127.0.0.1:12345: connect: connection refused" Panic value: "dial tcp [::1]:12345: connect: connection refused" Panic stack: goroutine 27 [running]: The testify assert package doesn't seem to support inexact matching on error messages, so the code gets a bit uglier than before. 
Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- test/redis/cache_impl_test.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index e0807689b..34339e292 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -430,6 +430,17 @@ func mustNewRedisServer() *miniredis.Miniredis { return srv } +func expectPanicError(t *testing.T, f assert.PanicTestFunc) (result error) { + t.Helper() + defer func() { + panicResult := recover() + assert.NotNil(t, panicResult, "Expected a panic") + result = panicResult.(error) + }() + f() + return +} + func TestNewClientImpl(t *testing.T) { redisAuth := "123" statsStore := stats.NewStore(stats.NewNullSink(), false) @@ -439,11 +450,10 @@ func TestNewClientImpl(t *testing.T) { } t.Run("connection refused", func(t *testing.T) { - assert.PanicsWithError(t, "dial tcp 127.0.0.1:12345: connect: connection refused", func() { - // It's possible there is a redis server listening on 6379 in ci environment, so - // use a random port. - mkRedisClient("", "localhost:12345") - }) + // It's possible there is a redis server listening on 6379 in ci environment, so + // use a random port. + panicErr := expectPanicError(t, func() { mkRedisClient("", "localhost:12345") }) + assert.Contains(t, panicErr.Error(), "connection refused") }) t.Run("ok", func(t *testing.T) { From 46fe2f190c4b3e876e11bb38e9981d5277b51c3f Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Fri, 29 May 2020 07:48:15 -0700 Subject: [PATCH 09/41] Split redis-specific logic from generic key-value store logic (#142) This is a pure refactoring with no behavior changes. It's a step toward being able to add memcache as a backend (see #140). This PR moves RateLimitCache from the redis package to a new "limiter" package, along with code for time/jitter, local cache stats, and constructing cache keys. All that can be reused with memcache. 
After this PR, the redis package is imported in exactly two places: - in service_cmd/runner/runner.go to call redis.NewRateLimiterCacheImplFromSettings() - in service/ratelimit.go in ShouldRateLimit to identify if a recovered panic is a redis.RedisError. If so, a stat is incremented and the panic() propagation is ended and in favor of returning the error as a the function result. The PR also includes changes by goimports to test/service/ratelimit_test.go so that the difference between package name vs file path name is explicit instead of implicit. Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- src/{redis => limiter}/cache.go | 2 +- src/limiter/cache_key.go | 90 +++++++++++ src/{redis => limiter}/local_cache_stats.go | 2 +- src/limiter/time.go | 40 +++++ src/redis/cache_impl.go | 170 +++++--------------- src/server/server_impl.go | 5 +- src/service/ratelimit.go | 7 +- src/service_cmd/runner/runner.go | 24 +-- test/mocks/limiter/limiter.go | 136 ++++++++++++++++ test/mocks/mocks.go | 3 +- test/mocks/redis/redis.go | 128 +-------------- test/redis/bench_test.go | 3 +- test/redis/cache_impl_test.go | 16 +- test/service/ratelimit_test.go | 16 +- 14 files changed, 344 insertions(+), 298 deletions(-) rename src/{redis => limiter}/cache.go (98%) create mode 100644 src/limiter/cache_key.go rename src/{redis => limiter}/local_cache_stats.go (98%) create mode 100644 src/limiter/time.go create mode 100644 test/mocks/limiter/limiter.go diff --git a/src/redis/cache.go b/src/limiter/cache.go similarity index 98% rename from src/redis/cache.go rename to src/limiter/cache.go index b090cc995..2ca16956f 100644 --- a/src/redis/cache.go +++ b/src/limiter/cache.go @@ -1,4 +1,4 @@ -package redis +package limiter import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go new file mode 100644 index 000000000..65540fa22 --- /dev/null +++ b/src/limiter/cache_key.go @@ -0,0 +1,90 @@ +package 
limiter + +import ( + "bytes" + "strconv" + "sync" + + pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" +) + +type CacheKeyGenerator struct { + // bytes.Buffer pool used to efficiently generate cache keys. + bufferPool sync.Pool +} + +func NewCacheKeyGenerator() CacheKeyGenerator { + return CacheKeyGenerator{bufferPool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }} +} + +type CacheKey struct { + Key string + // True if the key corresponds to a limit with a SECOND unit. False otherwise. + PerSecond bool +} + +func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { + return unit == pb.RateLimitResponse_RateLimit_SECOND +} + +// Convert a rate limit into a time divider. +// @param unit supplies the unit to convert. +// @return the divider to use in time computations. +func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { + switch unit { + case pb.RateLimitResponse_RateLimit_SECOND: + return 1 + case pb.RateLimitResponse_RateLimit_MINUTE: + return 60 + case pb.RateLimitResponse_RateLimit_HOUR: + return 60 * 60 + case pb.RateLimitResponse_RateLimit_DAY: + return 60 * 60 * 24 + } + + panic("should not get here") +} + +// Generate a cache key for a limit lookup. +// @param domain supplies the cache key domain. +// @param descriptor supplies the descriptor to generate the key for. +// @param limit supplies the rate limit to generate the key for (may be nil). +// @param now supplies the current unix time. +// @return CacheKey struct. 
+func (this *CacheKeyGenerator) GenerateCacheKey( + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { + + if limit == nil { + return CacheKey{ + Key: "", + PerSecond: false, + } + } + + b := this.bufferPool.Get().(*bytes.Buffer) + defer this.bufferPool.Put(b) + b.Reset() + + b.WriteString(domain) + b.WriteByte('_') + + for _, entry := range descriptor.Entries { + b.WriteString(entry.Key) + b.WriteByte('_') + b.WriteString(entry.Value) + b.WriteByte('_') + } + + divider := UnitToDivider(limit.Limit.Unit) + b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) + + return CacheKey{ + Key: b.String(), + PerSecond: isPerSecondLimit(limit.Limit.Unit)} +} diff --git a/src/redis/local_cache_stats.go b/src/limiter/local_cache_stats.go similarity index 98% rename from src/redis/local_cache_stats.go rename to src/limiter/local_cache_stats.go index 60a94194f..d0d59dc27 100644 --- a/src/redis/local_cache_stats.go +++ b/src/limiter/local_cache_stats.go @@ -1,4 +1,4 @@ -package redis +package limiter import ( "github.com/coocood/freecache" diff --git a/src/limiter/time.go b/src/limiter/time.go new file mode 100644 index 000000000..e6a779e70 --- /dev/null +++ b/src/limiter/time.go @@ -0,0 +1,40 @@ +package limiter + +import ( + "math/rand" + "sync" + "time" +) + +type timeSourceImpl struct{} + +func NewTimeSourceImpl() TimeSource { + return &timeSourceImpl{} +} + +func (this *timeSourceImpl) UnixNow() int64 { + return time.Now().Unix() +} + +// rand for jitter. 
+type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func NewLockedSource(seed int64) JitterRandSource { + return &lockedSource{src: rand.NewSource(seed)} +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 4e37d10f0..1beb0de43 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,18 +1,16 @@ package redis import ( - "bytes" "math" "math/rand" - "strconv" - "sync" - "time" "github.com/coocood/freecache" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/server" + "github.com/envoyproxy/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -24,72 +22,11 @@ type rateLimitCacheImpl struct { // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. perSecondClient Client - timeSource TimeSource + timeSource limiter.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - // bytes.Buffer pool used to efficiently generate cache keys. - bufferPool sync.Pool - localCache *freecache.Cache -} - -// Convert a rate limit into a time divider. -// @param unit supplies the unit to convert. -// @return the divider to use in time computations. 
-func unitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { - switch unit { - case pb.RateLimitResponse_RateLimit_SECOND: - return 1 - case pb.RateLimitResponse_RateLimit_MINUTE: - return 60 - case pb.RateLimitResponse_RateLimit_HOUR: - return 60 * 60 - case pb.RateLimitResponse_RateLimit_DAY: - return 60 * 60 * 24 - } - - panic("should not get here") -} - -// Generate a cache key for a limit lookup. -// @param domain supplies the cache key domain. -// @param descriptor supplies the descriptor to generate the key for. -// @param limit supplies the rate limit to generate the key for (may be nil). -// @param now supplies the current unix time. -// @return cacheKey struct. -func (this *rateLimitCacheImpl) generateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) cacheKey { - - if limit == nil { - return cacheKey{ - key: "", - perSecond: false, - } - } - - b := this.bufferPool.Get().(*bytes.Buffer) - defer this.bufferPool.Put(b) - b.Reset() - - b.WriteString(domain) - b.WriteByte('_') - - for _, entry := range descriptor.Entries { - b.WriteString(entry.Key) - b.WriteByte('_') - b.WriteString(entry.Value) - b.WriteByte('_') - } - - divider := unitToDivider(limit.Limit.Unit) - b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) - - return cacheKey{ - key: b.String(), - perSecond: isPerSecondLimit(limit.Limit.Unit)} -} - -func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { - return unit == pb.RateLimitResponse_RateLimit_SECOND + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache } func max(a uint32, b uint32) uint32 { @@ -99,12 +36,6 @@ func max(a uint32, b uint32) uint32 { return b } -type cacheKey struct { - key string - // True if the key corresponds to a limit with a SECOND unit. False otherwise. 
- perSecond bool -} - func pipelineAppend(client Client, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) (err error) { if err = client.DoCmd(result, "INCRBY", key, hitsAddend); err != nil { return @@ -125,14 +56,15 @@ func (this *rateLimitCacheImpl) DoLimit( // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. hitsAddend := max(1, request.HitsAddend) - // First build a list of all cache keys that we are actually going to hit. generateCacheKey() + // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() // returns an empty string in the key if there is no limit so that we can keep the arrays // all the same size. assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]cacheKey, len(request.Descriptors)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) now := this.timeSource.UnixNow() for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.generateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { @@ -146,34 +78,34 @@ func (this *rateLimitCacheImpl) DoLimit( // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { continue } if this.localCache != nil { // Get returns the value or not found error. 
- _, err := this.localCache.Get([]byte(cacheKey.key)) + _, err := this.localCache.Get([]byte(cacheKey.Key)) if err == nil { isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.key) + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) continue } } - logger.Debugf("looking up cache key: %s", cacheKey.key) + logger.Debugf("looking up cache key: %s", cacheKey.Key) - expirationSeconds := unitToDivider(limits[i].Limit.Unit) + expirationSeconds := limiter.UnitToDivider(limits[i].Limit.Unit) if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. - if this.perSecondClient != nil && cacheKey.perSecond { - if err = pipelineAppend(this.perSecondClient, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + if this.perSecondClient != nil && cacheKey.PerSecond { + if err = pipelineAppend(this.perSecondClient, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { break } } else { - if err = pipelineAppend(this.client, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + if err = pipelineAppend(this.client, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { break } } @@ -184,7 +116,7 @@ func (this *rateLimitCacheImpl) DoLimit( responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -213,7 +145,7 @@ func (this *rateLimitCacheImpl) DoLimit( // We need to know it in both the OK and OVER_LIMIT scenarios. 
nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * config.NearLimitRatio))) - logger.Debugf("cache key: %s current: %d", cacheKey.key, limitAfterIncrease) + logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease) if limitAfterIncrease > overLimitThreshold { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ @@ -244,9 +176,9 @@ func (this *rateLimitCacheImpl) DoLimit( // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(cacheKey.key), []byte{}, int(unitToDivider(limits[i].Limit.Unit))) + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(limiter.UnitToDivider(limits[i].Limit.Unit))) if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.key) + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) } } } else { @@ -275,55 +207,33 @@ func (this *rateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache { +func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) limiter.RateLimitCache { return &rateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, timeSource: timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, - bufferPool: newBufferPool(), + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), localCache: localCache, } } -func newBufferPool() sync.Pool { - return sync.Pool{ - New: func() interface{} 
{ - return new(bytes.Buffer) - }, +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { + var perSecondPool Client + if s.RedisPerSecond { + perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } -} - -type timeSourceImpl struct{} - -func NewTimeSourceImpl() TimeSource { - return &timeSourceImpl{} -} - -func (this *timeSourceImpl) UnixNow() int64 { - return time.Now().Unix() -} - -// rand for jitter. -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func NewLockedSource(seed int64) JitterRandSource { - return &lockedSource{src: rand.NewSource(seed)} -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() + var otherPool Client + otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, + s.RedisPipelineWindow, s.RedisPipelineLimit) + + return NewRateLimitCacheImpl( + otherPool, + perSecondPool, + timeSource, + jitterRand, + expirationJitterMaxSeconds, + localCache) } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b0349594e..da92df840 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -9,8 +9,6 @@ import ( "net/http/pprof" "sort" - "github.com/envoyproxy/ratelimit/src/redis" - "os" "os/signal" "syscall" @@ -19,6 +17,7 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" "github.com/gorilla/mux" reuseport 
"github.com/kavu/go_reuseport" @@ -159,7 +158,7 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts ret.scope = ret.store.Scope(name) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { - ret.store.AddStatGenerator(redis.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } // setup runtime diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 3982a39ed..07a8c3132 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -7,9 +7,10 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -52,7 +53,7 @@ type service struct { configLoader config.RateLimitConfigLoader config config.RateLimitConfig runtimeUpdateEvent chan int - cache redis.RateLimitCache + cache limiter.RateLimitCache stats serviceStats rlStatsScope stats.Scope legacy *legacyService @@ -174,7 +175,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { return this.config } -func NewService(runtime loader.IFace, cache redis.RateLimitCache, +func NewService(runtime loader.IFace, cache limiter.RateLimitCache, configLoader config.RateLimitConfigLoader, stats stats.Scope) RateLimitServiceServer { newService := &service{ diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 471caadcc..5e43307ad 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -14,6 +14,7 @@ import ( pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" 
"github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/server" ratelimit "github.com/envoyproxy/ratelimit/src/service" @@ -49,24 +50,15 @@ func (runner *Runner) Run() { srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) - var perSecondPool redis.Client - if s.RedisPerSecond { - perSecondPool = redis.NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, - s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) - } - var otherPool redis.Client - otherPool = redis.NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, - s.RedisPipelineWindow, s.RedisPipelineLimit) - service := ratelimit.NewService( srv.Runtime(), - redis.NewRateLimitCacheImpl( - otherPool, - perSecondPool, - redis.NewTimeSourceImpl(), - rand.New(redis.NewLockedSource(time.Now().Unix())), - s.ExpirationJitterMaxSeconds, - localCache), + redis.NewRateLimiterCacheImplFromSettings( + s, + localCache, + srv, + limiter.NewTimeSourceImpl(), + rand.New(limiter.NewLockedSource(time.Now().Unix())), + s.ExpirationJitterMaxSeconds), config.NewRateLimitConfigLoaderImpl(), srv.Scope().Scope("service")) diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go new file mode 100644 index 000000000..f5c9f8bfa --- /dev/null +++ b/test/mocks/limiter/limiter.go @@ -0,0 +1,136 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/limiter (interfaces: RateLimitCache,TimeSource,JitterRandSource) + +// Package mock_limiter is a generated GoMock package. 
+package mock_limiter + +import ( + context "context" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + config "github.com/envoyproxy/ratelimit/src/config" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRateLimitCache is a mock of RateLimitCache interface +type MockRateLimitCache struct { + ctrl *gomock.Controller + recorder *MockRateLimitCacheMockRecorder +} + +// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache +type MockRateLimitCacheMockRecorder struct { + mock *MockRateLimitCache +} + +// NewMockRateLimitCache creates a new mock instance +func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { + mock := &MockRateLimitCache{ctrl: ctrl} + mock.recorder = &MockRateLimitCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { + return m.recorder +} + +// DoLimit mocks base method +func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) + ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) + return ret0 +} + +// DoLimit indicates an expected call of DoLimit +func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) +} + +// MockTimeSource is a mock of TimeSource interface +type MockTimeSource struct { + ctrl *gomock.Controller + recorder *MockTimeSourceMockRecorder +} + +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { + mock *MockTimeSource +} + +// NewMockTimeSource creates a new mock instance +func 
NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { + mock := &MockTimeSource{ctrl: ctrl} + mock.recorder = &MockTimeSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder +} + +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) +} + +// MockJitterRandSource is a mock of JitterRandSource interface +type MockJitterRandSource struct { + ctrl *gomock.Controller + recorder *MockJitterRandSourceMockRecorder +} + +// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource +type MockJitterRandSourceMockRecorder struct { + mock *MockJitterRandSource +} + +// NewMockJitterRandSource creates a new mock instance +func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { + mock := &MockJitterRandSource{ctrl: ctrl} + mock.recorder = &MockJitterRandSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { + return m.recorder +} + +// Int63 mocks base method +func (m *MockJitterRandSource) Int63() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Int63") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Int63 indicates an expected call of Int63 +func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) +} + +// Seed 
mocks base method +func (m *MockJitterRandSource) Seed(arg0 int64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Seed", arg0) +} + +// Seed indicates an expected call of Seed +func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) +} diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 98d5a4a97..490d20977 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -3,4 +3,5 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace //go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Client,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis Client +//go:generate go run github.com/golang/mock/mockgen -destination ./limiter/limiter.go github.com/envoyproxy/ratelimit/src/limiter RateLimitCache,TimeSource,JitterRandSource diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index be5f5a34f..4d3001468 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -1,54 +1,14 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: RateLimitCache,Client,TimeSource,JitterRandSource) +// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: Client) // Package mock_redis is a generated GoMock package. 
package mock_redis import ( - context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" reflect "reflect" ) -// MockRateLimitCache is a mock of RateLimitCache interface -type MockRateLimitCache struct { - ctrl *gomock.Controller - recorder *MockRateLimitCacheMockRecorder -} - -// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache -type MockRateLimitCacheMockRecorder struct { - mock *MockRateLimitCache -} - -// NewMockRateLimitCache creates a new mock instance -func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { - mock := &MockRateLimitCache{ctrl: ctrl} - mock.recorder = &MockRateLimitCacheMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { - return m.recorder -} - -// DoLimit mocks base method -func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) - ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) - return ret0 -} - -// DoLimit indicates an expected call of DoLimit -func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) -} - // MockClient is a mock of Client interface type MockClient struct { ctrl *gomock.Controller @@ -118,89 +78,3 @@ func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } - -// MockTimeSource is a mock of 
TimeSource interface -type MockTimeSource struct { - ctrl *gomock.Controller - recorder *MockTimeSourceMockRecorder -} - -// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource -type MockTimeSourceMockRecorder struct { - mock *MockTimeSource -} - -// NewMockTimeSource creates a new mock instance -func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { - mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &MockTimeSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { - return m.recorder -} - -// UnixNow mocks base method -func (m *MockTimeSource) UnixNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNow indicates an expected call of UnixNow -func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) -} - -// MockJitterRandSource is a mock of JitterRandSource interface -type MockJitterRandSource struct { - ctrl *gomock.Controller - recorder *MockJitterRandSourceMockRecorder -} - -// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource -type MockJitterRandSourceMockRecorder struct { - mock *MockJitterRandSource -} - -// NewMockJitterRandSource creates a new mock instance -func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { - mock := &MockJitterRandSource{ctrl: ctrl} - mock.recorder = &MockJitterRandSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { - return m.recorder -} - -// Int63 mocks base method -func (m *MockJitterRandSource) Int63() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Int63") - 
ret0, _ := ret[0].(int64) - return ret0 -} - -// Int63 indicates an expected call of Int63 -func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) -} - -// Seed mocks base method -func (m *MockJitterRandSource) Seed(arg0 int64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Seed", arg0) -} - -// Seed indicates an expected call of Seed -func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) -} diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 3463f25a0..b268de996 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" @@ -43,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { client := redis.NewClientImpl(statsStore, false, "", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() - cache := redis.NewRateLimitCacheImpl(client, nil, redis.NewTimeSourceImpl(), rand.New(redis.NewLockedSource(time.Now().Unix())), 10, nil) + cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 34339e292..56811fb9c 100644 --- a/test/redis/cache_impl_test.go +++ 
b/test/redis/cache_impl_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" @@ -15,6 +16,7 @@ import ( "github.com/alicebob/miniredis/v2" "github.com/envoyproxy/ratelimit/test/common" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -33,8 +35,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { client := mock_redis.NewMockClient(controller) perSecondClient := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - var cache redis.RateLimitCache + timeSource := mock_limiter.NewMockTimeSource(controller) + var cache limiter.RateLimitCache if usePerSecondRedis { cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil) } else { @@ -160,12 +162,12 @@ func TestOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)) @@ -250,7 +252,7 @@ func TestNearLimit(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) @@ -400,8 +402,8 @@ func TestRedisWithJitter(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - jitterSource := mock_redis.NewMockJitterRandSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + jitterSource := mock_limiter.NewMockJitterRandSource(controller) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 57fa0a65d..c51bc7984 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -7,14 +7,14 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" - "github.com/envoyproxy/ratelimit/src/service" + ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" - "github.com/envoyproxy/ratelimit/test/mocks/config" - "github.com/envoyproxy/ratelimit/test/mocks/redis" - "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" - "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" + mock_snapshot 
"github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" "github.com/golang/mock/gomock" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -51,7 +51,7 @@ type rateLimitServiceTestSuite struct { controller *gomock.Controller runtime *mock_loader.MockIFace snapshot *mock_snapshot.MockIFace - cache *mock_redis.MockRateLimitCache + cache *mock_limiter.MockRateLimitCache configLoader *mock_config.MockRateLimitConfigLoader config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int @@ -64,7 +64,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.controller = gomock.NewController(t) ret.runtime = mock_loader.NewMockIFace(ret.controller) ret.snapshot = mock_snapshot.NewMockIFace(ret.controller) - ret.cache = mock_redis.NewMockRateLimitCache(ret.controller) + ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = stats.NewStore(stats.NewNullSink(), false) From 23f32b5d8265fcbe6fcae3689e7a9972bb20dd88 Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Fri, 12 Jun 2020 11:48:34 -0700 Subject: [PATCH 10/41] json handler: return full ratelimit service response as json (#148) Previously an HTTP POST to /json would only return an HTTP status code, not all the other details supported by grpc ratelimit responses. With this change an HTTP POST to /json receives the full proto3 response encoded as json by jsonpb. It seems unlikely that anyone would be parsing the text "over limit" from the HTTP body instead of just reading the 429 response code, but for anyone doing that this would be a breaking change. 
Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- README.md | 27 ++++++++- src/server/server_impl.go | 38 ++++++++---- test/integration/integration_test.go | 5 ++ test/mocks/mocks.go | 1 + test/mocks/rls/rls.go | 50 ++++++++++++++++ test/server/server_impl_test.go | 86 ++++++++++++++++++++++++++++ 6 files changed, 196 insertions(+), 11 deletions(-) create mode 100644 test/mocks/rls/rls.go create mode 100644 test/server/server_impl_test.go diff --git a/README.md b/README.md index 0640cb3c1..eca0d0028 100644 --- a/README.md +++ b/README.md @@ -383,7 +383,32 @@ Takes an HTTP POST with a JSON body of the form e.g. } ``` The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more -ratelimits were exceeded. Endpoint does not currently return detailed information on which limits were exceeded. +ratelimits were exceeded. + +The response is a RateLimitResponse encoded with +[proto3-to-json mapping](https://developers.google.com/protocol-buffers/docs/proto3#json): +```json +{ + "overallCode": "OVER_LIMIT", + "statuses": [ + { + "code": "OVER_LIMIT", + "currentLimit": { + "requestsPerUnit": 1, + "unit": "MINUTE" + } + }, + { + "code": "OK", + "currentLimit": { + "requestsPerUnit": 2, + "unit": "MINUTE" + }, + "limitRemaining": 1 + } + ] +} +``` # Debug Port diff --git a/src/server/server_impl.go b/src/server/server_impl.go index da92df840..9a1239a96 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -1,7 +1,7 @@ package server import ( - "encoding/json" + "bytes" "expvar" "fmt" "io" @@ -19,6 +19,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" + "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" @@ -53,15 +54,18 @@ func (server *server) AddDebugHttpEndpoint(path string, help 
string, handler htt server.debugListener.endpoints[path] = help } -// add an http/1 handler at the /json endpoint which allows this ratelimit service to work with +// create an http/1 handler at the /json endpoint which allows this ratelimit service to work with // clients that cannot use the gRPC interface (e.g. lua) // example usage from cURL with domain "dummy" and descriptor "perday": // echo '{"domain": "dummy", "descriptors": [{"entries": [{"key": "perday"}]}]}' | curl -vvvXPOST --data @/dev/stdin localhost:8080/json -func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { - handler := func(writer http.ResponseWriter, request *http.Request) { +func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *http.Request) { + // Default options include enums as strings and no identation. + m := &jsonpb.Marshaler{} + + return func(writer http.ResponseWriter, request *http.Request) { var req pb.RateLimitRequest - if err := json.NewDecoder(request.Body).Decode(&req); err != nil { + if err := jsonpb.Unmarshal(request.Body, &req); err != nil { logger.Warnf("error: %s", err.Error()) http.Error(writer, err.Error(), http.StatusBadRequest) return @@ -73,15 +77,29 @@ func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { http.Error(writer, err.Error(), http.StatusBadRequest) return } + logger.Debugf("resp:%s", resp) - if resp.OverallCode == pb.RateLimitResponse_OVER_LIMIT { - http.Error(writer, "over limit", http.StatusTooManyRequests) - } else if resp.OverallCode == pb.RateLimitResponse_UNKNOWN { - http.Error(writer, "unknown", http.StatusInternalServerError) + + buf := bytes.NewBuffer(nil) + err = m.Marshal(buf, resp) + if err != nil { + logger.Errorf("error marshaling proto3 to json: %s", err.Error()) + http.Error(writer, "error marshaling proto3 to json: "+err.Error(), http.StatusInternalServerError) + return } + writer.Header().Set("Content-Type", "application/json") + if resp == nil || resp.OverallCode == 
pb.RateLimitResponse_UNKNOWN { + writer.WriteHeader(http.StatusInternalServerError) + } else if resp.OverallCode == pb.RateLimitResponse_OVER_LIMIT { + writer.WriteHeader(http.StatusTooManyRequests) + } + writer.Write(buf.Bytes()) } - server.router.HandleFunc("/json", handler) +} + +func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { + server.router.HandleFunc("/json", NewJsonHandler(svc)) } func (server *server) GrpcServer() *grpc.Server { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 60a25257b..6e1b9efd6 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -5,6 +5,7 @@ package integration_test import ( "bytes" "fmt" + "io/ioutil" "math/rand" "net/http" "os" @@ -376,11 +377,15 @@ func TestBasicConfigLegacy(t *testing.T) { }`) http_resp, _ := http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) assert.Equal(http_resp.StatusCode, 200) + body, _ := ioutil.ReadAll(http_resp.Body) http_resp.Body.Close() + assert.Equal(`{"overallCode":"OK","statuses":[{"code":"OK","currentLimit":{"requestsPerUnit":1,"unit":"MINUTE"}}]}`, string(body)) http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) assert.Equal(http_resp.StatusCode, 429) + body, _ = ioutil.ReadAll(http_resp.Body) http_resp.Body.Close() + assert.Equal(`{"overallCode":"OVER_LIMIT","statuses":[{"code":"OVER_LIMIT","currentLimit":{"requestsPerUnit":1,"unit":"MINUTE"}}]}`, string(body)) invalid_json := []byte(`{"unclosed quote: []}`) http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(invalid_json)) diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 490d20977..703865af0 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -5,3 +5,4 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go 
github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader //go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis Client //go:generate go run github.com/golang/mock/mockgen -destination ./limiter/limiter.go github.com/envoyproxy/ratelimit/src/limiter RateLimitCache,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./rls/rls.go github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2 RateLimitServiceServer diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go new file mode 100644 index 000000000..77cd49ae9 --- /dev/null +++ b/test/mocks/rls/rls.go @@ -0,0 +1,50 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2 (interfaces: RateLimitServiceServer) + +// Package mock_v2 is a generated GoMock package. +package mock_v2 + +import ( + context "context" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRateLimitServiceServer is a mock of RateLimitServiceServer interface +type MockRateLimitServiceServer struct { + ctrl *gomock.Controller + recorder *MockRateLimitServiceServerMockRecorder +} + +// MockRateLimitServiceServerMockRecorder is the mock recorder for MockRateLimitServiceServer +type MockRateLimitServiceServerMockRecorder struct { + mock *MockRateLimitServiceServer +} + +// NewMockRateLimitServiceServer creates a new mock instance +func NewMockRateLimitServiceServer(ctrl *gomock.Controller) *MockRateLimitServiceServer { + mock := &MockRateLimitServiceServer{ctrl: ctrl} + mock.recorder = &MockRateLimitServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitServiceServer) EXPECT() *MockRateLimitServiceServerMockRecorder { + return m.recorder +} + +// 
ShouldRateLimit mocks base method +func (m *MockRateLimitServiceServer) ShouldRateLimit(arg0 context.Context, arg1 *v2.RateLimitRequest) (*v2.RateLimitResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldRateLimit", arg0, arg1) + ret0, _ := ret[0].(*v2.RateLimitResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ShouldRateLimit indicates an expected call of ShouldRateLimit +func (mr *MockRateLimitServiceServerMockRecorder) ShouldRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldRateLimit", reflect.TypeOf((*MockRateLimitServiceServer)(nil).ShouldRateLimit), arg0, arg1) +} diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go new file mode 100644 index 000000000..de058ecbc --- /dev/null +++ b/test/server/server_impl_test.go @@ -0,0 +1,86 @@ +package server_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + + "github.com/envoyproxy/ratelimit/src/server" + mock_v2 "github.com/envoyproxy/ratelimit/test/mocks/rls" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func assertHttpResponse(t *testing.T, + handler http.HandlerFunc, + requestBody string, + expectedStatusCode int, + expectedContentType string, + expectedResponseBody string) { + + t.Helper() + assert := assert.New(t) + + req := httptest.NewRequest("METHOD_NOT_CHECKED", "/path_not_checked", strings.NewReader(requestBody)) + w := httptest.NewRecorder() + handler(w, req) + + resp := w.Result() + actualBody, _ := ioutil.ReadAll(resp.Body) + assert.Equal(expectedContentType, resp.Header.Get("Content-Type")) + assert.Equal(expectedStatusCode, resp.StatusCode) + assert.Equal(expectedResponseBody, string(actualBody)) +} + +func TestJsonHandler(t *testing.T) { + controller := gomock.NewController(t) + defer controller.Finish() 
+ + rls := mock_v2.NewMockRateLimitServiceServer(controller) + handler := server.NewJsonHandler(rls) + + // Missing request body + assertHttpResponse(t, handler, "", 400, "text/plain; charset=utf-8", "EOF\n") + + // Request body is not valid json + assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "invalid character '}' looking for beginning of value\n") + + // Unknown response code + rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ + Domain: "foo", + }).Return(&pb.RateLimitResponse{}, nil) + assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "application/json", "{}") + + // ratelimit service error + rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ + Domain: "foo", + }).Return(nil, fmt.Errorf("some error")) + assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "some error\n") + + // json unmarshaling error + rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ + Domain: "foo", + }).Return(nil, nil) + assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "error marshaling proto3 to json: Marshal called with nil\n") + + // successful request, not rate limited + rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ + Domain: "foo", + }).Return(&pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + }, nil) + assertHttpResponse(t, handler, `{"domain": "foo"}`, 200, "application/json", `{"overallCode":"OK"}`) + + // successful request, rate limited + rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ + Domain: "foo", + }).Return(&pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + }, nil) + assertHttpResponse(t, handler, `{"domain": "foo"}`, 429, "application/json", `{"overallCode":"OVER_LIMIT"}`) +} From 4a6c402d9ba75b45dd34972d02ea3fca6217c1e9 Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Thu, 25 Jun 2020 15:36:02 -0700 Subject: [PATCH 11/41] Update goruntime to latest, 0.2.5. 
Add new config for watching changes in runtime config folder directly instead of the runtime root dir. (#151) Signed-off-by: Yuki Sawa Signed-off-by: Diego Erdody --- README.md | 6 + go.mod | 2 +- go.sum | 4 +- src/server/server_impl.go | 23 +++- src/service/ratelimit.go | 6 +- src/service_cmd/runner/runner.go | 4 +- src/settings/settings.go | 1 + test/integration/integration_test.go | 130 ++++++++++++++++++ .../runtime/current/ratelimit/reload.yaml | 16 +++ test/service/ratelimit_legacy_test.go | 2 +- test/service/ratelimit_test.go | 4 +- 11 files changed, 183 insertions(+), 15 deletions(-) create mode 100644 test/integration/runtime/current/ratelimit/reload.yaml diff --git a/README.md b/README.md index eca0d0028..746c5f5a9 100644 --- a/README.md +++ b/README.md @@ -320,6 +320,12 @@ RUNTIME_IGNOREDOTFILES default:"false" **Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/\*.yaml** +There are two methods for triggering a configuration reload: +1. Symlink RUNTIME_ROOT to a different directory. +2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. + +The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. + For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime). 
# Request Fields diff --git a/go.mod b/go.mod index f679a373e..fb228646f 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.1.0 - github.com/lyft/goruntime v0.2.1 + github.com/lyft/goruntime v0.2.5 github.com/lyft/gostats v0.4.0 github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect github.com/mediocregopher/radix/v3 v3.5.1 diff --git a/go.sum b/go.sum index e0747b697..ba1d3c07f 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/kelseyhightower/envconfig v1.1.0 h1:4htXR8ameS6KBfrNBoqEgpg0IK2D6rozN github.com/kelseyhightower/envconfig v1.1.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/lyft/goruntime v0.2.1 h1:7DebA8oMVuoQ5TQ0j1xR/X2xRagbGrm0e2SoMdt5tRs= -github.com/lyft/goruntime v0.2.1/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= +github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg= +github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= github.com/lyft/gostats v0.4.0/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 h1:kLCSHuk3X+SI8Up26wM71id7jz77B3zCZDp01UWMVbM= diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 9a1239a96..8624f37d5 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "net/http/pprof" + "path/filepath" "sort" "os" @@ -187,12 +188,22 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts loaderOpts = append(loaderOpts, 
loader.AllowDotFiles) } - ret.runtime = loader.New( - s.RuntimePath, - s.RuntimeSubdirectory, - ret.store.Scope("runtime"), - &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, - loaderOpts...) + if s.RuntimeWatchRoot { + ret.runtime = loader.New( + s.RuntimePath, + s.RuntimeSubdirectory, + ret.store.Scope("runtime"), + &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, + loaderOpts...) + + } else { + ret.runtime = loader.New( + filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), + "config", + ret.store.Scope("runtime"), + &loader.DirectoryRefresher{}, + loaderOpts...) + } // setup http router ret.router = mux.NewRouter() diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 07a8c3132..1286e5510 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -57,6 +57,7 @@ type service struct { stats serviceStats rlStatsScope stats.Scope legacy *legacyService + runtimeWatchRoot bool } func (this *service) reloadConfig() { @@ -75,7 +76,7 @@ func (this *service) reloadConfig() { files := []config.RateLimitConfigToLoad{} snapshot := this.runtime.Snapshot() for _, key := range snapshot.Keys() { - if !strings.HasPrefix(key, "config.") { + if this.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { continue } @@ -176,7 +177,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { } func NewService(runtime loader.IFace, cache limiter.RateLimitCache, - configLoader config.RateLimitConfigLoader, stats stats.Scope) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, stats stats.Scope, runtimeWatchRoot bool) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -187,6 +188,7 @@ func NewService(runtime loader.IFace, cache limiter.RateLimitCache, cache: cache, stats: newServiceStats(stats), rlStatsScope: stats.Scope("rate_limit"), + runtimeWatchRoot: runtimeWatchRoot, } newService.legacy = &legacyService{ s: newService, diff --git a/src/service_cmd/runner/runner.go 
b/src/service_cmd/runner/runner.go index 5e43307ad..85c3f8991 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -60,7 +60,9 @@ func (runner *Runner) Run() { rand.New(limiter.NewLockedSource(time.Now().Unix())), s.ExpirationJitterMaxSeconds), config.NewRateLimitConfigLoaderImpl(), - srv.Scope().Scope("service")) + srv.Scope().Scope("service"), + s.RuntimeWatchRoot, + ) srv.AddDebugHttpEndpoint( "/rlconfig", diff --git a/src/settings/settings.go b/src/settings/settings.go index 53ab13472..971ff60cc 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -20,6 +20,7 @@ type Settings struct { RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` + RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 6e1b9efd6..9c9f01ea0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -6,6 +6,7 @@ import ( "bytes" "fmt" "io/ioutil" + "io" "math/rand" "net/http" "os" @@ -67,6 +68,11 @@ func TestBasicAuthConfig(t *testing.T) { t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) } +func TestBasicReloadConfig(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) +} + func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:16382") 
os.Setenv("REDIS_URL", "localhost:16381") @@ -97,6 +103,28 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } +func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + return testConfigReload(grpcPort, perSecond, local_cache_size) +} + func getCacheKey(cacheKey string, enableLocalCache bool) string { if enableLocalCache { return cacheKey + "_local" @@ -456,3 +484,105 @@ func TestBasicConfigLegacy(t *testing.T) { assert.NoError(err) } } + +func testConfigReload(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + return func(t *testing.T) { + os.Setenv("REDIS_PERSECOND", perSecond) + os.Setenv("PORT", "8082") + os.Setenv("GRPC_PORT", grpcPort) + os.Setenv("DEBUG_PORT", "8084") + os.Setenv("RUNTIME_ROOT", "runtime/current") + os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit") + os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp") + os.Setenv("REDIS_SOCKET_TYPE", "tcp") + os.Setenv("LOCAL_CACHE_SIZE_IN_BYTES", local_cache_size) + os.Setenv("USE_STATSD", "false") + + local_cache_size_val, _ := strconv.Atoi(local_cache_size) + enable_local_cache 
:= local_cache_size_val > 0 + runner := runner.NewRunner() + + go func() { + runner.Run() + }() + + // HACK: Wait for the server to come up. Make a hook that we can wait on. + time.Sleep(1 * time.Second) + + assert := assert.New(t) + conn, err := grpc.Dial(fmt.Sprintf("localhost:%s", grpcPort), grpc.WithInsecure()) + assert.NoError(err) + defer conn.Close() + c := pb.NewRateLimitServiceClient(conn) + + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("block", enable_local_cache), "foo"}}}, 1)) + assert.Equal( + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}}, + response) + assert.NoError(err) + + runner.GetStatsStore().Flush() + loadCount1 := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + + // Copy a new file to config folder to test config reload functionality + in, err := os.Open("runtime/current/ratelimit/reload.yaml") + if err != nil { + panic(err) + } + defer in.Close() + out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + defer out.Close() + _, err = io.Copy(out, in) + if err != nil { + panic(err) + } + err = out.Close() + if err != nil { + panic(err) + } + + // Need to wait for config reload to take place and new descriptors to be loaded. + // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. + wait := 120 + reloaded := false + loadCount2 := uint64(0) + + for i := 0; i < wait; i++ { + time.Sleep(1 * time.Second) + runner.GetStatsStore().Flush() + loadCount2 = runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + + // Check that successful loads count has increased before continuing. 
+ if loadCount2 > loadCount1 { + reloaded = true + break + } + } + + assert.True(reloaded) + assert.Greater(loadCount2, loadCount1) + + response, err = c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1)) + assert.Equal( + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}}, + response) + assert.NoError(err) + + err = os.Remove("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + } +} \ No newline at end of file diff --git a/test/integration/runtime/current/ratelimit/reload.yaml b/test/integration/runtime/current/ratelimit/reload.yaml new file mode 100644 index 000000000..5da29e52d --- /dev/null +++ b/test/integration/runtime/current/ratelimit/reload.yaml @@ -0,0 +1,16 @@ +domain: reload +descriptors: + - key: key1 + rate_limit: + unit: second + requests_per_unit: 50 + + - key: block + rate_limit: + unit: second + requests_per_unit: 0 + + - key: one_per_minute + rate_limit: + unit: minute + requests_per_unit: 1 diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index ad7e6b942..71689a6af 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -224,7 +224,7 @@ func TestInitialLoadErrorLegacy(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.GetLegacyService().ShouldRateLimit(nil, request) diff --git a/test/service/ratelimit_test.go 
b/test/service/ratelimit_test.go index c51bc7984..a545f4f2e 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -82,7 +82,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, true) } func TestService(test *testing.T) { @@ -225,7 +225,7 @@ func TestInitialLoadError(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) From 465e7bd85038ef7d631e33c006e99552588a642f Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Wed, 1 Jul 2020 09:30:54 -0700 Subject: [PATCH 12/41] Drop support for legacy ratelimit.proto and upgrade to v3 rls.proto (#153) Signed-off-by: Petr Pchelko Signed-off-by: Diego Erdody --- Dockerfile | 1 - README.md | 36 +- go.mod | 4 +- go.sum | 18 + proto/ratelimit/ratelimit.pb.go | 538 -------------------------- proto/ratelimit/ratelimit.proto | 102 ----- src/client_cmd/main.go | 4 +- src/config/config.go | 4 +- src/config/config_impl.go | 4 +- src/limiter/cache.go | 2 +- src/limiter/cache_key.go | 4 +- src/redis/cache_impl.go | 2 +- src/server/server.go | 2 +- src/server/server_impl.go | 2 +- src/service/ratelimit.go | 2 +- src/service/ratelimit_legacy.go | 6 +- src/service_cmd/runner/runner.go | 8 +- test/common/common.go | 11 +- test/config/config_test.go | 4 +- 
test/integration/integration_test.go | 16 +- test/mocks/config/config.go | 2 +- test/mocks/limiter/limiter.go | 2 +- test/mocks/rls/rls.go | 2 +- test/redis/bench_test.go | 2 +- test/redis/cache_impl_test.go | 2 +- test/server/server_impl_test.go | 2 +- test/service/ratelimit_legacy_test.go | 27 +- test/service/ratelimit_test.go | 2 +- 28 files changed, 95 insertions(+), 716 deletions(-) delete mode 100644 proto/ratelimit/ratelimit.pb.go delete mode 100644 proto/ratelimit/ratelimit.proto diff --git a/Dockerfile b/Dockerfile index 706cefaa8..b0bdb0bd3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,6 @@ RUN go mod download COPY src src COPY script script -COPY proto proto RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd diff --git a/README.md b/README.md index 746c5f5a9..944e66b24 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Overview](#overview) -- [Deprecation of Legacy Ratelimit Proto](#deprecation-of-legacy-ratelimit-proto) +- [Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto](#deprecation-of-legacy-ratelimit-proto-and-v2-ratelimit-proto) - [Deprecation Schedule](#deprecation-schedule) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) @@ -38,27 +38,27 @@ applications. Applications request a rate limit decision based on a domain and a reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the Redis cache. A decision is then returned to the caller. -# Deprecation of Legacy Ratelimit Proto - -Envoy's data-plane-api defines a ratelimit service proto [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). 
-Logically the data-plane-api [rls](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) -is equivalent to the [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) -defined in this repo. However, due -to the namespace differences and how gRPC routing works it is not possible to transparently route the -legacy ratelimit (ones based in the [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) -defined in this repo) requests to the data-plane-api -definitions. Therefore, the ratelimit service will upgrade the requests, process them internally as it would -process a data-plane-api ratelimit request, and then downgrade the response to send back to the client. This means that, +# Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto + +Envoy's data-plane-api defines a ratelimit service proto v3 [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto). +Logically the data-plane-api rls [v3](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). +However, due to the namespace differences and how gRPC routing works it is not possible to transparently route the +legacy v2 ratelimit requests to the v3 definitions. Therefore, the ratelimit service will upgrade the requests, process them internally as it would +process a v3 ratelimit request, and then downgrade the response to send back to the client. This means that, for a slight performance hit for clients using the legacy proto, ratelimit is backwards compatible with the legacy proto.
+Prior to version 2.0.0 ratelimit service contained a protocol definition that used to be supported in a legacy mode, +but support for it was removed in 2.0.0. ## Deprecation Schedule -1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production -use at Lyft for over 2 years. +1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). -3. `v2.0.0` deletes support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). This version will be tagged by the end of 2018Q3 (~September 2018) -to give time to community members running ratelimit off of `master`. - +3. `v2.0.0` deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). +The current version of ratelimit protocol is changed to [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) is still supported +as a legacy protocol. +4. 
`v3.0.0` deletes support for legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) # Building and Testing @@ -331,7 +331,7 @@ For more information on how runtime works you can read its [README](https://gith # Request Fields For information on the fields of a Ratelimit gRPC request please read the information -on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/ratelimit/blob/master/proto/ratelimit/ratelimit.proto) +on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v3/rls.proto) # Statistics diff --git a/go.mod b/go.mod index fb228646f..90a8cb84b 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/alicebob/miniredis/v2 v2.11.4 github.com/cespare/xxhash v1.1.0 // indirect github.com/coocood/freecache v1.1.0 - github.com/envoyproxy/go-control-plane v0.6.9 + github.com/envoyproxy/go-control-plane v0.9.5 github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/mock v1.4.1 github.com/golang/protobuf v1.3.2 @@ -27,7 +27,7 @@ require ( golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect - google.golang.org/grpc v1.19.0 + google.golang.org/grpc v1.25.1 gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect gopkg.in/yaml.v2 v2.3.0 diff --git a/go.sum b/go.sum index ba1d3c07f..c8b595dc3 100644 --- a/go.sum +++ b/go.sum @@ -7,12 +7,15 @@ github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGn github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= github.com/alicebob/miniredis/v2 v2.11.4/go.mod 
h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -20,6 +23,11 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.6.9 h1:deEH9W8ZAUGNbCdX+9iNzBOGrAOrnpJGoy0PcTqk/tE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= +github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/protoc-gen-validate v0.1.0 
h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -33,6 +41,7 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= @@ -63,6 +72,7 @@ github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/sirupsen/logrus v1.0.4 h1:gzbtLsZC3Ic5PptoRG+kQj4L60qjK7H7XszrU163JNQ= github.com/sirupsen/logrus v1.0.4/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -86,6 +96,7 @@ golang.org/x/crypto 
v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -117,8 +128,10 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= @@ 
-126,10 +139,14 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8= google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= @@ -147,5 +164,6 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler 
v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/proto/ratelimit/ratelimit.pb.go b/proto/ratelimit/ratelimit.pb.go deleted file mode 100644 index 77f41dbbd..000000000 --- a/proto/ratelimit/ratelimit.pb.go +++ /dev/null @@ -1,538 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto/ratelimit/ratelimit.proto - -package ratelimit - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RateLimit_Unit int32 - -const ( - RateLimit_UNKNOWN RateLimit_Unit = 0 - RateLimit_SECOND RateLimit_Unit = 1 - RateLimit_MINUTE RateLimit_Unit = 2 - RateLimit_HOUR RateLimit_Unit = 3 - RateLimit_DAY RateLimit_Unit = 4 -) - -var RateLimit_Unit_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SECOND", - 2: "MINUTE", - 3: "HOUR", - 4: "DAY", -} -var RateLimit_Unit_value = map[string]int32{ - "UNKNOWN": 0, - "SECOND": 1, - "MINUTE": 2, - "HOUR": 3, - "DAY": 4, -} - -func (x RateLimit_Unit) String() string { - return proto.EnumName(RateLimit_Unit_name, int32(x)) -} -func (RateLimit_Unit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2, 0} -} - -type RateLimitResponse_Code int32 - -const ( - RateLimitResponse_UNKNOWN RateLimitResponse_Code = 0 - RateLimitResponse_OK RateLimitResponse_Code = 1 - RateLimitResponse_OVER_LIMIT RateLimitResponse_Code = 2 -) - -var RateLimitResponse_Code_name = map[int32]string{ - 0: "UNKNOWN", - 
1: "OK", - 2: "OVER_LIMIT", -} -var RateLimitResponse_Code_value = map[string]int32{ - "UNKNOWN": 0, - "OK": 1, - "OVER_LIMIT": 2, -} - -func (x RateLimitResponse_Code) String() string { - return proto.EnumName(RateLimitResponse_Code_name, int32(x)) -} -func (RateLimitResponse_Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -type RateLimitRequest struct { - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - Domain string `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - Descriptors []*RateLimitDescriptor `protobuf:"bytes,2,rep,name=descriptors" json:"descriptors,omitempty"` - // Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the - // value is not set in the message, a request increases the matched limit by 1. 
- HitsAddend uint32 `protobuf:"varint,3,opt,name=hits_addend,json=hitsAddend" json:"hits_addend,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitRequest) Reset() { *m = RateLimitRequest{} } -func (m *RateLimitRequest) String() string { return proto.CompactTextString(m) } -func (*RateLimitRequest) ProtoMessage() {} -func (*RateLimitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{0} -} -func (m *RateLimitRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitRequest.Unmarshal(m, b) -} -func (m *RateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitRequest.Marshal(b, m, deterministic) -} -func (dst *RateLimitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitRequest.Merge(dst, src) -} -func (m *RateLimitRequest) XXX_Size() int { - return xxx_messageInfo_RateLimitRequest.Size(m) -} -func (m *RateLimitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitRequest proto.InternalMessageInfo - -func (m *RateLimitRequest) GetDomain() string { - if m != nil { - return m.Domain - } - return "" -} - -func (m *RateLimitRequest) GetDescriptors() []*RateLimitDescriptor { - if m != nil { - return m.Descriptors - } - return nil -} - -func (m *RateLimitRequest) GetHitsAddend() uint32 { - if m != nil { - return m.HitsAddend - } - return 0 -} - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// 1) ["authenticated": "false"], ["ip_address": "10.0.0.1"] -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. 
The -// configuration supplies a default limit for the ip_address field. If there is a desire to raise -// the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// 2) ["authenticated": "false"], ["path": "/foo/bar"] -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// 3) ["authenticated": "false"], ["path": "/foo/bar"], ["ip_address": "10.0.0.1"] -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// 4) ["authenticated": "true"], ["client_id": "foo"] -// What it does: Limits all traffic for an authenticated client "foo" -// 5) ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. 
-type RateLimitDescriptor struct { - Entries []*RateLimitDescriptor_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitDescriptor) Reset() { *m = RateLimitDescriptor{} } -func (m *RateLimitDescriptor) String() string { return proto.CompactTextString(m) } -func (*RateLimitDescriptor) ProtoMessage() {} -func (*RateLimitDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1} -} -func (m *RateLimitDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitDescriptor.Unmarshal(m, b) -} -func (m *RateLimitDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitDescriptor.Marshal(b, m, deterministic) -} -func (dst *RateLimitDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor.Merge(dst, src) -} -func (m *RateLimitDescriptor) XXX_Size() int { - return xxx_messageInfo_RateLimitDescriptor.Size(m) -} -func (m *RateLimitDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitDescriptor proto.InternalMessageInfo - -func (m *RateLimitDescriptor) GetEntries() []*RateLimitDescriptor_Entry { - if m != nil { - return m.Entries - } - return nil -} - -type RateLimitDescriptor_Entry struct { - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitDescriptor_Entry) Reset() { *m = RateLimitDescriptor_Entry{} } -func (m *RateLimitDescriptor_Entry) String() string { return proto.CompactTextString(m) } -func (*RateLimitDescriptor_Entry) ProtoMessage() {} -func (*RateLimitDescriptor_Entry) Descriptor() 
([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1, 0} -} -func (m *RateLimitDescriptor_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitDescriptor_Entry.Unmarshal(m, b) -} -func (m *RateLimitDescriptor_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitDescriptor_Entry.Marshal(b, m, deterministic) -} -func (dst *RateLimitDescriptor_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor_Entry.Merge(dst, src) -} -func (m *RateLimitDescriptor_Entry) XXX_Size() int { - return xxx_messageInfo_RateLimitDescriptor_Entry.Size(m) -} -func (m *RateLimitDescriptor_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitDescriptor_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitDescriptor_Entry proto.InternalMessageInfo - -func (m *RateLimitDescriptor_Entry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *RateLimitDescriptor_Entry) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Defines an actual rate limit in terms of requests per unit of time and the unit itself. 
-type RateLimit struct { - RequestsPerUnit uint32 `protobuf:"varint,1,opt,name=requests_per_unit,json=requestsPerUnit" json:"requests_per_unit,omitempty"` - Unit RateLimit_Unit `protobuf:"varint,2,opt,name=unit,enum=pb.lyft.ratelimit.RateLimit_Unit" json:"unit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimit) Reset() { *m = RateLimit{} } -func (m *RateLimit) String() string { return proto.CompactTextString(m) } -func (*RateLimit) ProtoMessage() {} -func (*RateLimit) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2} -} -func (m *RateLimit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimit.Unmarshal(m, b) -} -func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) -} -func (dst *RateLimit) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimit.Merge(dst, src) -} -func (m *RateLimit) XXX_Size() int { - return xxx_messageInfo_RateLimit.Size(m) -} -func (m *RateLimit) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimit.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimit proto.InternalMessageInfo - -func (m *RateLimit) GetRequestsPerUnit() uint32 { - if m != nil { - return m.RequestsPerUnit - } - return 0 -} - -func (m *RateLimit) GetUnit() RateLimit_Unit { - if m != nil { - return m.Unit - } - return RateLimit_UNKNOWN -} - -// A response from a ShouldRateLimit call. -type RateLimitResponse struct { - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. 
- OverallCode RateLimitResponse_Code `protobuf:"varint,1,opt,name=overall_code,json=overallCode,enum=pb.lyft.ratelimit.RateLimitResponse_Code" json:"overall_code,omitempty"` - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. - Statuses []*RateLimitResponse_DescriptorStatus `protobuf:"bytes,2,rep,name=statuses" json:"statuses,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitResponse) Reset() { *m = RateLimitResponse{} } -func (m *RateLimitResponse) String() string { return proto.CompactTextString(m) } -func (*RateLimitResponse) ProtoMessage() {} -func (*RateLimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3} -} -func (m *RateLimitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitResponse.Unmarshal(m, b) -} -func (m *RateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitResponse.Marshal(b, m, deterministic) -} -func (dst *RateLimitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse.Merge(dst, src) -} -func (m *RateLimitResponse) XXX_Size() int { - return xxx_messageInfo_RateLimitResponse.Size(m) -} -func (m *RateLimitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitResponse proto.InternalMessageInfo - -func (m *RateLimitResponse) GetOverallCode() RateLimitResponse_Code { - if m != nil { - return m.OverallCode - } - return RateLimitResponse_UNKNOWN -} - -func (m *RateLimitResponse) GetStatuses() []*RateLimitResponse_DescriptorStatus { - if m != nil { - return m.Statuses - } - return nil -} - -type 
RateLimitResponse_DescriptorStatus struct { - // The response code for an individual descriptor. - Code RateLimitResponse_Code `protobuf:"varint,1,opt,name=code,enum=pb.lyft.ratelimit.RateLimitResponse_Code" json:"code,omitempty"` - // The current limit as configured by the server. Useful for debugging, etc. - CurrentLimit *RateLimit `protobuf:"bytes,2,opt,name=current_limit,json=currentLimit" json:"current_limit,omitempty"` - // The limit remaining in the current time unit. - LimitRemaining uint32 `protobuf:"varint,3,opt,name=limit_remaining,json=limitRemaining" json:"limit_remaining,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitResponse_DescriptorStatus) Reset() { *m = RateLimitResponse_DescriptorStatus{} } -func (m *RateLimitResponse_DescriptorStatus) String() string { return proto.CompactTextString(m) } -func (*RateLimitResponse_DescriptorStatus) ProtoMessage() {} -func (*RateLimitResponse_DescriptorStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Unmarshal(m, b) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Marshal(b, m, deterministic) -} -func (dst *RateLimitResponse_DescriptorStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse_DescriptorStatus.Merge(dst, src) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Size() int { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Size(m) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitResponse_DescriptorStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitResponse_DescriptorStatus proto.InternalMessageInfo - 
-func (m *RateLimitResponse_DescriptorStatus) GetCode() RateLimitResponse_Code { - if m != nil { - return m.Code - } - return RateLimitResponse_UNKNOWN -} - -func (m *RateLimitResponse_DescriptorStatus) GetCurrentLimit() *RateLimit { - if m != nil { - return m.CurrentLimit - } - return nil -} - -func (m *RateLimitResponse_DescriptorStatus) GetLimitRemaining() uint32 { - if m != nil { - return m.LimitRemaining - } - return 0 -} - -func init() { - proto.RegisterType((*RateLimitRequest)(nil), "pb.lyft.ratelimit.RateLimitRequest") - proto.RegisterType((*RateLimitDescriptor)(nil), "pb.lyft.ratelimit.RateLimitDescriptor") - proto.RegisterType((*RateLimitDescriptor_Entry)(nil), "pb.lyft.ratelimit.RateLimitDescriptor.Entry") - proto.RegisterType((*RateLimit)(nil), "pb.lyft.ratelimit.RateLimit") - proto.RegisterType((*RateLimitResponse)(nil), "pb.lyft.ratelimit.RateLimitResponse") - proto.RegisterType((*RateLimitResponse_DescriptorStatus)(nil), "pb.lyft.ratelimit.RateLimitResponse.DescriptorStatus") - proto.RegisterEnum("pb.lyft.ratelimit.RateLimit_Unit", RateLimit_Unit_name, RateLimit_Unit_value) - proto.RegisterEnum("pb.lyft.ratelimit.RateLimitResponse_Code", RateLimitResponse_Code_name, RateLimitResponse_Code_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for RateLimitService service - -type RateLimitServiceClient interface { - // Determine whether rate limiting should take place. 
- ShouldRateLimit(ctx context.Context, in *RateLimitRequest, opts ...grpc.CallOption) (*RateLimitResponse, error) -} - -type rateLimitServiceClient struct { - cc *grpc.ClientConn -} - -func NewRateLimitServiceClient(cc *grpc.ClientConn) RateLimitServiceClient { - return &rateLimitServiceClient{cc} -} - -func (c *rateLimitServiceClient) ShouldRateLimit(ctx context.Context, in *RateLimitRequest, opts ...grpc.CallOption) (*RateLimitResponse, error) { - out := new(RateLimitResponse) - err := grpc.Invoke(ctx, "/pb.lyft.ratelimit.RateLimitService/ShouldRateLimit", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for RateLimitService service - -type RateLimitServiceServer interface { - // Determine whether rate limiting should take place. - ShouldRateLimit(context.Context, *RateLimitRequest) (*RateLimitResponse, error) -} - -func RegisterRateLimitServiceServer(s *grpc.Server, srv RateLimitServiceServer) { - s.RegisterService(&_RateLimitService_serviceDesc, srv) -} - -func _RateLimitService_ShouldRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RateLimitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RateLimitServiceServer).ShouldRateLimit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.lyft.ratelimit.RateLimitService/ShouldRateLimit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RateLimitServiceServer).ShouldRateLimit(ctx, req.(*RateLimitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _RateLimitService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "pb.lyft.ratelimit.RateLimitService", - HandlerType: (*RateLimitServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ShouldRateLimit", - Handler: _RateLimitService_ShouldRateLimit_Handler, 
- }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/ratelimit/ratelimit.proto", -} - -func init() { - proto.RegisterFile("proto/ratelimit/ratelimit.proto", fileDescriptor_ratelimit_8ec600a45de499be) -} - -var fileDescriptor_ratelimit_8ec600a45de499be = []byte{ - // 532 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x8e, 0xd2, 0x40, - 0x14, 0xde, 0xa1, 0x5d, 0x58, 0x4e, 0x17, 0x28, 0xa3, 0x31, 0x84, 0x98, 0x2c, 0x56, 0xa3, 0xf8, - 0x93, 0x6e, 0x82, 0xd9, 0x4b, 0x4d, 0x70, 0xc1, 0x2c, 0x59, 0x16, 0x74, 0x58, 0x34, 0x7a, 0x61, - 0xd3, 0xa5, 0x47, 0xb7, 0xb1, 0xdb, 0xe2, 0xcc, 0x94, 0x84, 0x3b, 0x9f, 0xc0, 0x3b, 0x1f, 0xc0, - 0x17, 0xf0, 0x0d, 0x7c, 0x37, 0xd3, 0xa1, 0x14, 0xfc, 0x09, 0x21, 0x7b, 0x77, 0xfe, 0xbe, 0xef, - 0x9c, 0x9e, 0xef, 0x4c, 0xe1, 0x60, 0xca, 0x23, 0x19, 0x1d, 0x72, 0x57, 0x62, 0xe0, 0x5f, 0xf9, - 0x72, 0x65, 0xd9, 0x2a, 0x43, 0xab, 0xd3, 0x0b, 0x3b, 0x98, 0x7f, 0x94, 0x76, 0x96, 0xb0, 0xbe, - 0x13, 0x30, 0x99, 0x2b, 0xb1, 0x9f, 0x78, 0x0c, 0xbf, 0xc4, 0x28, 0x24, 0xbd, 0x05, 0x79, 0x2f, - 0xba, 0x72, 0xfd, 0xb0, 0x46, 0x1a, 0xa4, 0x59, 0x64, 0xa9, 0x47, 0x4f, 0xc0, 0xf0, 0x50, 0x4c, - 0xb8, 0x3f, 0x95, 0x11, 0x17, 0xb5, 0x5c, 0x43, 0x6b, 0x1a, 0xad, 0xfb, 0xf6, 0x3f, 0xac, 0x76, - 0xc6, 0xd8, 0xc9, 0xca, 0xd9, 0x3a, 0x94, 0x1e, 0x80, 0x71, 0xe9, 0x4b, 0xe1, 0xb8, 0x9e, 0x87, - 0xa1, 0x57, 0xd3, 0x1a, 0xa4, 0x59, 0x62, 0x90, 0x84, 0xda, 0x2a, 0x62, 0x7d, 0x23, 0x70, 0xe3, - 0x3f, 0x2c, 0xf4, 0x25, 0x14, 0x30, 0x94, 0xdc, 0x47, 0x51, 0x23, 0xaa, 0xfd, 0x93, 0xed, 0xda, - 0xdb, 0xdd, 0x50, 0xf2, 0x39, 0x5b, 0x82, 0xeb, 0x87, 0xb0, 0xab, 0x22, 0xd4, 0x04, 0xed, 0x33, - 0xce, 0xd3, 0x0f, 0x4d, 0x4c, 0x7a, 0x13, 0x76, 0x67, 0x6e, 0x10, 0x63, 0x2d, 0xa7, 0x62, 0x0b, - 0xc7, 0xfa, 0x49, 0xa0, 0x98, 0xf1, 0xd2, 0x47, 0x50, 0xe5, 0x8b, 0x65, 0x09, 0x67, 0x8a, 0xdc, - 0x89, 0x43, 0x5f, 0x2a, 0x8e, 0x12, 0xab, 0x2c, 0x13, 0xaf, 0x90, 0x8f, 0x43, 0x5f, 0xd2, 0x23, - 0xd0, 0x55, 
0x3a, 0xa1, 0x2b, 0xb7, 0xee, 0x6c, 0x9a, 0xd7, 0x4e, 0x00, 0x4c, 0x95, 0x5b, 0xcf, - 0x41, 0x57, 0x70, 0x03, 0x0a, 0xe3, 0xc1, 0xe9, 0x60, 0xf8, 0x76, 0x60, 0xee, 0x50, 0x80, 0xfc, - 0xa8, 0x7b, 0x3c, 0x1c, 0x74, 0x4c, 0x92, 0xd8, 0x67, 0xbd, 0xc1, 0xf8, 0xbc, 0x6b, 0xe6, 0xe8, - 0x1e, 0xe8, 0x27, 0xc3, 0x31, 0x33, 0x35, 0x5a, 0x00, 0xad, 0xd3, 0x7e, 0x67, 0xea, 0xd6, 0x0f, - 0x0d, 0xaa, 0x6b, 0xca, 0x8a, 0x69, 0x14, 0x0a, 0xa4, 0x7d, 0xd8, 0x8f, 0x66, 0xc8, 0xdd, 0x20, - 0x70, 0x26, 0x91, 0x87, 0x6a, 0xe6, 0x72, 0xeb, 0xe1, 0xa6, 0xa1, 0x96, 0x58, 0xfb, 0x38, 0xf2, - 0x90, 0x19, 0x29, 0x3c, 0x71, 0xe8, 0x6b, 0xd8, 0x13, 0xd2, 0x95, 0xb1, 0xc0, 0xe5, 0x35, 0x1c, - 0x6d, 0xc5, 0xb4, 0xd2, 0x65, 0xa4, 0xe0, 0x2c, 0xa3, 0xa9, 0xff, 0x22, 0x60, 0xfe, 0x9d, 0xa6, - 0xcf, 0x40, 0xbf, 0xde, 0xb4, 0x0a, 0x46, 0xdb, 0x50, 0x9a, 0xc4, 0x9c, 0x63, 0x28, 0x1d, 0x55, - 0xad, 0xa4, 0x30, 0x5a, 0xb7, 0x37, 0xf2, 0xec, 0xa7, 0x90, 0x85, 0xe0, 0x0f, 0xa0, 0xa2, 0x0a, - 0x1c, 0x8e, 0xc9, 0x53, 0xf0, 0xc3, 0x4f, 0xe9, 0xd1, 0x96, 0x83, 0x45, 0xd7, 0x34, 0x6a, 0x3d, - 0x06, 0x5d, 0xad, 0xe6, 0x0f, 0xd9, 0xf2, 0x90, 0x1b, 0x9e, 0x9a, 0x84, 0x96, 0x01, 0x86, 0x6f, - 0xba, 0xcc, 0xe9, 0xf7, 0xce, 0x7a, 0xe7, 0x66, 0xae, 0xc5, 0xd7, 0x1e, 0xdf, 0x08, 0xf9, 0xcc, - 0x9f, 0x20, 0xfd, 0x00, 0x95, 0xd1, 0x65, 0x14, 0x07, 0xde, 0xea, 0xda, 0xee, 0x6e, 0xfe, 0x60, - 0x75, 0x6e, 0xf5, 0x7b, 0xdb, 0x6c, 0xc5, 0xda, 0x79, 0x51, 0x7e, 0x5f, 0xcc, 0x0a, 0xbe, 0x12, - 0x72, 0x91, 0x57, 0xff, 0x86, 0xa7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x0e, 0xa0, 0x99, - 0x3e, 0x04, 0x00, 0x00, -} diff --git a/proto/ratelimit/ratelimit.proto b/proto/ratelimit/ratelimit.proto deleted file mode 100644 index 7e1ee60fa..000000000 --- a/proto/ratelimit/ratelimit.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -option go_package = "ratelimit"; - -option cc_generic_services = true; - -package pb.lyft.ratelimit; - -service RateLimitService { - // Determine whether rate limiting should take place. 
- rpc ShouldRateLimit (RateLimitRequest) returns (RateLimitResponse) {} -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - repeated RateLimitDescriptor descriptors = 2; - // Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the - // value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// 1) ["authenticated": "false"], ["ip_address": "10.0.0.1"] -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The -// configuration supplies a default limit for the ip_address field. If there is a desire to raise -// the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. 
-// 2) ["authenticated": "false"], ["path": "/foo/bar"] -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// 3) ["authenticated": "false"], ["path": "/foo/bar"], ["ip_address": "10.0.0.1"] -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// 4) ["authenticated": "true"], ["client_id": "foo"] -// What it does: Limits all traffic for an authenticated client "foo" -// 5) ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. -message RateLimitDescriptor { - message Entry { - string key = 1; - string value = 2; - } - - repeated Entry entries = 1; -} - -// Defines an actual rate limit in terms of requests per unit of time and the unit itself. -message RateLimit { - enum Unit { - UNKNOWN = 0; - SECOND = 1; - MINUTE = 2; - HOUR = 3; - DAY = 4; - } - - uint32 requests_per_unit = 1; - Unit unit = 2; -} - -// A response from a ShouldRateLimit call. -message RateLimitResponse { - enum Code { - UNKNOWN = 0; - OK = 1; - OVER_LIMIT = 2; - } - - message DescriptorStatus { - // The response code for an individual descriptor. - Code code = 1; - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - // The limit remaining in the current time unit. - uint32 limit_remaining = 3; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. 
- Code overall_code = 1; - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. - repeated DescriptorStatus statuses = 2; -} \ No newline at end of file diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index 37d1f5fd0..d65b025ff 100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -7,8 +7,8 @@ import ( "os" "strings" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "golang.org/x/net/context" "google.golang.org/grpc" ) diff --git a/src/config/config.go b/src/config/config.go index 70f3b9730..8f94715c9 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -1,8 +1,8 @@ package config import ( - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" "golang.org/x/net/context" ) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index a0ad1ff4f..02c74633c 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" diff --git a/src/limiter/cache.go b/src/limiter/cache.go index 2ca16956f..9408126ca 100644 --- a/src/limiter/cache.go +++ b/src/limiter/cache.go @@ -1,7 +1,7 @@ package limiter import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "golang.org/x/net/context" ) diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go index 65540fa22..a06087056 100644 --- a/src/limiter/cache_key.go +++ b/src/limiter/cache_key.go @@ -5,8 +5,8 @@ import ( "strconv" "sync" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" ) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 1beb0de43..ea4c52ee4 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -5,7 +5,7 @@ import ( "math/rand" "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" diff --git a/src/server/server.go b/src/server/server.go index 38520092a..d0570868b 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -1,7 +1,7 @@ package server import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "net/http" 
"github.com/lyft/goruntime/loader" diff --git a/src/server/server_impl.go b/src/server/server_impl.go index 8624f37d5..8652bf5d8 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -17,7 +17,7 @@ import ( "net" "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" "github.com/golang/protobuf/jsonpb" diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 1286e5510..08b392d21 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -4,7 +4,7 @@ import ( "strings" "sync" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index e4654783a..638850689 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -1,8 +1,8 @@ package ratelimit import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/golang/protobuf/jsonpb" "github.com/lyft/gostats" "golang.org/x/net/context" @@ -12,7 +12,7 @@ type RateLimitLegacyServiceServer interface { pb_legacy.RateLimitServiceServer } -// legacyService is used to implement ratelimit.proto (https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) +// legacyService is used to implement v2 rls.proto 
(https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit method. type legacyService struct { s *service diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 85c3f8991..ef318bde1 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -10,8 +10,8 @@ import ( "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" @@ -74,9 +74,9 @@ func (runner *Runner) Run() { srv.AddJsonHandler(service) // Ratelimit is compatible with two proto definitions - // 1. data-plane-api rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto + // 1. data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) - // 2. ratelimit.proto defined in this repository: https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto + // 1. data-plane-api v2 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto pb_legacy.RegisterRateLimitServiceServer(srv.GrpcServer(), service.GetLegacyService()) // (1) is the current definition, and (2) is the legacy definition. 
diff --git a/test/common/common.go b/test/common/common.go index e3796f5de..630161b47 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -3,9 +3,10 @@ package common import ( "sync" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" ) type TestStatSink struct { @@ -57,11 +58,11 @@ func NewRateLimitRequestLegacy(domain string, descriptors [][][2]string, hitsAdd request := &pb_legacy.RateLimitRequest{} request.Domain = domain for _, descriptor := range descriptors { - newDescriptor := &pb_legacy.RateLimitDescriptor{} + newDescriptor := &pb_struct_legacy.RateLimitDescriptor{} for _, entry := range descriptor { newDescriptor.Entries = append( newDescriptor.Entries, - &pb_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) + &pb_struct_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) } request.Descriptors = append(request.Descriptors, newDescriptor) } diff --git a/test/config/config_test.go b/test/config/config_test.go index 791cc1098..a2e1d204c 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -4,8 +4,8 @@ import ( "io/ioutil" "testing" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" 
"github.com/stretchr/testify/assert" diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 9c9f01ea0..22582e2c4 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" "github.com/envoyproxy/ratelimit/test/common" "github.com/stretchr/testify/assert" @@ -36,11 +36,11 @@ func newDescriptorStatus( func newDescriptorStatusLegacy( status pb_legacy.RateLimitResponse_Code, requestsPerUnit uint32, - unit pb_legacy.RateLimit_Unit, limitRemaining uint32) *pb_legacy.RateLimitResponse_DescriptorStatus { + unit pb_legacy.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) *pb_legacy.RateLimitResponse_DescriptorStatus { return &pb_legacy.RateLimitResponse_DescriptorStatus{ Code: status, - CurrentLimit: &pb_legacy.RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, + CurrentLimit: &pb_legacy.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, LimitRemaining: limitRemaining, } } @@ -426,7 +426,7 @@ func TestBasicConfigLegacy(t *testing.T) { &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}}, + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimitResponse_RateLimit_SECOND, 49)}}, response) assert.NoError(err) @@ -450,7 +450,7 @@ func TestBasicConfigLegacy(t *testing.T) { &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - 
newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}}, + newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}}, response) assert.NoError(err) } @@ -478,8 +478,8 @@ func TestBasicConfigLegacy(t *testing.T) { &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1), - newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}}, + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), + newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) assert.NoError(err) } diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index 044b55ec9..6205f7c9c 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -6,7 +6,7 @@ package mock_config import ( context "context" - ratelimit "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + ratelimit "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go index f5c9f8bfa..53c069232 100644 --- a/test/mocks/limiter/limiter.go +++ b/test/mocks/limiter/limiter.go @@ -6,7 +6,7 @@ package mock_limiter import ( context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" reflect "reflect" diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go index 77cd49ae9..6be8fda9c 100644 --- 
a/test/mocks/rls/rls.go +++ b/test/mocks/rls/rls.go @@ -6,7 +6,7 @@ package mock_v2 import ( context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" gomock "github.com/golang/mock/gomock" reflect "reflect" ) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index b268de996..8945f8706 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 56811fb9c..7c7951e19 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -6,7 +6,7 @@ import ( "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index de058ecbc..b56465218 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -8,7 +8,7 @@ import ( "strings" "testing" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/server" mock_v2 "github.com/envoyproxy/ratelimit/test/mocks/rls" diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index 71689a6af..5de224701 100644 --- a/test/service/ratelimit_legacy_test.go +++ 
b/test/service/ratelimit_legacy_test.go @@ -3,9 +3,10 @@ package ratelimit_test import ( "testing" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" + pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/service" @@ -17,7 +18,7 @@ import ( "golang.org/x/net/context" ) -func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimit, error) { +func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimitResponse_RateLimit, error) { if ratelimit == nil { return nil, nil } @@ -28,7 +29,7 @@ func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.Rat return nil, err } - rl := &pb_legacy.RateLimit{} + rl := &pb_legacy.RateLimitResponse_RateLimit{} err = jsonpb.UnmarshalString(s, rl) if err != nil { return nil, err @@ -37,12 +38,12 @@ func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.Rat return rl, nil } -func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimit, error) { +func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimitResponse_RateLimit, error) { if ratelimits == nil { return nil, nil } - ret := make([]*pb_legacy.RateLimit, 0) + ret := make([]*pb_legacy.RateLimitResponse_RateLimit, 0) for _, rl := range ratelimits { if rl == nil { ret = append(ret, nil) @@ -266,7 +267,7 @@ func TestConvertLegacyRequest(test *testing.T) { { request := &pb_legacy.RateLimitRequest{ Domain: "test", 
- Descriptors: []*pb_legacy.RateLimitDescriptor{}, + Descriptors: []*pb_struct_legacy.RateLimitDescriptor{}, HitsAddend: 10, } @@ -285,9 +286,9 @@ func TestConvertLegacyRequest(test *testing.T) { } { - descriptors := []*pb_legacy.RateLimitDescriptor{ + descriptors := []*pb_struct_legacy.RateLimitDescriptor{ { - Entries: []*pb_legacy.RateLimitDescriptor_Entry{ + Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{ { Key: "foo", Value: "foo_value", @@ -296,7 +297,7 @@ func TestConvertLegacyRequest(test *testing.T) { }, }, { - Entries: []*pb_legacy.RateLimitDescriptor_Entry{}, + Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{}, }, { Entries: nil, @@ -375,9 +376,9 @@ func TestConvertResponse(test *testing.T) { Statuses: statuses, } - expectedRl := &pb_legacy.RateLimit{ + expectedRl := &pb_legacy.RateLimitResponse_RateLimit{ RequestsPerUnit: 10, - Unit: pb_legacy.RateLimit_DAY, + Unit: pb_legacy.RateLimitResponse_RateLimit_DAY, } expectedStatuses := []*pb_legacy.RateLimitResponse_DescriptorStatus{ diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index a545f4f2e..5c862fbe5 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" ratelimit "github.com/envoyproxy/ratelimit/src/service" From af7b2343fea1809a3e126c87bd9721bf0efda9fc Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Mon, 13 Jul 2020 16:50:13 -0700 Subject: [PATCH 13/41] Followups to v3 upgrade (#155) - Regenerate mocks based on new default protocol - Manually transform v2 messages to v3 messages - some of the fields were renamed thus json Marshal/Unmarshal does not work anymore - Added tests that verify conversion v2<->v3 works for headers fields - Update tests to use 
proto.Equal - simple assert.Equals might not work correctly for protobuf messages. Signed-off-by: Petr Pchelko Signed-off-by: Diego Erdody --- src/service/ratelimit_legacy.go | 94 ++++++++++++++++++++------- test/common/common.go | 8 +++ test/integration/integration_test.go | 15 +++-- test/mocks/config/config.go | 4 +- test/mocks/limiter/limiter.go | 6 +- test/mocks/mocks.go | 2 +- test/mocks/rls/rls.go | 12 ++-- test/server/server_impl_test.go | 31 ++++----- test/service/ratelimit_legacy_test.go | 69 +++++++++++++------- test/service/ratelimit_test.go | 9 ++- 10 files changed, 166 insertions(+), 84 deletions(-) diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index 638850689..e8ecb98a6 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -1,9 +1,10 @@ package ratelimit import ( + core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/golang/protobuf/jsonpb" "github.com/lyft/gostats" "golang.org/x/net/context" ) @@ -62,20 +63,32 @@ func ConvertLegacyRequest(legacyRequest *pb_legacy.RateLimitRequest) (*pb.RateLi if legacyRequest == nil { return nil, nil } - - m := &jsonpb.Marshaler{} - s, err := m.MarshalToString(legacyRequest) - if err != nil { - return nil, err + request := &pb.RateLimitRequest{ + Domain: legacyRequest.GetDomain(), + HitsAddend: legacyRequest.GetHitsAddend(), } - - req := &pb.RateLimitRequest{} - err = jsonpb.UnmarshalString(s, req) - if err != nil { - return nil, err + if legacyRequest.GetDescriptors() != nil { + descriptors := make([]*pb_struct.RateLimitDescriptor, len(legacyRequest.GetDescriptors())) + for i, descriptor := range legacyRequest.GetDescriptors() { + if descriptor != nil { + descriptors[i] = 
&pb_struct.RateLimitDescriptor{} + if descriptor.GetEntries() != nil { + entries := make([]*pb_struct.RateLimitDescriptor_Entry, len(descriptor.GetEntries())) + for j, entry := range descriptor.GetEntries() { + if entry != nil { + entries[j] = &pb_struct.RateLimitDescriptor_Entry{ + Key: entry.GetKey(), + Value: entry.GetValue(), + } + } + } + descriptors[i].Entries = entries + } + } + } + request.Descriptors = descriptors } - - return req, nil + return request, nil } func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitResponse, error) { @@ -83,17 +96,54 @@ func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitRespon return nil, nil } - m := &jsonpb.Marshaler{} - s, err := m.MarshalToString(response) - if err != nil { - return nil, err + legacyResponse := &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_Code(response.GetOverallCode()), } - resp := &pb_legacy.RateLimitResponse{} - err = jsonpb.UnmarshalString(s, resp) - if err != nil { - return nil, err + if response.GetStatuses() != nil { + statuses := make([]*pb_legacy.RateLimitResponse_DescriptorStatus, len(response.GetStatuses())) + for i, status := range response.GetStatuses() { + if status != nil { + statuses[i] = &pb_legacy.RateLimitResponse_DescriptorStatus{ + Code: pb_legacy.RateLimitResponse_Code(status.GetCode()), + LimitRemaining: status.GetLimitRemaining(), + } + if status.GetCurrentLimit() != nil { + statuses[i].CurrentLimit = &pb_legacy.RateLimitResponse_RateLimit{ + RequestsPerUnit: status.GetCurrentLimit().GetRequestsPerUnit(), + Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(status.GetCurrentLimit().GetUnit()), + } + } + } + } + legacyResponse.Statuses = statuses + } + + if response.GetRequestHeadersToAdd() != nil { + requestHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetRequestHeadersToAdd())) + for i, header := range response.GetRequestHeadersToAdd() { + if header != nil { + requestHeadersToAdd[i] = 
&core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.RequestHeadersToAdd = requestHeadersToAdd + } + + if response.GetResponseHeadersToAdd() != nil { + responseHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetResponseHeadersToAdd())) + for i, header := range response.GetResponseHeadersToAdd() { + if header != nil { + responseHeadersToAdd[i] = &core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.Headers = responseHeadersToAdd } - return resp, nil + return legacyResponse, nil } diff --git a/test/common/common.go b/test/common/common.go index 630161b47..b15c41d5c 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -1,6 +1,9 @@ package common import ( + "fmt" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" "sync" pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" @@ -69,3 +72,8 @@ func NewRateLimitRequestLegacy(domain string, descriptors [][][2]string, hitsAdd request.HitsAddend = hitsAddend return request } + +func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { + assert.True(proto.Equal(expected, actual), + fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) +} diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 22582e2c4..0f01ac985 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -166,7 +166,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu response, err := c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("foo", [][][2]string{{{getCacheKey("hello", enable_local_cache), "world"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: 
[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, @@ -184,7 +185,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu response, err = c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("basic", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -224,7 +226,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -287,7 +290,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining2 = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -384,7 +388,8 @@ func TestBasicConfigLegacy(t *testing.T) { response, err := c.ShouldRateLimit( context.Background(), common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index 6205f7c9c..38d5b347b 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -6,7 +6,7 @@ package mock_config import ( context "context" - ratelimit "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" 
config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" stats "github.com/lyft/gostats" @@ -51,7 +51,7 @@ func (mr *MockRateLimitConfigMockRecorder) Dump() *gomock.Call { } // GetLimit mocks base method -func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 *ratelimit.RateLimitDescriptor) *config.RateLimit { +func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 *envoy_extensions_common_ratelimit_v3.RateLimitDescriptor) *config.RateLimit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLimit", arg0, arg1, arg2) ret0, _ := ret[0].(*config.RateLimit) diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go index 53c069232..7e9f3e5b3 100644 --- a/test/mocks/limiter/limiter.go +++ b/test/mocks/limiter/limiter.go @@ -6,7 +6,7 @@ package mock_limiter import ( context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" reflect "reflect" @@ -36,10 +36,10 @@ func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { } // DoLimit mocks base method -func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { +func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *envoy_service_ratelimit_v3.RateLimitRequest, arg2 []*config.RateLimit) []*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) - ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) + ret0, _ := ret[0].([]*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus) return ret0 } diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 703865af0..9f8b18cec 100644 --- a/test/mocks/mocks.go +++ 
b/test/mocks/mocks.go @@ -5,4 +5,4 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader //go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis Client //go:generate go run github.com/golang/mock/mockgen -destination ./limiter/limiter.go github.com/envoyproxy/ratelimit/src/limiter RateLimitCache,TimeSource,JitterRandSource -//go:generate go run github.com/golang/mock/mockgen -destination ./rls/rls.go github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2 RateLimitServiceServer +//go:generate go run github.com/golang/mock/mockgen -destination ./rls/rls.go github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3 RateLimitServiceServer diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go index 6be8fda9c..92d79b9ab 100644 --- a/test/mocks/rls/rls.go +++ b/test/mocks/rls/rls.go @@ -1,12 +1,12 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2 (interfaces: RateLimitServiceServer) +// Source: github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3 (interfaces: RateLimitServiceServer) -// Package mock_v2 is a generated GoMock package. -package mock_v2 +// Package mock_v3 is a generated GoMock package. 
+package mock_v3 import ( context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" gomock "github.com/golang/mock/gomock" reflect "reflect" ) @@ -35,10 +35,10 @@ func (m *MockRateLimitServiceServer) EXPECT() *MockRateLimitServiceServerMockRec } // ShouldRateLimit mocks base method -func (m *MockRateLimitServiceServer) ShouldRateLimit(arg0 context.Context, arg1 *v2.RateLimitRequest) (*v2.RateLimitResponse, error) { +func (m *MockRateLimitServiceServer) ShouldRateLimit(arg0 context.Context, arg1 *envoy_service_ratelimit_v3.RateLimitRequest) (*envoy_service_ratelimit_v3.RateLimitResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ShouldRateLimit", arg0, arg1) - ret0, _ := ret[0].(*v2.RateLimitResponse) + ret0, _ := ret[0].(*envoy_service_ratelimit_v3.RateLimitResponse) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go index b56465218..8ee221610 100644 --- a/test/server/server_impl_test.go +++ b/test/server/server_impl_test.go @@ -2,6 +2,8 @@ package server_test import ( "fmt" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/mock" "io/ioutil" "net/http" "net/http/httptest" @@ -11,7 +13,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/server" - mock_v2 "github.com/envoyproxy/ratelimit/test/mocks/rls" + mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" ) @@ -41,8 +43,13 @@ func TestJsonHandler(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - rls := mock_v2.NewMockRateLimitServiceServer(controller) + rls := mock_v3.NewMockRateLimitServiceServer(controller) handler := server.NewJsonHandler(rls) + requestMatcher := mock.MatchedBy(func(req *pb.RateLimitRequest) bool { 
+ return proto.Equal(req, &pb.RateLimitRequest{ + Domain: "foo", + }) + }) // Missing request body assertHttpResponse(t, handler, "", 400, "text/plain; charset=utf-8", "EOF\n") @@ -51,35 +58,25 @@ func TestJsonHandler(t *testing.T) { assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "invalid character '}' looking for beginning of value\n") // Unknown response code - rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ - Domain: "foo", - }).Return(&pb.RateLimitResponse{}, nil) + rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{}, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "application/json", "{}") // ratelimit service error - rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ - Domain: "foo", - }).Return(nil, fmt.Errorf("some error")) + rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, fmt.Errorf("some error")) assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "some error\n") // json unmarshaling error - rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ - Domain: "foo", - }).Return(nil, nil) + rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "error marshaling proto3 to json: Marshal called with nil\n") // successful request, not rate limited - rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ - Domain: "foo", - }).Return(&pb.RateLimitResponse{ + rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, }, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 200, "application/json", `{"overallCode":"OK"}`) // successful request, rate limited - rls.EXPECT().ShouldRateLimit(nil, &pb.RateLimitRequest{ - Domain: "foo", - }).Return(&pb.RateLimitResponse{ + rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{ OverallCode: 
pb.RateLimitResponse_OVER_LIMIT, }, nil) assertHttpResponse(t, handler, `{"domain": "foo"}`, 429, "application/json", `{"overallCode":"OVER_LIMIT"}`) diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index 5de224701..d8839b715 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -3,7 +3,9 @@ package ratelimit_test import ( "testing" + core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" @@ -12,7 +14,6 @@ import ( "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" "github.com/golang/mock/gomock" - "github.com/golang/protobuf/jsonpb" "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" @@ -23,19 +24,10 @@ func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.Rat return nil, nil } - m := &jsonpb.Marshaler{} - s, err := m.MarshalToString(ratelimit) - if err != nil { - return nil, err - } - - rl := &pb_legacy.RateLimitResponse_RateLimit{} - err = jsonpb.UnmarshalString(s, rl) - if err != nil { - return nil, err - } - - return rl, nil + return &pb_legacy.RateLimitResponse_RateLimit{ + RequestsPerUnit: ratelimit.GetRequestsPerUnit(), + Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(ratelimit.GetUnit()), + }, nil } func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimitResponse_RateLimit, error) { @@ -75,7 +67,8 @@ func TestServiceLegacy(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) 
response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, @@ -112,7 +105,8 @@ func TestServiceLegacy(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ @@ -147,7 +141,8 @@ func TestServiceLegacy(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}}) response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ @@ -261,7 +256,7 @@ func TestConvertLegacyRequest(test *testing.T) { assert.FailNow(test, err.Error()) } - assert.Equal(test, expectedRequest, req) + common.AssertProtoEqual(assert.New(test), expectedRequest, req) } { @@ -282,7 +277,7 @@ func TestConvertLegacyRequest(test *testing.T) { assert.FailNow(test, err.Error()) } - assert.Equal(test, expectedRequest, req) + common.AssertProtoEqual(assert.New(test), expectedRequest, req) } { @@ -341,7 +336,7 @@ func TestConvertLegacyRequest(test *testing.T) { assert.FailNow(test, err.Error()) } - assert.Equal(test, 
expectedRequest, req) + common.AssertProtoEqual(assert.New(test), expectedRequest, req) } } @@ -371,9 +366,21 @@ func TestConvertResponse(test *testing.T) { }, } + requestHeadersToAdd := []*core.HeaderValue{{ + Key: "test_request", + Value: "test_request_value", + }, nil} + + responseHeadersToAdd := []*core.HeaderValue{{ + Key: "test_response", + Value: "test_response", + }, nil} + response := &pb.RateLimitResponse{ - OverallCode: pb.RateLimitResponse_OVER_LIMIT, - Statuses: statuses, + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: statuses, + RequestHeadersToAdd: requestHeadersToAdd, + ResponseHeadersToAdd: responseHeadersToAdd, } expectedRl := &pb_legacy.RateLimitResponse_RateLimit{ @@ -395,9 +402,21 @@ func TestConvertResponse(test *testing.T) { }, } + expectedRequestHeadersToAdd := []*core_legacy.HeaderValue{{ + Key: "test_request", + Value: "test_request_value", + }, nil} + + expectedResponseHeadersToAdd := []*core_legacy.HeaderValue{{ + Key: "test_response", + Value: "test_response", + }, nil} + expectedResponse := &pb_legacy.RateLimitResponse{ - OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, - Statuses: expectedStatuses, + OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, + Statuses: expectedStatuses, + RequestHeadersToAdd: expectedRequestHeadersToAdd, + Headers: expectedResponseHeadersToAdd, } resp, err = ratelimit.ConvertResponse(response) @@ -405,5 +424,5 @@ func TestConvertResponse(test *testing.T) { assert.FailNow(test, err.Error()) } - assert.Equal(test, expectedResponse, resp) + common.AssertProtoEqual(assert.New(test), expectedResponse, resp) } diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 5c862fbe5..12c77926a 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -97,7 +97,8 @@ func TestService(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) response, err := 
service.ShouldRateLimit(nil, request) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, @@ -124,7 +125,8 @@ func TestService(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}) response, err = service.ShouldRateLimit(nil, request) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OVER_LIMIT, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -154,7 +156,8 @@ func TestService(test *testing.T) { []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}}) response, err = service.ShouldRateLimit(nil, request) - t.assert.Equal( + common.AssertProtoEqual( + t.assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OVER_LIMIT, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ From 6e8dda765b1559a6eba51e04e859ddf21e60e65d Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Mon, 13 Jul 2020 16:52:52 -0700 Subject: [PATCH 14/41] Introduce a Dockerfile for running integration tests (#156) This diff creates Dockerfile.integration for running integration tests with clearly-defined dependencies. Previously the dependencies of the integration tests were defined within the github actions config. The new "make docker_tests" target should work for any developer with Docker installed. Previously there was no single command that would run integration tests across platforms, which makes development and onboarding harder. 
Even copying the command from github actions wouldn't have worked before, since that command quietly assumed that redis was already running on port 6379. Signed-off-by: David Weitzman Signed-off-by: Diego Erdody --- .github/workflows/master.yaml | 10 +--------- .github/workflows/pullrequest.yaml | 10 +--------- .github/workflows/release.yaml | 10 +--------- Dockerfile.integration | 17 +++++++++++++++++ Makefile | 18 +++++++++++++++++- 5 files changed, 37 insertions(+), 28 deletions(-) create mode 100644 Dockerfile.integration diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml index 5d323a3d7..a3bd5867a 100644 --- a/.github/workflows/master.yaml +++ b/.github/workflows/master.yaml @@ -17,18 +17,10 @@ jobs: steps: - uses: actions/checkout@v2 - - name: deps - run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y - - name: build and push docker image run: | - redis-server --port 6380 & - redis-server --port 6381 --requirepass password123 & - redis-server --port 6382 --requirepass password123 & - redis-server --port 6384 --requirepass password123 & - redis-server --port 6385 --requirepass password123 & echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - make bootstrap bootstrap_redis_tls docker_push + make docker_push env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml index 2e22c6d8a..da03fd13d 100644 --- a/.github/workflows/pullrequest.yaml +++ b/.github/workflows/pullrequest.yaml @@ -16,14 +16,6 @@ jobs: steps: - uses: actions/checkout@v2 - - name: deps - run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y - - name: build and test run: | - redis-server --port 6380 & - redis-server --port 6381 --requirepass password123 & - redis-server --port 6382 --requirepass password123 & - redis-server --port 6384 --requirepass password123 & - redis-server --port 
6385 --requirepass password123 & - make bootstrap bootstrap_redis_tls tests_unit tests \ No newline at end of file + make docker_tests diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 28569b0a5..e2349f992 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,18 +17,10 @@ jobs: steps: - uses: actions/checkout@v2 - - name: deps - run: sudo apt-get update -y && sudo apt-get install stunnel4 redis -y - - name: build and push docker image run: | - redis-server --port 6380 & - redis-server --port 6381 --requirepass password123 & - redis-server --port 6382 --requirepass password123 & - redis-server --port 6384 --requirepass password123 & - redis-server --port 6385 --requirepass password123 & echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - make bootstrap bootstrap_redis_tls docker_push + make docker_push env: DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} \ No newline at end of file diff --git a/Dockerfile.integration b/Dockerfile.integration new file mode 100644 index 000000000..efff81438 --- /dev/null +++ b/Dockerfile.integration @@ -0,0 +1,17 @@ +# Running this docker image runs the integration tests. 
+FROM golang:1.14 + +RUN apt-get update -y && apt-get install sudo stunnel4 redis -y && rm -rf /var/lib/apt/lists/* + +WORKDIR /workdir + +ENV GOPROXY=https://proxy.golang.org +COPY go.mod go.sum /workdir/ +RUN go mod download + +COPY Makefile /workdir +RUN make bootstrap + +COPY src /workdir/src +COPY test /workdir/test +CMD make tests_with_redis diff --git a/Makefile b/Makefile index 038300a2a..b66a67a79 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,7 @@ export GO111MODULE=on PROJECT = ratelimit REGISTRY ?= envoyproxy IMAGE := $(REGISTRY)/$(PROJECT) +INTEGRATION_IMAGE := $(REGISTRY)/$(PROJECT)_integration MODULE = github.com/envoyproxy/ratelimit GIT_REF = $(shell git describe --tags || git rev-parse --short=8 --verify HEAD) VERSION ?= $(GIT_REF) @@ -70,8 +71,23 @@ tests_unit: compile tests: compile go test -race -tags=integration $(MODULE)/... +.PHONY: tests_with_redis +tests_with_redis: bootstrap_redis_tls tests_unit + redis-server --port 6379 & + redis-server --port 6380 & + redis-server --port 6381 --requirepass password123 & + redis-server --port 6382 --requirepass password123 & + redis-server --port 6384 --requirepass password123 & + redis-server --port 6385 --requirepass password123 & + go test -race -tags=integration $(MODULE)/... + +.PHONY: docker_tests +docker_tests: + docker build -f Dockerfile.integration . -t $(INTEGRATION_IMAGE):$(VERSION) && \ + docker run $$(tty -s && echo "-it" || echo) $(INTEGRATION_IMAGE):$(VERSION) + .PHONY: docker_image -docker_image: tests +docker_image: docker_tests docker build . -t $(IMAGE):$(VERSION) .PHONY: docker_push From 9281cc09613a27f46d8534d6778ea86d2c8c9ade Mon Sep 17 00:00:00 2001 From: Petr Pchelko Date: Tue, 14 Jul 2020 12:40:41 -0700 Subject: [PATCH 15/41] Add support for rate limit overrides. 
(#158) Fixes #154 Signed-off-by: Petr Pchelko Signed-off-by: Diego Erdody --- go.mod | 9 ++-- go.sum | 37 +++++++++++++ src/config/config_impl.go | 37 ++++++++++--- src/service/ratelimit_legacy.go | 1 + test/config/config_test.go | 76 +++++++++++++++++++++++++++ test/integration/integration_test.go | 15 ++++-- test/service/ratelimit_legacy_test.go | 1 + 7 files changed, 161 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 90a8cb84b..02d83c2b7 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,12 @@ go 1.14 require ( github.com/alicebob/miniredis/v2 v2.11.4 github.com/cespare/xxhash v1.1.0 // indirect + github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect github.com/coocood/freecache v1.1.0 - github.com/envoyproxy/go-control-plane v0.9.5 + github.com/envoyproxy/go-control-plane v0.9.6 github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/mock v1.4.1 - github.com/golang/protobuf v1.3.2 + github.com/golang/protobuf v1.4.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 github.com/kavu/go_reuseport v1.2.0 github.com/kelseyhightower/envconfig v1.1.0 @@ -26,8 +27,8 @@ require ( golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect - google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect - google.golang.org/grpc v1.25.1 + google.golang.org/grpc v1.27.0 + google.golang.org/protobuf v1.25.0 // indirect gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect gopkg.in/yaml.v2 v2.3.0 diff --git a/go.sum b/go.sum index c8b595dc3..2951b438a 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,11 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa v0.0.0-20200629203442-efcf912fb354 h1:JBAT2dkeyeqzQOaAA8tB21Zfyv/nHfaqjZvWIllABnw= github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -24,8 +27,13 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.6.9 h1:deEH9W8ZAUGNbCdX+9iNzBOGrAOrnpJGoy0PcTqk/tE= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/go-control-plane v0.9.6-0.20200630214754-219d0fe20d5e h1:C4C1u9L0TNuvG9Se3g5DdqplIjf4qy7EqzUrOr8RCVI= +github.com/envoyproxy/go-control-plane v0.9.6-0.20200630214754-219d0fe20d5e/go.mod h1:JvuSsUgXzeWfLVfAe9OeW40eBtd+E8yMydqNm0iuBxs= 
+github.com/envoyproxy/go-control-plane v0.9.6 h1:GgblEiDzxf5ajlAZY4aC8xp7DwkrGfauFNMGdB2bBv0= +github.com/envoyproxy/go-control-plane v0.9.6/go.mod h1:GFqM7v0B62MraO4PWRedIbhThr/Rf7ev6aHOOPXeaDA= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -40,8 +48,21 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= @@ -136,17 +157,33 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+y golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8= google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 
h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/src/config/config_impl.go b/src/config/config_impl.go index 02c74633c..e19b5ce09 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -39,7 +39,8 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain + domains map[string]*rateLimitDomain + statsScope stats.Scope } var validKeys = map[string]bool{ @@ -197,8 +198,7 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i // Load a single YAML config file into the global config. // @param config specifies the file contents to load. -// @param statsScope supplies the owning scope. -func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad, statsScope stats.Scope) { +func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { // validate keys in config with generic map any := map[interface{}]interface{}{} err := yaml.Unmarshal([]byte(config.FileBytes), &any) @@ -228,10 +228,24 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad, statsS logger.Debugf("loading domain: %s", root.Domain) newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}} - newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, statsScope) + newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope) this.domains[root.Domain] = newDomain } +func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string { + rateLimitKey := "" + for _, entry := range descriptor.Entries { + if rateLimitKey != "" { + rateLimitKey += "." 
+ } + rateLimitKey += entry.Key + if entry.Value != "" { + rateLimitKey += "_" + entry.Value + } + } + return rateLimitKey +} + func (this *rateLimitConfigImpl) Dump() string { ret := "" for _, domain := range this.domains { @@ -252,6 +266,17 @@ func (this *rateLimitConfigImpl) GetLimit( return rateLimit } + if descriptor.GetLimit() != nil { + rateLimitKey := domain + "." + this.descriptorToKey(descriptor) + rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) + rateLimit = NewRateLimit( + descriptor.GetLimit().GetRequestsPerUnit(), + rateLimitOverrideUnit, + rateLimitKey, + this.statsScope) + return rateLimit + } + descriptorsMap := value.descriptors for i, entry := range descriptor.Entries { // First see if key_value is in the map. If that isn't in the map we look for just key @@ -292,9 +317,9 @@ func (this *rateLimitConfigImpl) GetLimit( func NewRateLimitConfigImpl( configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope} for _, config := range configs { - ret.loadConfig(config, statsScope) + ret.loadConfig(config) } return ret diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index e8ecb98a6..17112675c 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -110,6 +110,7 @@ func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitRespon } if status.GetCurrentLimit() != nil { statuses[i].CurrentLimit = &pb_legacy.RateLimitResponse_RateLimit{ + Name: status.GetCurrentLimit().GetName(), RequestsPerUnit: status.GetCurrentLimit().GetRequestsPerUnit(), Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(status.GetCurrentLimit().GetUnit()), } diff --git a/test/config/config_test.go b/test/config/config_test.go index a2e1d204c..966381658 100644 --- a/test/config/config_test.go +++ 
b/test/config/config_test.go @@ -1,11 +1,13 @@ package config_test import ( + "github.com/envoyproxy/ratelimit/test/common" "io/ioutil" "testing" pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" "github.com/stretchr/testify/assert" @@ -150,6 +152,80 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value()) } +func TestConfigLimitOverride(t *testing.T) { + assert := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats) + rlConfig.Dump() + // No matching domain + assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 10, Unit: pb_type.RateLimitUnit_DAY, + }, + })) + rl := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 10, Unit: pb_type.RateLimitUnit_DAY, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something", rl.FullKey) + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 10, + Unit: pb.RateLimitResponse_RateLimit_DAY, + }, rl.Limit) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(1, 
stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + + // Change in override value doesn't erase stats + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 42, Unit: pb_type.RateLimitUnit_HOUR, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something", rl.FullKey) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 42, + Unit: pb.RateLimitResponse_RateLimit_HOUR, + }, rl.Limit) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + + // Different value creates a different counter + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something_else"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 42, Unit: pb_type.RateLimitUnit_HOUR, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something_else", rl.FullKey) + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 42, + Unit: pb.RateLimitResponse_RateLimit_HOUR, + }, rl.Limit) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) 
+ assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) +} + func expectConfigPanic(t *testing.T, call func(), expectedError string) { assert := assert.New(t) defer func() { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 0f01ac985..d56e29873 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -427,7 +427,8 @@ func TestBasicConfigLegacy(t *testing.T) { response, err = c.ShouldRateLimit( context.Background(), common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ @@ -451,7 +452,8 @@ func TestBasicConfigLegacy(t *testing.T) { limitRemaining = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ @@ -479,7 +481,8 @@ func TestBasicConfigLegacy(t *testing.T) { limitRemaining2 = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ @@ -523,7 +526,8 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size string) func( response, err := c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("block", enable_local_cache), "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}}, @@ -577,7 +581,8 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size string) func( response, err = c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("reload", 
[][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index d8839b715..a51ddbe90 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -25,6 +25,7 @@ func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.Rat } return &pb_legacy.RateLimitResponse_RateLimit{ + Name: ratelimit.GetName(), RequestsPerUnit: ratelimit.GetRequestsPerUnit(), Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(ratelimit.GetUnit()), }, nil From e293fb3f67f06df7790fc71c158471de34c6af1a Mon Sep 17 00:00:00 2001 From: Tong Cai Date: Wed, 12 Aug 2020 05:08:49 +0800 Subject: [PATCH 16/41] redis client: default to use explicit pipelining (#163) Signed-off-by: Tong Cai Signed-off-by: Diego Erdody --- README.md | 7 +- go.sum | 1 + src/redis/cache_impl.go | 31 ++-- src/redis/driver.go | 23 +++ src/redis/driver_impl.go | 45 ++++-- src/settings/settings.go | 8 +- test/mocks/redis/redis.go | 48 +++++++ test/redis/cache_impl_test.go | 260 +++++++++++++++++++++++----------- 8 files changed, 317 insertions(+), 106 deletions(-) diff --git a/README.md b/README.md index 944e66b24..e2f45e36f 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ - [Debug Port](#debug-port) - [Local Cache](#local-cache) - [Redis](#redis) + - [Pipelining](#pipelining) - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) - [Contact](#contact) @@ -447,7 +448,11 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. 
-Ratelimit use [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238) to send requests to redis. Pipelining can be configured using the following environment variables: +## Pipelining + +By default, for each request, ratelimit will pick up a connection from pool, wirte multiple redis commands in a single write then reads their responses in a single read. This reduces network delay. + +For high throughput scenarios, ratelimit also support [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238) . It can be configured using the following environment variables: 1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. If window is zero then implicit pipelining will be disabled. diff --git a/go.sum b/go.sum index 2951b438a..d04c74455 100644 --- a/go.sum +++ b/go.sum @@ -152,6 +152,7 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index ea4c52ee4..22528e49a 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -36,14 +36,9 @@ func max(a uint32, b uint32) uint32 { return b } -func 
pipelineAppend(client Client, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) (err error) { - if err = client.DoCmd(result, "INCRBY", key, hitsAddend); err != nil { - return - } - if err = client.DoCmd(nil, "EXPIRE", key, expirationSeconds); err != nil { - return - } - return +func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } func (this *rateLimitCacheImpl) DoLimit( @@ -74,7 +69,7 @@ func (this *rateLimitCacheImpl) DoLimit( isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) results := make([]uint32, len(request.Descriptors)) - var err error + var pipeline, perSecondPipeline Pipeline // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { @@ -101,16 +96,24 @@ func (this *rateLimitCacheImpl) DoLimit( // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. if this.perSecondClient != nil && cacheKey.PerSecond { - if err = pipelineAppend(this.perSecondClient, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { - break + if perSecondPipeline == nil { + perSecondPipeline = Pipeline{} } + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } else { - if err = pipelineAppend(this.client, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { - break + if pipeline == nil { + pipeline = Pipeline{} } + pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } } - checkError(err) + + if pipeline != nil { + checkError(this.client.PipeDo(pipeline)) + } + if perSecondPipeline != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + } // Now fetch the pipeline. 
responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, diff --git a/src/redis/driver.go b/src/redis/driver.go index 0f672df7b..7ffc0c7b7 100644 --- a/src/redis/driver.go +++ b/src/redis/driver.go @@ -1,5 +1,7 @@ package redis +import "github.com/mediocregopher/radix/v3" + // Errors that may be raised during config parsing. type RedisError string @@ -17,10 +19,31 @@ type Client interface { // @param args supplies the additional arguments. DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error + // PipeAppend append a command onto the pipeline queue. + // + // @param pipeline supplies the queue for pending commands. + // @param rcv supplies receiver for the result. + // @param cmd supplies the command to append. + // @param key supplies the key to append. + // @param args supplies the additional arguments. + PipeAppend(pipeline Pipeline, rcv interface{}, cmd, key string, args ...interface{}) Pipeline + + // PipeDo writes multiple commands to a Conn in + // a single write, then reads their responses in a single read. This reduces + // network delay into a single round-trip. + // + // @param pipeline supplies the queue for pending commands. + PipeDo(pipeline Pipeline) error + // Once Close() is called all future method calls on the Client will return // an error Close() error // NumActiveConns return number of active connections, used in testing. NumActiveConns() int + + // ImplicitPipeliningEnabled return true if implicit pipelining is enabled. 
+ ImplicitPipeliningEnabled() bool } + +type Pipeline []radix.CmdAction diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 47d0d5853..077068952 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -40,8 +40,9 @@ func poolTrace(ps *poolStats) trace.PoolTrace { } type clientImpl struct { - client radix.Client - stats poolStats + client radix.Client + stats poolStats + implicitPipelining bool } func checkError(err error) { @@ -76,11 +77,17 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, url string, pool stats := newPoolStats(scope) + opts := []radix.PoolOpt{radix.PoolConnFunc(df), radix.PoolWithTrace(poolTrace(&stats))} + + implicitPipelining := true + if pipelineWindow == 0 && pipelineLimit == 0 { + implicitPipelining = false + } else { + opts = append(opts, radix.PoolPipelineWindow(pipelineWindow, pipelineLimit)) + } + // TODO: support sentinel and redis cluster - pool, err := radix.NewPool("tcp", url, poolSize, radix.PoolConnFunc(df), - radix.PoolPipelineWindow(pipelineWindow, pipelineLimit), - radix.PoolWithTrace(poolTrace(&stats)), - ) + pool, err := radix.NewPool("tcp", url, poolSize, opts...) 
checkError(err) // Check if connection is good @@ -91,8 +98,9 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, url string, pool } return &clientImpl{ - client: pool, - stats: stats, + client: pool, + stats: stats, + implicitPipelining: implicitPipelining, } } @@ -107,3 +115,24 @@ func (c *clientImpl) Close() error { func (c *clientImpl) NumActiveConns() int { return int(c.stats.connectionActive.Value()) } + +func (c *clientImpl) PipeAppend(pipeline Pipeline, rcv interface{}, cmd, key string, args ...interface{}) Pipeline { + return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) +} + +func (c *clientImpl) PipeDo(pipeline Pipeline) error { + if c.implicitPipelining { + for _, action := range pipeline { + if err := c.client.Do(action); err != nil { + return err + } + } + return nil + } + + return c.client.Do(radix.Pipeline(pipeline...)) +} + +func (c *clientImpl) ImplicitPipeliningEnabled() bool { + return c.implicitPipelining +} diff --git a/src/settings/settings.go b/src/settings/settings.go index 971ff60cc..78e902426 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -27,16 +27,16 @@ type Settings struct { RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` RedisAuth string `envconfig:"REDIS_AUTH" default:""` RedisTls bool `envconfig:"REDIS_TLS" default:"false"` - RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"75µs"` - RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"8"` + RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"0"` + RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"` RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" 
default:"10"` RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` - RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"75µs"` - RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"8"` + RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"0"` + RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"` ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` } diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index 4d3001468..032b500dc 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -5,6 +5,7 @@ package mock_redis import ( + redis "github.com/envoyproxy/ratelimit/src/redis" gomock "github.com/golang/mock/gomock" reflect "reflect" ) @@ -65,6 +66,20 @@ func (mr *MockClientMockRecorder) DoCmd(arg0, arg1, arg2 interface{}, arg3 ...in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) 
} +// ImplicitPipeliningEnabled mocks base method +func (m *MockClient) ImplicitPipeliningEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImplicitPipeliningEnabled") + ret0, _ := ret[0].(bool) + return ret0 +} + +// ImplicitPipeliningEnabled indicates an expected call of ImplicitPipeliningEnabled +func (mr *MockClientMockRecorder) ImplicitPipeliningEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImplicitPipeliningEnabled", reflect.TypeOf((*MockClient)(nil).ImplicitPipeliningEnabled)) +} + // NumActiveConns mocks base method func (m *MockClient) NumActiveConns() int { m.ctrl.T.Helper() @@ -78,3 +93,36 @@ func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } + +// PipeAppend mocks base method +func (m *MockClient) PipeAppend(arg0 redis.Pipeline, arg1 interface{}, arg2, arg3 string, arg4 ...interface{}) redis.Pipeline { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2, arg3} + for _, a := range arg4 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PipeAppend", varargs...) + ret0, _ := ret[0].(redis.Pipeline) + return ret0 +} + +// PipeAppend indicates an expected call of PipeAppend +func (mr *MockClientMockRecorder) PipeAppend(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeAppend", reflect.TypeOf((*MockClient)(nil).PipeAppend), varargs...) 
+} + +// PipeDo mocks base method +func (m *MockClient) PipeDo(arg0 redis.Pipeline) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PipeDo", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// PipeDo indicates an expected call of PipeDo +func (mr *MockClientMockRecorder) PipeDo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeDo", reflect.TypeOf((*MockClient)(nil).PipeDo), arg0) +} diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 7c7951e19..65a9d2041 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/coocood/freecache" + "github.com/mediocregopher/radix/v3" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/config" @@ -27,6 +28,10 @@ func TestRedis(t *testing.T) { t.Run("WithPerSecondRedis", testRedis(true)) } +func pipeAppend(pipeline redis.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis.Pipeline { + return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) +} + func testRedis(usePerSecondRedis bool) func(*testing.T) { return func(t *testing.T) { assert := assert.New(t) @@ -52,8 +57,9 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client } - clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(0, uint32(5)) - clientUsed.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := 
[]*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -67,9 +73,10 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1234)) - clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(0, uint32(11)) - clientUsed.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest( "domain", @@ -90,12 +97,13 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1000000)) - clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(0, uint32(11)) - clientUsed.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key3_value3_997200", int64(3600)) - clientUsed.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(0, uint32(13)) - clientUsed.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(1, 
uint32(13)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest( "domain", @@ -171,9 +179,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(11)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -194,9 +203,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Near Limit Stats. 
At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(13)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -212,9 +222,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Over limit stats timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(16)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -230,8 +241,8 @@ func TestOverLimitWithLocalCache(t *testing.T) { // Test Over limit stats with local cache timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) - client.EXPECT().DoCmd(gomock.Any(), + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) assert.Equal( 
[]*pb.RateLimitResponse_DescriptorStatus{ @@ -258,9 +269,10 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(11)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -277,9 +289,10 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. At Near Limit Ratio, still OK timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(13)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -292,9 +305,10 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. 
timeSource.EXPECT().UnixNow().Return(int64(1000000)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(0, uint32(16)) - client.EXPECT().DoCmd(gomock.Any(), - "EXPIRE", "domain_key4_value4_997200", int64(3600)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -307,8 +321,9 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(0, uint32(5)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} @@ -322,8 +337,9 @@ func TestNearLimit(t *testing.T) { // All of it under limit, some over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(0, uint32(7)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", 
"domain_key6_value6_1234", uint32(2)).SetArg(1, uint32(7)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} @@ -337,8 +353,9 @@ func TestNearLimit(t *testing.T) { // All of it under limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(0, uint32(19)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(1, uint32(19)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} @@ -352,8 +369,9 @@ func TestNearLimit(t *testing.T) { // Some of it over limit, all of it over near limit timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(0, uint32(22)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) + 
client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} @@ -367,8 +385,9 @@ func TestNearLimit(t *testing.T) { // Some of it in all three places timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(0, uint32(22)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} @@ -382,8 +401,9 @@ func TestNearLimit(t *testing.T) { // all of it over limit timeSource.EXPECT().UnixNow().Return(int64(1234)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(0, uint32(30)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", uint32(3)).SetArg(1, uint32(30)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} @@ 
-409,8 +429,9 @@ func TestRedisWithJitter(t *testing.T) { timeSource.EXPECT().UnixNow().Return(int64(1234)) jitterSource.EXPECT().Int63().Return(int64(100)) - client.EXPECT().DoCmd(gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(0, uint32(5)) - client.EXPECT().DoCmd(gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -443,53 +464,73 @@ func expectPanicError(t *testing.T, f assert.PanicTestFunc) (result error) { return } -func TestNewClientImpl(t *testing.T) { - redisAuth := "123" - statsStore := stats.NewStore(stats.NewNullSink(), false) +func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) func(t *testing.T) { + return func(t *testing.T) { + redisAuth := "123" + statsStore := stats.NewStore(stats.NewNullSink(), false) - mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, addr, 1, 1*time.Millisecond, 1) - } + mkRedisClient := func(auth, addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, auth, addr, 1, pipelineWindow, pipelineLimit) + } - t.Run("connection refused", func(t *testing.T) { - // It's possible there is a redis server listening on 6379 in ci environment, so - // use a random port. 
- panicErr := expectPanicError(t, func() { mkRedisClient("", "localhost:12345") }) - assert.Contains(t, panicErr.Error(), "connection refused") - }) + t.Run("connection refused", func(t *testing.T) { + // It's possible there is a redis server listening on 6379 in ci environment, so + // use a random port. + panicErr := expectPanicError(t, func() { mkRedisClient("", "localhost:12345") }) + assert.Contains(t, panicErr.Error(), "connection refused") + }) - t.Run("ok", func(t *testing.T) { - redisSrv := mustNewRedisServer() - defer redisSrv.Close() + t.Run("ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() - var client redis.Client - assert.NotPanics(t, func() { - client = mkRedisClient("", redisSrv.Addr()) + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClient("", redisSrv.Addr()) + }) + assert.NotNil(t, client) }) - assert.NotNil(t, client) - }) - t.Run("auth fail", func(t *testing.T) { - redisSrv := mustNewRedisServer() - defer redisSrv.Close() + t.Run("auth fail", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() - redisSrv.RequireAuth(redisAuth) + redisSrv.RequireAuth(redisAuth) - assert.PanicsWithError(t, "NOAUTH Authentication required.", func() { - mkRedisClient("", redisSrv.Addr()) + assert.PanicsWithError(t, "NOAUTH Authentication required.", func() { + mkRedisClient("", redisSrv.Addr()) + }) }) - }) - t.Run("auth pass", func(t *testing.T) { - redisSrv := mustNewRedisServer() - defer redisSrv.Close() + t.Run("auth pass", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() - redisSrv.RequireAuth(redisAuth) + redisSrv.RequireAuth(redisAuth) - assert.NotPanics(t, func() { - mkRedisClient(redisAuth, redisSrv.Addr()) + assert.NotPanics(t, func() { + mkRedisClient(redisAuth, redisSrv.Addr()) + }) }) - }) + + t.Run("ImplicitPipeliningEnabled() return expected value", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer 
redisSrv.Close() + + client := mkRedisClient("", redisSrv.Addr()) + + if pipelineWindow == 0 && pipelineLimit == 0 { + assert.False(t, client.ImplicitPipeliningEnabled()) + } else { + assert.True(t, client.ImplicitPipeliningEnabled()) + } + }) + } +} + +func TestNewClientImpl(t *testing.T) { + t.Run("ImplicitPipeliningEnabled", testNewClientImpl(t, 2*time.Millisecond, 2)) + t.Run("ImplicitPipeliningDisabled", testNewClientImpl(t, 0, 0)) } func TestDoCmd(t *testing.T) { @@ -535,3 +576,64 @@ func TestDoCmd(t *testing.T) { assert.EqualError(t, client.DoCmd(nil, "GET", "foo"), "EOF") }) } + +func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) func(t *testing.T) { + return func(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, "", addr, 1, pipelineWindow, pipelineLimit) + } + + t.Run("SETGET ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res string + + pipeline := redis.Pipeline{} + pipeline = client.PipeAppend(pipeline, nil, "SET", "foo", "bar") + pipeline = client.PipeAppend(pipeline, &res, "GET", "foo") + + assert.Nil(t, client.PipeDo(pipeline)) + assert.Equal(t, "bar", res) + }) + + t.Run("INCRBY ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res uint32 + hits := uint32(1) + + assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, &res, "INCRBY", "a", hits))) + assert.Equal(t, hits, res) + + assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, &res, "INCRBY", "a", hits))) + assert.Equal(t, uint32(2), res) + }) + + t.Run("connection broken", func(t *testing.T) { + redisSrv := mustNewRedisServer() + client := mkRedisClient(redisSrv.Addr()) + + assert.Nil(t, nil, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, 
"SET", "foo", "bar"))) + + redisSrv.Close() + + expectErrContainEOF := func(t *testing.T, err error) { + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "EOF") + } + + expectErrContainEOF(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "GET", "foo"))) + }) + } +} + +func TestPipeDo(t *testing.T) { + t.Run("ImplicitPipeliningEnabled", testPipeDo(t, 10*time.Millisecond, 2)) + t.Run("ImplicitPipeliningDisabled", testPipeDo(t, 0, 0)) +} From d95eea3f544f5b1c490a1b92d83244bb51ab60ce Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Tue, 25 Aug 2020 15:39:10 -0700 Subject: [PATCH 17/41] Clean go.mod file and update logrus to latest (#166) Signed-off-by: Yuki Sawa Signed-off-by: Diego Erdody --- go.mod | 12 +++--------- go.sum | 62 +++++++--------------------------------------------------- 2 files changed, 10 insertions(+), 64 deletions(-) diff --git a/go.mod b/go.mod index 02d83c2b7..52db99472 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect github.com/coocood/freecache v1.1.0 github.com/envoyproxy/go-control-plane v0.9.6 - github.com/gogo/protobuf v1.3.1 // indirect + github.com/fsnotify/fsnotify v1.4.7 // indirect github.com/golang/mock v1.4.1 github.com/golang/protobuf v1.4.2 github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 @@ -16,20 +16,14 @@ require ( github.com/kelseyhightower/envconfig v1.1.0 github.com/lyft/goruntime v0.2.5 github.com/lyft/gostats v0.4.0 - github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 // indirect github.com/mediocregopher/radix/v3 v3.5.1 - github.com/onsi/ginkgo v1.12.0 // indirect - github.com/onsi/gomega v1.9.0 // indirect - github.com/sirupsen/logrus v1.0.4 + github.com/sirupsen/logrus v1.6.0 github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/testify v1.5.1 - golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 // indirect golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 - golang.org/x/sync 
v0.0.0-20200317015054-43a5402ce75a // indirect + golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e // indirect golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect google.golang.org/grpc v1.27.0 google.golang.org/protobuf v1.25.0 // indirect - gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect - gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect gopkg.in/yaml.v2 v2.3.0 ) diff --git a/go.sum b/go.sum index d04c74455..a077d1811 100644 --- a/go.sum +++ b/go.sum @@ -4,7 +4,6 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -14,7 +13,6 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa v0.0.0-20200629203442-efcf912fb354 h1:JBAT2dkeyeqzQOaAA8tB21Zfyv/nHfaqjZvWIllABnw= github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= github.com/cncf/udpa/go 
v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas= @@ -24,22 +22,14 @@ github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsip github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.6.9 h1:deEH9W8ZAUGNbCdX+9iNzBOGrAOrnpJGoy0PcTqk/tE= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= -github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= -github.com/envoyproxy/go-control-plane v0.9.6-0.20200630214754-219d0fe20d5e h1:C4C1u9L0TNuvG9Se3g5DdqplIjf4qy7EqzUrOr8RCVI= -github.com/envoyproxy/go-control-plane v0.9.6-0.20200630214754-219d0fe20d5e/go.mod h1:JvuSsUgXzeWfLVfAe9OeW40eBtd+E8yMydqNm0iuBxs= github.com/envoyproxy/go-control-plane v0.9.6 h1:GgblEiDzxf5ajlAZY4aC8xp7DwkrGfauFNMGdB2bBv0= github.com/envoyproxy/go-control-plane v0.9.6/go.mod h1:GFqM7v0B62MraO4PWRedIbhThr/Rf7ev6aHOOPXeaDA= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -57,53 +47,39 @@ github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i2E= github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= github.com/kelseyhightower/envconfig v1.1.0 h1:4htXR8ameS6KBfrNBoqEgpg0IK2D6rozN9ATOPwRfM0= github.com/kelseyhightower/envconfig v1.1.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg= github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= github.com/lyft/gostats v0.4.0/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= -github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414 h1:kLCSHuk3X+SI8Up26wM71id7jz77B3zCZDp01UWMVbM= -github.com/lyft/protoc-gen-validate v0.0.7-0.20180626203901-f9d2b11e4414/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 h1:ViNuGS149jgnttqhc6XQNPwdupEMBXqCx9wtlW7P3sA= -github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9/go.mod h1:fLRUbhbSd5Px2yKUaGYYPltlyxi1guJz1vCmo1RQL50= -github.com/mediocregopher/radix/v3 v3.4.2 h1:galbPBjIwmyREgwGCfQEN4X8lxbJnKBYurgz+VfcStA= -github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mediocregopher/radix/v3 v3.5.1 h1:IOYgQUMA380N4khaL5eNT4v/P2LnHa8b0wnVdwZMFsY= github.com/mediocregopher/radix/v3 v3.5.1/go.mod 
h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/sirupsen/logrus v1.0.4 h1:gzbtLsZC3Ic5PptoRG+kQj4L60qjK7H7XszrU163JNQ= -github.com/sirupsen/logrus v1.0.4/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.1.3 h1:76sIvNG1I8oBerx/MvuVHh5HBWBW7oxfsi3snKIsz5w= -github.com/stretchr/testify v1.1.3/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2 
h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -112,18 +88,14 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191219195013-becbf705a915 h1:aJ0ex187qoXrJHPo8ZasVTASQB7llQP6YeNzgDALPRk= -golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -131,14 +103,10 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ 
-146,7 +114,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb h1:MsKWO3hK1h941VWsQ8dKJqIdb3r3XP9/cDw8n/B95SM= golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -154,8 +121,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86J golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -164,8 +129,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8= -google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= @@ -185,20 +148,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From c54899034d4280ccd42bc40e5417d0c415e5b5ac Mon Sep 17 00:00:00 2001 From: Yuki Sawa Date: Fri, 4 Sep 2020 18:00:15 -0700 Subject: [PATCH 18/41] Add full test environment example. Fix bug in existing docker-compose. (#170) Signed-off-by: Yuki Sawa Signed-off-by: Diego Erdody --- README.md | 26 ++++++- docker-compose-example.yml | 87 +++++++++++++++++++++ docker-compose.yml | 3 +- examples/envoy/mock.yaml | 34 ++++++++ examples/envoy/proxy.yaml | 104 +++++++++++++++++++++++++ examples/ratelimit/config/example.yaml | 29 +++++++ 6 files changed, 278 insertions(+), 5 deletions(-) create mode 100644 docker-compose-example.yml create mode 100644 examples/envoy/mock.yaml create mode 100644 examples/envoy/proxy.yaml create mode 100644 examples/ratelimit/config/example.yaml diff --git a/README.md b/README.md index e2f45e36f..304e835d2 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@ - [Deprecation Schedule](#deprecation-schedule) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) + - [Full test environment](#full-test-environment) - [Configuration](#configuration) - [The configuration format](#the-configuration-format) - [Definitions](#definitions) @@ -110,6 +111,23 @@ If 
you want to run with [two redis instances](#two-redis-instances), you will ne the docker-compose.yaml file to run a second redis container, and change the environment variables as explained in the [two redis instances](#two-redis-instances) section. +## Full test environment +To run a fully configured environment to demo Envoy based rate limiting, run: +```bash +docker-compose -f docker-compose-example.yaml up +``` +This will run ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints. +```bash +curl localhost:8888/test +curl localhost:8888/header -H "foo: foo" # Header based +curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers +curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz" +curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value +``` +Edit `examples/ratelimit/config/example.yaml` to test different rate limit configs. Hot reloading is enabled. + +The descriptors in `example.yaml` and the actions in `examples/envoy/proxy.yaml` should give you a good idea on how to configure rate limits. + # Configuration ## The configuration format @@ -325,7 +343,7 @@ There are two methods for triggering a configuration reload: 1. Symlink RUNTIME_ROOT to a different directory. 2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. -The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. +The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime). @@ -377,7 +395,7 @@ The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two end ## /json endpoint -Takes an HTTP POST with a JSON body of the form e.g. +Takes an HTTP POST with a JSON body of the form e.g. 
```json { "domain": "dummy", @@ -389,7 +407,7 @@ Takes an HTTP POST with a JSON body of the form e.g. ] } ``` -The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more +The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more ratelimits were exceeded. The response is a RateLimitResponse encoded with @@ -432,7 +450,7 @@ You can specify the debug port with the `DEBUG_PORT` environment variable. It de # Local Cache -Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys, and thus avoids reading the +Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys, and thus avoids reading the redis cache again for the already over-the-limit keys. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go). If `LocalCacheSizeInBytes` is 0, local cache is disabled. 
diff --git a/docker-compose-example.yml b/docker-compose-example.yml new file mode 100644 index 000000000..f96879ee4 --- /dev/null +++ b/docker-compose-example.yml @@ -0,0 +1,87 @@ +version: "3" +services: + redis: + image: redis:alpine + expose: + - 6379 + ports: + - 6379:6379 + networks: + - ratelimit-network + + statsd: + image: prom/statsd-exporter:v0.18.0 + expose: + - 9125 + ports: + - 9125:9125 + networks: + - ratelimit-network + + ratelimit: + image: envoyproxy/ratelimit:master + command: /bin/ratelimit + ports: + - 8080:8080 + - 8081:8081 + - 6070:6070 + depends_on: + - redis + - statsd + networks: + - ratelimit-network + volumes: + - ./examples/ratelimit/config:/data/ratelimit/config + environment: + - USE_STATSD=true + - STATSD_HOST=statsd + - STATSD_PORT=9125 + - LOG_LEVEL=debug + - REDIS_SOCKET_TYPE=tcp + - REDIS_URL=redis:6379 + - RUNTIME_ROOT=/data + - RUNTIME_SUBDIRECTORY=ratelimit + - RUNTIME_WATCH_ROOT=false + + envoy-proxy: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node proxy" + - "--service-cluster proxy" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + volumes: + - ./examples/envoy/proxy.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "8888" + - "8001" + ports: + - "8888:8888" + - "8001:8001" + + envoy-mock: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node mock" + - "--service-cluster mock" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + volumes: + - ./examples/envoy/mock.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "9999" + ports: + - "9999:9999" + +networks: + ratelimit-network: diff --git a/docker-compose.yml b/docker-compose.yml index 51360d361..ac1ab9063 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -29,7 +29,8 @@ services: ratelimit: image: 
alpine:3.6 - command: /usr/local/bin/ratelimit + command: > + sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" ports: - 8080:8080 - 8081:8081 diff --git a/examples/envoy/mock.yaml b/examples/envoy/mock.yaml new file mode 100644 index 000000000..bd85fc3d2 --- /dev/null +++ b/examples/envoy/mock.yaml @@ -0,0 +1,34 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 9999 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress + route_config: + name: ingress + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/" + direct_response: + status: "200" + body: + inline_string: "Hello World" + http_filters: + - name: envoy.router + config: {} +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml new file mode 100644 index 000000000..bb45503f9 --- /dev/null +++ b/examples/envoy/proxy.yaml @@ -0,0 +1,104 @@ +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 +static_resources: + clusters: + - name: ratelimit + type: STRICT_DNS + connect_timeout: 1s + lb_policy: ROUND_ROBIN + protocol_selection: USE_CONFIGURED_PROTOCOL + http2_protocol_options: {} + load_assignment: + cluster_name: ratelimit + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ratelimit + port_value: 8081 + - name: mock + connect_timeout: 1s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: mock + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: envoy-mock + port_value: 9999 + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8888 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + 
stat_prefix: ingress + http_filters: + - name: envoy.rate_limit + config: + domain: rl + request_type: external + stage: 0 + rate_limited_as_resource_exhausted: true + failure_mode_deny: false + rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit + - name: envoy.router + config: {} + route_config: + name: route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: /test + route: + cluster: mock + rate_limits: + - actions: + - source_cluster: {} + - destination_cluster: {} + - match: + prefix: /header + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - match: + prefix: /twoheader + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "bar" + descriptor_key: "bar" + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "baz" + descriptor_key: "baz" diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml new file mode 100644 index 000000000..03e2f7839 --- /dev/null +++ b/examples/ratelimit/config/example.yaml @@ -0,0 +1,29 @@ +--- +domain: rl +descriptors: + - key: source_cluster + value: proxy + descriptors: + - key: destination_cluster + value: mock + rate_limit: + unit: minute + requests_per_unit: 1 + - key: foo + rate_limit: + unit: minute + requests_per_unit: 2 + descriptors: + - key: bar + rate_limit: + unit: minute + requests_per_unit: 3 + - key: bar + value: banned + rate_limit: + unit: minute + requests_per_unit: 0 + - key: baz + rate_limit: + unit: second + requests_per_unit: 1 From b065396d7a3087ec30e74974c828784ee6cbbfb6 Mon Sep 17 00:00:00 2001 From: Sergey Belyaev Date: Fri, 11 Sep 2020 04:44:34 +0700 Subject: [PATCH 19/41] Implement LOG_FORMAT=json (#173) Centralized log collection system works better with logs in json format. E.g. 
DataDog strongly encourage setting up your logging library to produce your logs in JSON format to avoid the need for custom parsing rules. So, the next small fix is all we need to get json logs. Signed-off-by: Sergey Belyaev Signed-off-by: Diego Erdody --- README.md | 35 ++++++++++++++++++++++++++++++++ src/service_cmd/runner/runner.go | 11 ++++++++++ src/settings/settings.go | 1 + 3 files changed, 47 insertions(+) diff --git a/README.md b/README.md index 304e835d2..ff38814ee 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ - [Example 3](#example-3) - [Example 4](#example-4) - [Loading Configuration](#loading-configuration) + - [Log Format](#log-format) - [Request Fields](#request-fields) - [Statistics](#statistics) - [HTTP Port](#http-port) @@ -347,6 +348,40 @@ The former is the default behavior. To use the latter method, set the `RUNTIME_W For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime). +## Log Format + +A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules. +The Ratelimit service produces logs in a text format by default. 
For Example: + +``` +time="2020-09-10T17:22:35Z" level=debug msg="loading domain: messaging" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.message_type_marketing" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.message_type_marketing.to_number ratelimit={requests_per_unit=5, unit=DAY}" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.to_number ratelimit={requests_per_unit=100, unit=DAY}" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for debug on ':6070'" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for HTTP on ':8080'" +time="2020-09-10T17:21:55Z" level=debug msg="waiting for runtime update" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for gRPC on ':8081'" +``` + +JSON Log format can be configured using the following environment variables: + +``` +LOG_FORMAT=json +``` + +Output example: +``` +{"@message":"loading domain: messaging","@timestamp":"2020-09-10T17:22:44.926010192Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.message_type_marketing","@timestamp":"2020-09-10T17:22:44.926019315Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.message_type_marketing.to_number ratelimit={requests_per_unit=5, unit=DAY}","@timestamp":"2020-09-10T17:22:44.926037174Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.to_number ratelimit={requests_per_unit=100, unit=DAY}","@timestamp":"2020-09-10T17:22:44.926048993Z","level":"debug"} +{"@message":"Listening for debug on ':6070'","@timestamp":"2020-09-10T17:22:44.926113905Z","level":"warning"} +{"@message":"Listening for gRPC on ':8081'","@timestamp":"2020-09-10T17:22:44.926182006Z","level":"warning"} +{"@message":"Listening for HTTP on ':8080'","@timestamp":"2020-09-10T17:22:44.926227031Z","level":"warning"} +{"@message":"waiting for runtime update","@timestamp":"2020-09-10T17:22:44.926267808Z","level":"debug"} +``` + # Request Fields For 
information on the fields of a Ratelimit gRPC request please read the information diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index ef318bde1..80e8e7814 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -4,6 +4,7 @@ import ( "io" "math/rand" "net/http" + "strings" "time" stats "github.com/lyft/gostats" @@ -43,6 +44,16 @@ func (runner *Runner) Run() { } else { logger.SetLevel(logLevel) } + if strings.ToLower(s.LogFormat) == "json" { + logger.SetFormatter(&logger.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + FieldMap: logger.FieldMap{ + logger.FieldKeyTime: "@timestamp", + logger.FieldKeyMsg: "@message", + }, + }) + } + var localCache *freecache.Cache if s.LocalCacheSizeInBytes != 0 { localCache = freecache.NewCache(s.LocalCacheSizeInBytes) diff --git a/src/settings/settings.go b/src/settings/settings.go index 78e902426..3e951b389 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -22,6 +22,7 @@ type Settings struct { RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` + LogFormat string `envconfig:"LOG_FORMAT" default:"text"` RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` From 02ae72a069c7c4490b2d9a09b83f7df1496ebe96 Mon Sep 17 00:00:00 2001 From: Diego Erdody Date: Mon, 14 Sep 2020 23:30:55 -0700 Subject: [PATCH 20/41] Add cluster support Signed-off-by: Diego Erdody --- src/redis/cache_impl.go | 4 ++-- src/redis/driver_impl.go | 31 ++++++++++++++++++++++++++----- src/settings/settings.go | 2 ++ 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 22528e49a..c76ef067c 100644 --- 
a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -226,10 +226,10 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca var perSecondPool Client if s.RedisPerSecond { perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, - s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } var otherPool Client - otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, + otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) return NewRateLimitCacheImpl( diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 077068952..4afeed48a 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -3,6 +3,7 @@ package redis import ( "crypto/tls" "fmt" + "strings" "time" "github.com/mediocregopher/radix/v3/trace" @@ -51,7 +52,7 @@ func checkError(err error) { } } -func NewClientImpl(scope stats.Scope, useTls bool, auth string, url string, poolSize int, +func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string, url string, poolSize int, pipelineWindow time.Duration, pipelineLimit int) Client { logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) @@ -86,19 +87,39 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, url string, pool opts = append(opts, radix.PoolPipelineWindow(pipelineWindow, pipelineLimit)) } - // TODO: support sentinel and redis cluster - pool, err := radix.NewPool("tcp", url, poolSize, opts...) + poolFunc := func(network, addr string) (radix.Client, error) { + return radix.NewPool(network, addr, poolSize, opts...) 
+ } + + var client radix.Client + var err error + switch redisType { + case "SINGLE": + client, err = poolFunc("tcp", url) + case "CLUSTER": + urls := strings.Split(url, ",") + client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) + case "SENTINEL": + urls := strings.Split(url, ",") + if len(urls) < 2 { + panic(RedisError("Expected a list of urls for the sentinel mode, in the format: <master>,<sentinel1>,...,<sentinelN>")) + } + client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) + default: + panic(RedisError("Unrecognized redis type " + redisType)) + } + + checkError(err) // Check if connection is good var pingResponse string - checkError(pool.Do(radix.Cmd(&pingResponse, "PING"))) + checkError(client.Do(radix.Cmd(&pingResponse, "PING"))) if pingResponse != "PONG" { checkError(fmt.Errorf("connecting redis error: %s", pingResponse)) } return &clientImpl{ - client: pool, + client: client, stats: stats, implicitPipelining: implicitPipelining, } diff --git a/src/settings/settings.go b/src/settings/settings.go index 3e951b389..5073477cb 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -24,6 +24,7 @@ type Settings struct { LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` LogFormat string `envconfig:"LOG_FORMAT" default:"text"` RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` + RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` RedisAuth string `envconfig:"REDIS_AUTH" default:""` @@ -32,6 +33,7 @@ type Settings struct { RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"` RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` + RedisPerSecondType string `envconfig:"REDIS_PERSECOND_TYPE" default:"SINGLE"` RedisPerSecondUrl string 
`envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` From c5f24c6b3c7e3792a0b3da6735a599c8c3ea15af Mon Sep 17 00:00:00 2001 From: Diego Erdody Date: Mon, 14 Sep 2020 23:53:22 -0700 Subject: [PATCH 21/41] Fix tests Signed-off-by: Diego Erdody --- src/redis/driver_impl.go | 8 ++++---- test/redis/bench_test.go | 2 +- test/redis/cache_impl_test.go | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 4afeed48a..01db05c66 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -93,13 +93,13 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string var client radix.Client var err error - switch redisType { - case "SINGLE": + switch strings.ToLower(redisType) { + case "single": client, err = poolFunc("tcp", url) - case "CLUSTER": + case "cluster": urls := strings.Split(url, ",") client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) - case "SENTINEL": + case "sentinel": urls := strings.Split(url, ",") if len(urls) < 2 { panic(RedisError("Expected a list of urls for the sentinel mode, in the format: <master>,<sentinel1>,...,<sentinelN>")) } client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 8945f8706..ac40631e8 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -41,7 +41,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { return func(b *testing.B) { statsStore := stats.NewStore(stats.NewNullSink(), false) - client := redis.NewClientImpl(statsStore, false, "", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close()
cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil) diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 65a9d2041..6b9a4d631 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -470,7 +470,7 @@ func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(auth, addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, auth, addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, auth, "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("connection refused", func(t *testing.T) { @@ -537,7 +537,7 @@ func TestDoCmd(t *testing.T) { statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", addr, 1, 0, 0) + return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, 0, 0) } t.Run("SETGET ok", func(t *testing.T) { @@ -582,7 +582,7 @@ func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) f statsStore := stats.NewStore(stats.NewNullSink(), false) mkRedisClient := func(addr string) redis.Client { - return redis.NewClientImpl(statsStore, false, "", addr, 1, pipelineWindow, pipelineLimit) + return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, pipelineWindow, pipelineLimit) } t.Run("SETGET ok", func(t *testing.T) { From 7d5ed80ad6b15851853dcb771c3673d68b7999db Mon Sep 17 00:00:00 2001 From: Diego Erdody Date: Wed, 16 Sep 2020 16:57:09 -0700 Subject: [PATCH 22/41] Add documentation Signed-off-by: Diego Erdody --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index ff38814ee..8e5a1db64 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,7 @@ - [Debug Port](#debug-port) - [Local 
Cache](#local-cache) - [Redis](#redis) + - [Redis type](#redis-type) - [Pipelining](#pipelining) - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) @@ -501,6 +502,20 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. +## Redis type + +Ratelimit supports different types of redis deployments: + +1. Single instance (default): Talk to a single instance of redis, or a redis proxy (e.g. https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/other_protocols/redis) +1. Sentinel: Talk to a redis deployment with sentinel instances (see https://redis.io/topics/sentinel) +1. Cluster: Talk to a redis in cluster mode (see https://redis.io/topics/cluster-spec) + +The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TYPE` environment variables. Depending on the type defined, the `REDIS_URL` and `REDIS_PERSECOND_URL` are expected to have the following formats: + +1. "single": Depending on the socket type defined, either a single hostname:port pair or a unix domain socket reference. +2. "sentinel": A comma separated list of hostname:port pairs. The list size should be >= 2. The first item is the primary node url and the rest are the sentinels. +3. "cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. + ## Pipelining By default, for each request, ratelimit will pick up a connection from pool, wirte multiple redis commands in a single write then reads their responses in a single read. This reduces network delay. @@ -519,6 +534,7 @@ To configure one Redis instance use the following environment variables: 1. `REDIS_SOCKET_TYPE` 1. `REDIS_URL` 1. `REDIS_POOL_SIZE` +1. 
`REDIS_TYPE` (optional) This setup will use the same Redis server for all limits. @@ -533,6 +549,7 @@ To configure two Redis instances use the following environment variables: 1. `REDIS_PERSECOND_SOCKET_TYPE` 1. `REDIS_PERSECOND_URL` 1. `REDIS_PERSECOND_POOL_SIZE` +1. `REDIS_PERSECOND_TYPE` (optional) This setup will use the Redis server configured with the `_PERSECOND_` vars for per second limits, and the other Redis server for all other limits. From 4098ddeb7e07f0568d4fc0bcb3a50bd0e6a8d354 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Fri, 18 Sep 2020 18:28:05 +0100 Subject: [PATCH 23/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 105 +++++++++++++++++++++------ 1 file changed, 84 insertions(+), 21 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index d56e29873..1635cc728 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -3,19 +3,15 @@ package integration_test import ( - "bytes" "fmt" - "io/ioutil" - "io" "math/rand" - "net/http" "os" "strconv" "testing" "time" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/envoyproxy/ratelimit/src/service_cmd/runner" "github.com/envoyproxy/ratelimit/test/common" "github.com/stretchr/testify/assert" @@ -48,32 +44,51 @@ func newDescriptorStatusLegacy( // TODO: Once adding the ability of stopping the server in the runner (https://github.com/envoyproxy/ratelimit/issues/119), // stop the server at the end of each test, thus we can reuse the grpc port among these integration tests. 
func TestBasicConfig(t *testing.T) { - t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) - t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) - t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) - t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) +// t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) +// t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) +// t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) +// t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) } -func TestBasicTLSConfig(t *testing.T) { +func TestBasicConfigWithRedisCluster(t *testing.T) { +// t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8083", "false", "0")) +// t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8085", "true", "0")) +// t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18083", "false", "1000")) +// t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18085", "true", "1000")) +} + +/*func TestBasicTLSConfig(t *testing.T) { t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false", "0")) t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true", "0")) t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18087", "false", "1000")) t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18089", "true", "1000")) -} +}*/ func TestBasicAuthConfig(t *testing.T) { t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuth("8091", "false", "0")) - t.Run("WithPerSecondRedisAuth", testBasicConfigAuth("8093", "true", "0")) - t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) - t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) +// t.Run("WithPerSecondRedisAuth", 
testBasicConfigAuth("8093", "true", "0")) +// t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) +// t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) +} + +func TestBasicAuthConfigWithRedisCluster(t *testing.T) { + t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8091", "false", "0")) +// t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8093", "true", "0")) +// t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18091", "false", "1000")) +// t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18093", "true", "1000")) } func TestBasicReloadConfig(t *testing.T) { - t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) - t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) +// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) +// t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) } -func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +func TestBasicReloadConfigWithRedisCluster(t *testing.T) { +// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8095", "false", "0")) +// t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8097", "false", "0", "false")) +} + +/*func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:16382") os.Setenv("REDIS_URL", "localhost:16381") os.Setenv("REDIS_AUTH", "password123") @@ -91,7 +106,7 @@ func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(* os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) -} +}*/ func 
testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6385") @@ -103,7 +118,7 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +/*func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") os.Setenv("REDIS_URL", "localhost:6379") os.Setenv("REDIS_AUTH", "") @@ -125,6 +140,54 @@ func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtime return testConfigReload(grpcPort, perSecond, local_cache_size) } +func testBasicConfigWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +}*/ + +func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + return testBasicBaseConfig(grpcPort, perSecond, 
local_cache_size) +} + +/*func testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + return testConfigReload(grpcPort, perSecond, local_cache_size) +}*/ + func getCacheKey(cacheKey string, enableLocalCache bool) string { if enableLocalCache { return cacheKey + "_local" @@ -356,7 +419,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu } } -func TestBasicConfigLegacy(t *testing.T) { +/*func TestBasicConfigLegacy(t *testing.T) { os.Setenv("PORT", "8082") os.Setenv("GRPC_PORT", "8083") os.Setenv("DEBUG_PORT", "8084") @@ -595,4 +658,4 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size string) func( panic(err) } } -} \ No newline at end of file +}*/ From 5e50b8a04bc2f1877ddce71316e9220942825ef7 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Fri, 18 Sep 2020 18:29:23 +0100 Subject: [PATCH 24/41] Update Makefile Signed-off-by: Diego Erdody --- Makefile | 8 ++++++++ 1 
file changed, 8 insertions(+) diff --git a/Makefile b/Makefile index b66a67a79..9af180f2b 100644 --- a/Makefile +++ b/Makefile @@ -79,6 +79,14 @@ tests_with_redis: bootstrap_redis_tls tests_unit redis-server --port 6382 --requirepass password123 & redis-server --port 6384 --requirepass password123 & redis-server --port 6385 --requirepass password123 & + redis-server --port 6386 --cluster-enabled yes --requirepass password123 & + redis-server --port 6387 --cluster-enabled yes --requirepass password123 & + redis-server --port 6388 --cluster-enabled yes --requirepass password123 & + redis-server --port 6389 --cluster-enabled yes --requirepass password123 & + redis-server --port 6390 --cluster-enabled yes --requirepass password123 & + redis-server --port 6391 --cluster-enabled yes --requirepass password123 & + redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 & + redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 & go test -race -tags=integration $(MODULE)/... 
.PHONY: docker_tests From a9decc488ee08c989ccca931c1fb5b71a833966c Mon Sep 17 00:00:00 2001 From: Diego Erdody Date: Tue, 22 Sep 2020 00:50:08 -0700 Subject: [PATCH 25/41] fixes and debug statements Signed-off-by: Diego Erdody --- Makefile | 23 ++++++++++------ src/redis/driver_impl.go | 1 + test/integration/integration_test.go | 40 ++++++++++++++-------------- 3 files changed, 36 insertions(+), 28 deletions(-) diff --git a/Makefile b/Makefile index 9af180f2b..69ccc1a54 100644 --- a/Makefile +++ b/Makefile @@ -79,14 +79,21 @@ tests_with_redis: bootstrap_redis_tls tests_unit redis-server --port 6382 --requirepass password123 & redis-server --port 6384 --requirepass password123 & redis-server --port 6385 --requirepass password123 & - redis-server --port 6386 --cluster-enabled yes --requirepass password123 & - redis-server --port 6387 --cluster-enabled yes --requirepass password123 & - redis-server --port 6388 --cluster-enabled yes --requirepass password123 & - redis-server --port 6389 --cluster-enabled yes --requirepass password123 & - redis-server --port 6390 --cluster-enabled yes --requirepass password123 & - redis-server --port 6391 --cluster-enabled yes --requirepass password123 & - redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 & - redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 & + mkdir 6386 && cd 6386 && redis-server --port 6386 --cluster-enabled yes --requirepass password123 & + mkdir 6387 && cd 6387 && redis-server --port 6387 --cluster-enabled yes --requirepass password123 & + mkdir 6388 && cd 6388 && redis-server --port 6388 --cluster-enabled yes --requirepass password123 & + mkdir 6389 && cd 6389 && redis-server --port 6389 --cluster-enabled yes --requirepass password123 & + mkdir 6390 && cd 6390 && redis-server --port 6390 --cluster-enabled yes --requirepass password123 & + mkdir 6391 && cd 6391 && redis-server --port 6391 
--cluster-enabled yes --requirepass password123 & + sleep 3 + yes | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 + yes | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 + sleep 3 + cat 6386/nodes.conf + cat 6387/nodes.conf + cat 6388/nodes.conf + redis-cli --cluster check -a password123 127.0.0.1:6386 + redis-cli --cluster check -a password123 127.0.0.1:6389 go test -race -tags=integration $(MODULE)/... .PHONY: docker_tests diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 01db05c66..58683cd5b 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -98,6 +98,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string client, err = poolFunc("tcp", url) case "cluster": urls := strings.Split(url, ",") + logger.Warnf("Creating cluster with urls %v", urls) client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) case "sentinel": urls := strings.Split(url, ",") diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 1635cc728..89108b80d 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -44,17 +44,17 @@ func newDescriptorStatusLegacy( // TODO: Once adding the ability of stopping the server in the runner (https://github.com/envoyproxy/ratelimit/issues/119), // stop the server at the end of each test, thus we can reuse the grpc port among these integration tests. 
func TestBasicConfig(t *testing.T) { -// t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) -// t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) -// t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) -// t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) + // t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) + // t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) + // t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) + // t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) } func TestBasicConfigWithRedisCluster(t *testing.T) { -// t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8083", "false", "0")) -// t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8085", "true", "0")) -// t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18083", "false", "1000")) -// t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18085", "true", "1000")) + // t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8083", "false", "0")) + // t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8085", "true", "0")) + // t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18083", "false", "1000")) + // t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18085", "true", "1000")) } /*func TestBasicTLSConfig(t *testing.T) { @@ -66,26 +66,26 @@ func TestBasicConfigWithRedisCluster(t *testing.T) { func TestBasicAuthConfig(t *testing.T) { t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuth("8091", "false", "0")) -// t.Run("WithPerSecondRedisAuth", testBasicConfigAuth("8093", "true", "0")) -// t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) -// t.Run("WithPerSecondRedisAuthWithLocalCache", 
testBasicConfigAuth("18093", "true", "1000")) + // t.Run("WithPerSecondRedisAuth", testBasicConfigAuth("8093", "true", "0")) + // t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) + // t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) } func TestBasicAuthConfigWithRedisCluster(t *testing.T) { t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8091", "false", "0")) -// t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8093", "true", "0")) -// t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18091", "false", "1000")) -// t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18093", "true", "1000")) + // t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8093", "true", "0")) + // t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18091", "false", "1000")) + // t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18093", "true", "1000")) } func TestBasicReloadConfig(t *testing.T) { -// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) -// t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) + // t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) + // t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) } func TestBasicReloadConfigWithRedisCluster(t *testing.T) { -// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8095", "false", "0")) -// t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8097", "false", "0", "false")) + // t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8095", "false", "0")) + // t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8097", 
"false", "0", "false")) } /*func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { @@ -154,9 +154,9 @@ func testBasicConfigWithRedisCluster(grpcPort, perSecond string, local_cache_siz func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_TYPE", "cluster") - os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_PERSECOND_URL", "127.0.0.1:6389,127.0.0.1:6390,127.0.0.1:6391") os.Setenv("REDIS_TYPE", "cluster") - os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_URL", "127.0.0.1:6386,127.0.0.1:6387,127.0.0.1:6388") os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") From 7017fcb3151a332e4deef79e8c336c78e810d1b2 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Tue, 22 Sep 2020 13:31:19 +0100 Subject: [PATCH 26/41] Update Makefile Signed-off-by: Diego Erdody --- Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 69ccc1a54..0b883b955 100644 --- a/Makefile +++ b/Makefile @@ -86,12 +86,9 @@ tests_with_redis: bootstrap_redis_tls tests_unit mkdir 6390 && cd 6390 && redis-server --port 6390 --cluster-enabled yes --requirepass password123 & mkdir 6391 && cd 6391 && redis-server --port 6391 --cluster-enabled yes --requirepass password123 & sleep 3 - yes | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 - yes | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 
--cluster-replicas 0 sleep 3 - cat 6386/nodes.conf - cat 6387/nodes.conf - cat 6388/nodes.conf redis-cli --cluster check -a password123 127.0.0.1:6386 redis-cli --cluster check -a password123 127.0.0.1:6389 go test -race -tags=integration $(MODULE)/... From b15b97de83da1d73ad9236d7765d4bc35c25432e Mon Sep 17 00:00:00 2001 From: Diego Erdody Date: Tue, 22 Sep 2020 23:38:31 -0700 Subject: [PATCH 27/41] Fix integration tests: separate ports and enable implicit pipelining Signed-off-by: Diego Erdody --- Makefile | 3 +- src/redis/driver_impl.go | 1 + test/integration/integration_test.go | 114 +++++++++++++++++---------- 3 files changed, 73 insertions(+), 45 deletions(-) diff --git a/Makefile b/Makefile index 0b883b955..3f482d14c 100644 --- a/Makefile +++ b/Makefile @@ -85,10 +85,9 @@ tests_with_redis: bootstrap_redis_tls tests_unit mkdir 6389 && cd 6389 && redis-server --port 6389 --cluster-enabled yes --requirepass password123 & mkdir 6390 && cd 6390 && redis-server --port 6390 --cluster-enabled yes --requirepass password123 & mkdir 6391 && cd 6391 && redis-server --port 6391 --cluster-enabled yes --requirepass password123 & - sleep 3 + sleep 2 echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 - sleep 3 redis-cli --cluster check -a password123 127.0.0.1:6386 redis-cli --cluster check -a password123 127.0.0.1:6389 go test -race -tags=integration $(MODULE)/... 
diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 58683cd5b..c36c66d18 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -86,6 +86,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string } else { opts = append(opts, radix.PoolPipelineWindow(pipelineWindow, pipelineLimit)) } + logger.Debugf("Implicit pipelining enabled: %v", implicitPipelining) poolFunc := func(network, addr string) (radix.Client, error) { return radix.NewPool(network, addr, poolSize, opts...) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 89108b80d..8c2f5cc6e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -3,8 +3,12 @@ package integration_test import ( + "bytes" "fmt" + "io" + "io/ioutil" "math/rand" + "net/http" "os" "strconv" "testing" @@ -44,57 +48,59 @@ func newDescriptorStatusLegacy( // TODO: Once adding the ability of stopping the server in the runner (https://github.com/envoyproxy/ratelimit/issues/119), // stop the server at the end of each test, thus we can reuse the grpc port among these integration tests. 
func TestBasicConfig(t *testing.T) { - // t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) - // t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) - // t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) - // t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) + t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) + t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) + t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) + t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) } func TestBasicConfigWithRedisCluster(t *testing.T) { - // t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8083", "false", "0")) - // t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8085", "true", "0")) - // t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18083", "false", "1000")) - // t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18085", "true", "1000")) + t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8183", "false", "0")) + t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8185", "true", "0")) + t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18183", "false", "1000")) + t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18185", "true", "1000")) } -/*func TestBasicTLSConfig(t *testing.T) { +func TestBasicTLSConfig(t *testing.T) { t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false", "0")) t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true", "0")) t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18087", "false", "1000")) t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18089", "true", "1000")) -}*/ +} func TestBasicAuthConfig(t *testing.T) { 
t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuth("8091", "false", "0")) - // t.Run("WithPerSecondRedisAuth", testBasicConfigAuth("8093", "true", "0")) - // t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) - // t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) + t.Run("WithPerSecondRedisAuth", testBasicConfigAuth("8093", "true", "0")) + t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18091", "false", "1000")) + t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) } func TestBasicAuthConfigWithRedisCluster(t *testing.T) { - t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8091", "false", "0")) - // t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8093", "true", "0")) - // t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18091", "false", "1000")) - // t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18093", "true", "1000")) + t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8191", "false", "0")) + t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8193", "true", "0")) + t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18191", "false", "1000")) + t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18193", "true", "1000")) } func TestBasicReloadConfig(t *testing.T) { - // t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) - // t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) } -func TestBasicReloadConfigWithRedisCluster(t *testing.T) { - // 
t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8095", "false", "0")) - // t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8097", "false", "0", "false")) -} +// func TestBasicReloadConfigWithRedisCluster(t *testing.T) { +// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8195", "false", "0")) +// t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8197", "false", "0", "false")) +// } -/*func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:16382") os.Setenv("REDIS_URL", "localhost:16381") os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_TLS", "true") os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "true") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -105,8 +111,10 @@ func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(* os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) -}*/ +} func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6385") @@ -115,10 +123,12 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) 
} -/*func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") os.Setenv("REDIS_URL", "localhost:6379") os.Setenv("REDIS_AUTH", "") @@ -126,6 +136,8 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("RUNTIME_WATCH_ROOT", "false") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -137,6 +149,8 @@ func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtime os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") return testConfigReload(grpcPort, perSecond, local_cache_size) } @@ -149,22 +163,29 @@ func testBasicConfigWithRedisCluster(grpcPort, perSecond string, local_cache_siz os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") + + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") + os.Setenv("REDIS_PIPELINE_LIMIT", "1") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) -}*/ +} func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_TYPE", "cluster") - os.Setenv("REDIS_PERSECOND_URL", "127.0.0.1:6389,127.0.0.1:6390,127.0.0.1:6391") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") os.Setenv("REDIS_TYPE", "cluster") - os.Setenv("REDIS_URL", "127.0.0.1:6386,127.0.0.1:6387,127.0.0.1:6388") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") 
os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "password123") + // os.Setenv("LOG_LEVEL", "DEBUG") + // TODO These seem to be required to be != 0 for cluster, we should check during initialization + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") + os.Setenv("REDIS_PIPELINE_LIMIT", "1") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -/*func testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +func testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_TYPE", "cluster") os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") os.Setenv("REDIS_TYPE", "cluster") @@ -174,19 +195,24 @@ func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("RUNTIME_WATCH_ROOT", "false") + + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") + os.Setenv("REDIS_PIPELINE_LIMIT", "1") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { - os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") - os.Setenv("REDIS_URL", "localhost:6379") - os.Setenv("REDIS_AUTH", "") - os.Setenv("REDIS_TLS", "false") - os.Setenv("REDIS_PERSECOND_AUTH", "") - os.Setenv("REDIS_PERSECOND_TLS", "false") - os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) - return testConfigReload(grpcPort, perSecond, local_cache_size) -}*/ +// func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { +// os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") +// os.Setenv("REDIS_URL", 
"localhost:6379") +// os.Setenv("REDIS_AUTH", "") +// os.Setenv("REDIS_TLS", "false") +// os.Setenv("REDIS_PERSECOND_AUTH", "") +// os.Setenv("REDIS_PERSECOND_TLS", "false") +// os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + +// return testConfigReload(grpcPort, perSecond, local_cache_size) +// } func getCacheKey(cacheKey string, enableLocalCache bool) string { if enableLocalCache { @@ -353,6 +379,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining2 = 0 } + assert.NoError(err) common.AssertProtoEqual( assert, &pb.RateLimitResponse{ @@ -361,7 +388,6 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu newDescriptorStatus(pb.RateLimitResponse_OK, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) - assert.NoError(err) key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) assert.Equal(i+26, int(key2HitCounter.Value())) key2OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key2", enable_local_cache))) @@ -419,7 +445,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu } } -/*func TestBasicConfigLegacy(t *testing.T) { +func TestBasicConfigLegacy(t *testing.T) { os.Setenv("PORT", "8082") os.Setenv("GRPC_PORT", "8083") os.Setenv("DEBUG_PORT", "8084") @@ -432,6 +458,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") runner := runner.NewRunner() go func() { @@ -658,4 +686,4 @@ func testConfigReload(grpcPort, perSecond string, local_cache_size 
string) func( panic(err) } } -}*/ +} From 68f882391c24a0d90ec2c58c45672e769f643a43 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:25:20 +0100 Subject: [PATCH 28/41] Create sentinel.conf Signed-off-by: Diego Erdody --- test/integration/conf/sentinel.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 test/integration/conf/sentinel.conf diff --git a/test/integration/conf/sentinel.conf b/test/integration/conf/sentinel.conf new file mode 100644 index 000000000..b56cefc9a --- /dev/null +++ b/test/integration/conf/sentinel.conf @@ -0,0 +1,3 @@ +sentinel monitor mymaster 127.0.0.1 6392 2 +sentinel auth-pass mymaster password123 +sentinel down-after-milliseconds mymaster 3000 From cedd3a00a72b51e7dfa0270a88e1dc82728bbe49 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:25:56 +0100 Subject: [PATCH 29/41] Create sentinel-pre-second.conf Signed-off-by: Diego Erdody --- test/integration/conf/sentinel-pre-second.conf | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 test/integration/conf/sentinel-pre-second.conf diff --git a/test/integration/conf/sentinel-pre-second.conf b/test/integration/conf/sentinel-pre-second.conf new file mode 100644 index 000000000..51188eeb8 --- /dev/null +++ b/test/integration/conf/sentinel-pre-second.conf @@ -0,0 +1,3 @@ +sentinel monitor mymaster 127.0.0.1 6397 2 +sentinel auth-pass mymaster password123 +sentinel down-after-milliseconds mymaster 3000 From 8ea331545f60393fb33c0168fe66ce048e844272 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:26:35 +0100 Subject: [PATCH 30/41] Update Makefile Signed-off-by: Diego Erdody --- Makefile | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 3f482d14c..c1dbffcfa 100644 --- a/Makefile +++ b/Makefile @@ -79,6 +79,18 @@ 
tests_with_redis: bootstrap_redis_tls tests_unit redis-server --port 6382 --requirepass password123 & redis-server --port 6384 --requirepass password123 & redis-server --port 6385 --requirepass password123 & + + redis-server --port 6392 --requirepass password123 & + redis-server --port 6393 --requirepass password123 --slaveof 127.0.0.1 6392 --masterauth password123 & + mkdir 26394 && cp test/integration/conf/sentinel.conf 26394/sentinel.conf && redis-server 26394/sentinel.conf --sentinel --port 26394 & + mkdir 26395 && cp test/integration/conf/sentinel.conf 26395/sentinel.conf && redis-server 26395/sentinel.conf --sentinel --port 26395 & + mkdir 26396 && cp test/integration/conf/sentinel.conf 26396/sentinel.conf && redis-server 26396/sentinel.conf --sentinel --port 26396 & + redis-server --port 6397 --requirepass password123 & + redis-server --port 6398 --requirepass password123 --slaveof 127.0.0.1 6397 --masterauth password123 & + mkdir 26399 && cp test/integration/conf/sentinel-pre-second.conf 26399/sentinel.conf && redis-server 26399/sentinel.conf --sentinel --port 26399 & + mkdir 26400 && cp test/integration/conf/sentinel-pre-second.conf 26400/sentinel.conf && redis-server 26400/sentinel.conf --sentinel --port 26400 & + mkdir 26401 && cp test/integration/conf/sentinel-pre-second.conf 26401/sentinel.conf && redis-server 26401/sentinel.conf --sentinel --port 26401 & + mkdir 6386 && cd 6386 && redis-server --port 6386 --cluster-enabled yes --requirepass password123 & mkdir 6387 && cd 6387 && redis-server --port 6387 --cluster-enabled yes --requirepass password123 & mkdir 6388 && cd 6388 && redis-server --port 6388 --cluster-enabled yes --requirepass password123 & @@ -86,10 +98,11 @@ tests_with_redis: bootstrap_redis_tls tests_unit mkdir 6390 && cd 6390 && redis-server --port 6390 --cluster-enabled yes --requirepass password123 & mkdir 6391 && cd 6391 && redis-server --port 6391 --cluster-enabled yes --requirepass password123 & sleep 2 - echo "yes" | redis-cli 
--cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 - echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 - redis-cli --cluster check -a password123 127.0.0.1:6386 - redis-cli --cluster check -a password123 127.0.0.1:6389 + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 & + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 & + redis-cli --cluster check -a password123 127.0.0.1:6386 & + redis-cli --cluster check -a password123 127.0.0.1:6389 & + go test -race -tags=integration $(MODULE)/... .PHONY: docker_tests @@ -104,4 +117,3 @@ docker_image: docker_tests .PHONY: docker_push docker_push: docker_image docker push $(IMAGE):$(VERSION) - From 9b19bb1a1adfef318eafe935a63d09519049828a Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:27:03 +0100 Subject: [PATCH 31/41] Update README.md Signed-off-by: Diego Erdody --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8e5a1db64..e662edf90 100644 --- a/README.md +++ b/README.md @@ -513,7 +513,7 @@ Ratelimit supports different types of redis deployments: The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TYPE` environment variables. Depending on the type defined, the `REDIS_URL` and `REDIS_PERSECOND_URL` are expected to have the following formats: 1. "single": Depending on the socket type defined, either a single hostname:port pair or a unix domain socket reference. -2. "sentinel": A comma separated list of hostname:port pairs. The list size should be >= 2. The first item is the primary node url and the rest are the sentinels. +2. 
"sentinel": A comma separated list with the first string as the master name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the primary node and the rest are the sentinels. 3. "cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. ## Pipelining From 76467fed5f2d175bf8c22bd7ac308d0460a23720 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:27:55 +0100 Subject: [PATCH 32/41] Update driver_impl.go Signed-off-by: Diego Erdody --- src/redis/driver_impl.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index c36c66d18..18e213f1b 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -99,12 +99,15 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string client, err = poolFunc("tcp", url) case "cluster": urls := strings.Split(url, ",") + if implicitPipelining == false { + panic(RedisError("Implicit Pipelining must be enabled to work with Redis Cluster Mode. 
Set values for REDIS_PIPELINE_WINDOW or REDIS_PIPELINE_LIMIT to enable implicit pipelining")) + } logger.Warnf("Creating cluster with urls %v", urls) client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) case "sentinel": urls := strings.Split(url, ",") if len(urls) < 2 { - panic(RedisError("Expected a list of urls for the sentinel mode, in the format: <master name>,<sentinel url 1>,...,<sentinel url N>")) + panic(RedisError("Expected master name and a list of urls for the sentinels, in the format: <master name>,<sentinel url 1>,...,<sentinel url N>")) } client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) default: From 2a0dde2ac73216649870bfe895d230ffe74f8290 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:34:09 +0100 Subject: [PATCH 33/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 157 +++++++++++++++++---------- 1 file changed, 102 insertions(+), 55 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 8c2f5cc6e..c1d2f6ef5 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -54,13 +54,6 @@ func TestBasicConfig(t *testing.T) { t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) } -func TestBasicConfigWithRedisCluster(t *testing.T) { - t.Run("WithoutPerSecondRedis", testBasicConfigWithRedisCluster("8183", "false", "0")) - t.Run("WithPerSecondRedis", testBasicConfigWithRedisCluster("8185", "true", "0")) - t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18183", "false", "1000")) - t.Run("WithPerSecondRedisWithLocalCache", testBasicConfigWithRedisCluster("18185", "true", "1000")) -} - func TestBasicTLSConfig(t *testing.T) { t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false", "0")) t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true", "0")) t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18087", "false", "1000")) t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18089", "true", "1000")) } @@ -82,15 +75,27 @@ func 
TestBasicAuthConfigWithRedisCluster(t *testing.T) { t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18193", "true", "1000")) } +func TestBasicAuthConfigWithRedisSentinel(t *testing.T) { + t.Run("WithoutPerSecondRedisAuth", testBasicAuthConfigWithRedisSentinel("8291", "false", "0")) + t.Run("WithPerSecondRedisAuth", testBasicAuthConfigWithRedisSentinel("8293", "true", "0")) + t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicAuthConfigWithRedisSentinel("18291", "false", "1000")) + t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicAuthConfigWithRedisSentinel("18293", "true", "1000")) +} + func TestBasicReloadConfig(t *testing.T) { t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) } -// func TestBasicReloadConfigWithRedisCluster(t *testing.T) { -// t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8195", "false", "0")) -// t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8197", "false", "0", "false")) -// } +func TestBasicReloadConfigWithRedisCluster(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8096", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8098", "false", "0", "false")) +} + +func TestBasicReloadConfigWithRedisSentinel(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisSentinel("8296", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisSentinel("8298", "false", "0", "false")) +} func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:16382") @@ -101,6 +106,22 @@ func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) os.Setenv("REDIS_PERSECOND_TLS", "true") 
os.Setenv("REDIS_TYPE", "single") os.Setenv("REDIS_PERSECOND_TYPE", "single") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigAuthTLSWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:16389,localhost:16390,localhost:16391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:16386,localhost:16387,localhost:16388") + os.Setenv("REDIS_TLS", "true") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "true") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -113,6 +134,7 @@ func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(* os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_TYPE", "single") os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -125,36 +147,51 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_TYPE", "single") os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { - os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") - os.Setenv("REDIS_URL", "localhost:6379") - os.Setenv("REDIS_AUTH", "") +func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", 
"localhost:6386,localhost:6387,localhost:6388") os.Setenv("REDIS_TLS", "false") - os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") - os.Setenv("RUNTIME_WATCH_ROOT", "false") - os.Setenv("REDIS_TYPE", "single") - os.Setenv("REDIS_PERSECOND_TYPE", "single") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { +func testBasicAuthConfigWithRedisSentinel(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") + os.Setenv("REDIS_TLS", "false") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") os.Setenv("REDIS_URL", "localhost:6379") os.Setenv("REDIS_AUTH", "") os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") - os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + os.Setenv("RUNTIME_WATCH_ROOT", "false") os.Setenv("REDIS_TYPE", "single") os.Setenv("REDIS_PERSECOND_TYPE", "single") - return testConfigReload(grpcPort, perSecond, local_cache_size) + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + +func 
testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_TYPE", "cluster") os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") os.Setenv("REDIS_TYPE", "cluster") @@ -163,56 +200,66 @@ func testBasicConfigWithRedisCluster(grpcPort, perSecond string, local_cache_siz os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") - os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") - os.Setenv("REDIS_PIPELINE_LIMIT", "1") return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { - os.Setenv("REDIS_PERSECOND_TYPE", "cluster") - os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") - os.Setenv("REDIS_TYPE", "cluster") - os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") +func testBasicConfigWithoutWatchRootWithRedisSentinel(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") os.Setenv("REDIS_TLS", "false") - os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") - os.Setenv("REDIS_PERSECOND_AUTH", "password123") - // os.Setenv("LOG_LEVEL", "DEBUG") - // TODO These seem to be required to be != 0 for cluster, we should check during initialization - os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") - os.Setenv("REDIS_PIPELINE_LIMIT", "1") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + return 
testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { +func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + + return testConfigReload(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_TYPE", "cluster") os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") os.Setenv("REDIS_TYPE", "cluster") os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") - os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") os.Setenv("REDIS_TLS", "false") - os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") - os.Setenv("RUNTIME_WATCH_ROOT", "false") - - os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "1") - os.Setenv("REDIS_PIPELINE_LIMIT", "1") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) - return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) + return testConfigReload(grpcPort, perSecond, local_cache_size) } -// func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { -// os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") -// 
os.Setenv("REDIS_URL", "localhost:6379") -// os.Setenv("REDIS_AUTH", "") -// os.Setenv("REDIS_TLS", "false") -// os.Setenv("REDIS_PERSECOND_AUTH", "") -// os.Setenv("REDIS_PERSECOND_TLS", "false") -// os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) - -// return testConfigReload(grpcPort, perSecond, local_cache_size) -// } +func testBasicConfigReloadWithRedisSentinel(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + + return testConfigReload(grpcPort, perSecond, local_cache_size) +} func getCacheKey(cacheKey string, enableLocalCache bool) string { if enableLocalCache { From f01202ec611b1062f05f1b922466bd2fb3c222b2 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:36:13 +0100 Subject: [PATCH 34/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c1d2f6ef5..28fd8d2ba 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -110,21 +110,6 @@ func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } -func testBasicConfigAuthTLSWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { - os.Setenv("REDIS_PERSECOND_TYPE", "cluster") - os.Setenv("REDIS_PERSECOND_URL", "localhost:16389,localhost:16390,localhost:16391") - 
os.Setenv("REDIS_TYPE", "cluster") - os.Setenv("REDIS_URL", "localhost:16386,localhost:16387,localhost:16388") - os.Setenv("REDIS_TLS", "true") - os.Setenv("REDIS_AUTH", "password123") - os.Setenv("REDIS_PERSECOND_TLS", "true") - os.Setenv("REDIS_PERSECOND_AUTH", "password123") - os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") - os.Setenv("REDIS_PIPELINE_LIMIT", "8") - - return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) -} - func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") os.Setenv("REDIS_URL", "localhost:6379") From 20a17889a67053252ad085cc7b0fb48ad79752d1 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:42:33 +0100 Subject: [PATCH 35/41] Update README.md Signed-off-by: Diego Erdody --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e662edf90..4baf562a6 100644 --- a/README.md +++ b/README.md @@ -513,7 +513,7 @@ Ratelimit supports different types of redis deployments: The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TYPE` environment variables. Depending on the type defined, the `REDIS_URL` and `REDIS_PERSECOND_URL` are expected to have the following formats: 1. "single": Depending on the socket type defined, either a single hostname:port pair or a unix domain socket reference. -2. "sentinel": A comma separated list with the first string as the master name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the primary node and the rest are the sentinels. +2. "sentinel": A comma separated list with the first string as the primary node name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the primary node and the rest are the sentinels. 3. 
"cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. ## Pipelining From 858bd4d52c6bbc64ece7e51e0cbec09e73098536 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:43:14 +0100 Subject: [PATCH 36/41] Update driver_impl.go Signed-off-by: Diego Erdody --- src/redis/driver_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 18e213f1b..1d401a052 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -107,7 +107,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string case "sentinel": urls := strings.Split(url, ",") if len(urls) < 2 { - panic(RedisError("Expected master name and a list of urls for the sentinels, in the format: ,,...,")) + panic(RedisError("Expected primary node name and a list of urls for the sentinels, in the format: ,,...,")) } client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) default: From b930783f0a1525a3d7be88fa05aaeb2d7d41a456 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Wed, 23 Sep 2020 21:46:42 +0100 Subject: [PATCH 37/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 28fd8d2ba..069dde00d 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -411,7 +411,6 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining2 = 0 } - assert.NoError(err) common.AssertProtoEqual( assert, &pb.RateLimitResponse{ From 2d2869d02d595ef8cd12ff66e091bd29e49b3dee Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Thu, 24 Sep 2020 00:58:01 +0100 Subject: 
[PATCH 38/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 069dde00d..bf484dce0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -410,7 +410,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu status = pb.RateLimitResponse_OVER_LIMIT limitRemaining2 = 0 } - + + assert.NoError(err) common.AssertProtoEqual( assert, &pb.RateLimitResponse{ From 76ac5a4e83db1a8dd6f864f026c0bcc653d93f2c Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Thu, 24 Sep 2020 00:58:50 +0100 Subject: [PATCH 39/41] Update README.md Signed-off-by: Diego Erdody --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4baf562a6..062d493f7 100644 --- a/README.md +++ b/README.md @@ -513,7 +513,7 @@ Ratelimit supports different types of redis deployments: The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TYPE` environment variables. Depending on the type defined, the `REDIS_URL` and `REDIS_PERSECOND_URL` are expected to have the following formats: 1. "single": Depending on the socket type defined, either a single hostname:port pair or a unix domain socket reference. -2. "sentinel": A comma separated list with the first string as the primary node name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the primary node and the rest are the sentinels. +2. "sentinel": A comma separated list with the first string as the master name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the master and the rest are the sentinels. 3. 
"cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. ## Pipelining From 27eb96309482537c32cf81f3a8002227d4825719 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Thu, 24 Sep 2020 00:59:25 +0100 Subject: [PATCH 40/41] Update driver_impl.go Signed-off-by: Diego Erdody --- src/redis/driver_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 1d401a052..18e213f1b 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -107,7 +107,7 @@ func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string case "sentinel": urls := strings.Split(url, ",") if len(urls) < 2 { - panic(RedisError("Expected primary node name and a list of urls for the sentinels, in the format: ,,...,")) + panic(RedisError("Expected master name and a list of urls for the sentinels, in the format: ,,...,")) } client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) default: From cf50f23ab6a0ca3550d34e774c9d57c79c0d81e4 Mon Sep 17 00:00:00 2001 From: kriti-shaw <71521094+kriti-shaw@users.noreply.github.com> Date: Thu, 24 Sep 2020 01:02:40 +0100 Subject: [PATCH 41/41] Update integration_test.go Signed-off-by: Diego Erdody --- test/integration/integration_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index bf484dce0..01a2c950e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -411,7 +411,6 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining2 = 0 } - assert.NoError(err) common.AssertProtoEqual( assert, &pb.RateLimitResponse{ @@ -420,6 +419,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu newDescriptorStatus(pb.RateLimitResponse_OK, 20, 
pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) + assert.NoError(err) + key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) assert.Equal(i+26, int(key2HitCounter.Value())) key2OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key2", enable_local_cache)))