diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml new file mode 100644 index 000000000..a3bd5867a --- /dev/null +++ b/.github/workflows/master.yaml @@ -0,0 +1,27 @@ +name: Build and push :master image + +on: + push: + branches: + - master + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: build and push docker image + run: | + echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin + make docker_push + env: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + VERSION: master \ No newline at end of file diff --git a/.github/workflows/pullrequest.yaml b/.github/workflows/pullrequest.yaml new file mode 100644 index 000000000..da03fd13d --- /dev/null +++ b/.github/workflows/pullrequest.yaml @@ -0,0 +1,21 @@ +name: CI Build and Test for PR + +on: + pull_request: + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: build and test + run: | + make docker_tests diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 000000000..e2349f992 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,26 @@ +name: Build and push :release image + +on: + push: + tags: + - 'v*' + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: check format + run: make check_format + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: build and push docker image + run: | + echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin + make docker_push + env: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index f5010830b..7897ead09 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,8 @@ cover.out bin/ .idea/ vendor +cert.pem +key.pem +private.pem +redis-per-second.conf +redis.conf diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 02bdf2e5b..000000000 --- a/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: required -language: go -go: "1.11" -services: redis-server -before_install: sudo apt-get update -y && sudo apt-get install stunnel4 -y -install: make bootstrap bootstrap_redis_tls -before_script: -- redis-server --port 6380 & -- redis-server --port 6381 --requirepass password123 & -- redis-server --port 6382 --requirepass password123 & -- redis-server --port 6384 --requirepass password123 & -- redis-server --port 6385 --requirepass password123 & -script: make check_format tests diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index afcbd8c21..91f111f23 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1 +1,3 @@ -This project is governed by [Lyft's code of conduct](https://github.com/lyft/code-of-conduct). All contributors and participants agree to abide by its terms. \ No newline at end of file +## Community Code of Conduct + +ratelimit follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 
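The workflow files above wire CI to the Makefile: pull requests run `make check_format` and `make docker_tests`, pushes to `master` publish the `:master` image, and pushing a `v*` tag publishes a release image. As a hedged sketch of how the release workflow gets exercised (the tag name is only an example; the image tag follows from the Makefile's `VERSION ?= $(GIT_REF)` default shown later in this diff):

```bash
# Illustrative only: any tag matching the 'v*' pattern triggers release.yaml.
git tag v1.2.3
git push origin v1.2.3
# The workflow logs in to the registry with the DOCKER_USERNAME/DOCKER_PASSWORD
# secrets and runs `make docker_push`; VERSION defaults to `git describe --tags`,
# so the pushed image carries the new release tag.
```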
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85d4e24e2..5d641c988 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,12 +8,85 @@ We welcome contributions from the community. Here are some guidelines. * Fork the repo and create your PR. * Tests will automatically run for you. -* When all of the tests are passing, tag @lyft/network-team and we will review it and - merge once our CLA has been signed (see below). +* When all of the tests are passing, tag @envoyproxy/ratelimit-maintainers and + we will review it and merge. * Party time. -# CLA +# DCO: Sign your work -* We require a CLA for code contributions, so before we can accept a pull request we need - to have a signed CLA. Please visit our [CLA service](https://oss.lyft.com/cla) and follow - the instructions to sign the CLA. +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](https://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +If you want this to be automatic you can set up some aliases: + +```bash +git config --add alias.amend "commit -s --amend" +git config --add alias.c "commit -s" +``` + +## Fixing DCO + +If your PR fails the DCO check, it's necessary to fix the entire commit history in the PR. Best +practice is to [squash](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) +the commit history to a single commit, append the DCO sign-off as described above, and [force +push](https://git-scm.com/docs/git-push#git-push---force). 
For example, if you have 2 commits in +your history: + +```bash +git rebase -i HEAD^^ +(interactive squash + DCO append) +git push origin -f +``` + +Note, that in general rewriting history in this way is a hindrance to the review process and this +should only be done to correct a DCO mistake. diff --git a/DCO b/DCO new file mode 100644 index 000000000..8201f9921 --- /dev/null +++ b/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/Dockerfile b/Dockerfile index 57173a93a..b0bdb0bd3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,15 @@ -FROM golang:1.10.4 AS build -WORKDIR /go/src/github.com/lyft/ratelimit +FROM golang:1.14 AS build +WORKDIR /ratelimit + +ENV GOPROXY=https://proxy.golang.org +COPY go.mod go.sum /ratelimit/ +RUN go mod download COPY src src COPY script script -COPY vendor vendor -COPY glide.yaml glide.yaml -COPY glide.lock glide.lock -COPY proto proto - -RUN script/install-glide -RUN glide install -RUN CGO_ENABLED=0 GOOS=linux go build -o /usr/local/bin/ratelimit -ldflags="-w -s" -v github.com/lyft/ratelimit/src/service_cmd +RUN CGO_ENABLED=0 GOOS=linux go build -o /go/bin/ratelimit -ldflags="-w -s" -v github.com/envoyproxy/ratelimit/src/service_cmd -FROM alpine:3.8 AS final +FROM alpine:3.11 AS final RUN apk --no-cache add ca-certificates -COPY --from=build /usr/local/bin/ratelimit /bin/ratelimit +COPY --from=build /go/bin/ratelimit /bin/ratelimit diff --git a/Dockerfile.integration b/Dockerfile.integration new file mode 100644 index 000000000..efff81438 --- /dev/null +++ b/Dockerfile.integration @@ -0,0 +1,17 @@ +# Running this docker image runs the integration tests. 
+FROM golang:1.14 + +RUN apt-get update -y && apt-get install sudo stunnel4 redis -y && rm -rf /var/lib/apt/lists/* + +WORKDIR /workdir + +ENV GOPROXY=https://proxy.golang.org +COPY go.mod go.sum /workdir/ +RUN go mod download + +COPY Makefile /workdir +RUN make bootstrap + +COPY src /workdir/src +COPY test /workdir/test +CMD make tests_with_redis diff --git a/Makefile b/Makefile index 2110f1075..c1dbffcfa 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,16 @@ -ifeq ("$(GOPATH)","") -$(error GOPATH must be set) -endif - +export GO111MODULE=on +PROJECT = ratelimit +REGISTRY ?= envoyproxy +IMAGE := $(REGISTRY)/$(PROJECT) +INTEGRATION_IMAGE := $(REGISTRY)/$(PROJECT)_integration +MODULE = github.com/envoyproxy/ratelimit +GIT_REF = $(shell git describe --tags || git rev-parse --short=8 --verify HEAD) +VERSION ?= $(GIT_REF) SHELL := /bin/bash -GOREPO := ${GOPATH}/src/github.com/lyft/ratelimit .PHONY: bootstrap -bootstrap: - script/install-glide - glide install +bootstrap: ; -.PHONY: bootstrap_tests -bootstrap_tests: - cd ./vendor/github.com/golang/mock/mockgen && go install define REDIS_STUNNEL cert = private.pem pid = /var/run/stunnel.pid @@ -33,6 +31,7 @@ redis.conf: echo "$$REDIS_STUNNEL" >> $@ redis-per-second.conf: echo "$$REDIS_PER_SECOND_STUNNEL" >> $@ + .PHONY: bootstrap_redis_tls bootstrap_redis_tls: redis.conf redis-per-second.conf openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \ @@ -51,27 +50,70 @@ docs_format: .PHONY: fix_format fix_format: script/docs_fix_format - go fmt $(shell glide nv) + go fmt $(MODULE)/... .PHONY: check_format check_format: docs_format - @gofmt -l $(shell glide nv | sed 's/\.\.\.//g') | tee /dev/stderr | read && echo "Files failed gofmt" && exit 1 || true + @gofmt -l $(shell go list -f '{{.Dir}}' ./...) | tee /dev/stderr | read && echo "Files failed gofmt" && exit 1 || true .PHONY: compile compile: - mkdir -p ${GOREPO}/bin - cd ${GOREPO}/src/service_cmd && go build -o ratelimit ./ && mv ./ratelimit ${GOREPO}/bin - cd ${GOREPO}/src/client_cmd && go build -o ratelimit_client ./ && mv ./ratelimit_client ${GOREPO}/bin - cd ${GOREPO}/src/config_check_cmd && go build -o ratelimit_config_check ./ && mv ./ratelimit_config_check ${GOREPO}/bin + mkdir -p ./bin + go build -mod=readonly -o ./bin/ratelimit $(MODULE)/src/service_cmd + go build -mod=readonly -o ./bin/ratelimit_client $(MODULE)/src/client_cmd + go build -mod=readonly -o ./bin/ratelimit_config_check $(MODULE)/src/config_check_cmd .PHONY: tests_unit tests_unit: compile - go test -race ./... + go test -race $(MODULE)/... .PHONY: tests tests: compile - go test -race -tags=integration ./... + go test -race -tags=integration $(MODULE)/... 
+ +.PHONY: tests_with_redis +tests_with_redis: bootstrap_redis_tls tests_unit + redis-server --port 6379 & + redis-server --port 6380 & + redis-server --port 6381 --requirepass password123 & + redis-server --port 6382 --requirepass password123 & + redis-server --port 6384 --requirepass password123 & + redis-server --port 6385 --requirepass password123 & + + redis-server --port 6392 --requirepass password123 & + redis-server --port 6393 --requirepass password123 --slaveof 127.0.0.1 6392 --masterauth password123 & + mkdir 26394 && cp test/integration/conf/sentinel.conf 26394/sentinel.conf && redis-server 26394/sentinel.conf --sentinel --port 26394 & + mkdir 26395 && cp test/integration/conf/sentinel.conf 26395/sentinel.conf && redis-server 26395/sentinel.conf --sentinel --port 26395 & + mkdir 26396 && cp test/integration/conf/sentinel.conf 26396/sentinel.conf && redis-server 26396/sentinel.conf --sentinel --port 26396 & + redis-server --port 6397 --requirepass password123 & + redis-server --port 6398 --requirepass password123 --slaveof 127.0.0.1 6397 --masterauth password123 & + mkdir 26399 && cp test/integration/conf/sentinel-pre-second.conf 26399/sentinel.conf && redis-server 26399/sentinel.conf --sentinel --port 26399 & + mkdir 26400 && cp test/integration/conf/sentinel-pre-second.conf 26400/sentinel.conf && redis-server 26400/sentinel.conf --sentinel --port 26400 & + mkdir 26401 && cp test/integration/conf/sentinel-pre-second.conf 26401/sentinel.conf && redis-server 26401/sentinel.conf --sentinel --port 26401 & + + mkdir 6386 && cd 6386 && redis-server --port 6386 --cluster-enabled yes --requirepass password123 & + mkdir 6387 && cd 6387 && redis-server --port 6387 --cluster-enabled yes --requirepass password123 & + mkdir 6388 && cd 6388 && redis-server --port 6388 --cluster-enabled yes --requirepass password123 & + mkdir 6389 && cd 6389 && redis-server --port 6389 --cluster-enabled yes --requirepass password123 & + mkdir 6390 && cd 6390 && redis-server --port 6390 --cluster-enabled yes --requirepass password123 & + mkdir 6391 && cd 6391 && redis-server --port 6391 --cluster-enabled yes --requirepass password123 & + sleep 2 + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6386 127.0.0.1:6387 127.0.0.1:6388 --cluster-replicas 0 & + echo "yes" | redis-cli --cluster create -a password123 127.0.0.1:6389 127.0.0.1:6390 127.0.0.1:6391 --cluster-replicas 0 & + redis-cli --cluster check -a password123 127.0.0.1:6386 & + redis-cli --cluster check -a password123 127.0.0.1:6389 & + + go test -race -tags=integration $(MODULE)/... + +.PHONY: docker_tests +docker_tests: + docker build -f Dockerfile.integration . -t $(INTEGRATION_IMAGE):$(VERSION) && \ + docker run $$(tty -s && echo "-it" || echo) $(INTEGRATION_IMAGE):$(VERSION) + +.PHONY: docker_image +docker_image: docker_tests + docker build . -t $(IMAGE):$(VERSION) -.PHONY: docker -docker: tests - docker build . 
-t lyft/ratelimit:`git rev-parse HEAD` +.PHONY: docker_push +docker_push: docker_image + docker push $(IMAGE):$(VERSION) diff --git a/README.md b/README.md index 67290c441..062d493f7 100644 --- a/README.md +++ b/README.md @@ -3,10 +3,11 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* - [Overview](#overview) -- [Deprecation of Legacy Ratelimit Proto](#deprecation-of-legacy-ratelimit-proto) +- [Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto](#deprecation-of-legacy-ratelimit-proto-and-v2-ratelimit-proto) - [Deprecation Schedule](#deprecation-schedule) - [Building and Testing](#building-and-testing) - [Docker-compose setup](#docker-compose-setup) + - [Full test environment](#full-test-environment) - [Configuration](#configuration) - [The configuration format](#the-configuration-format) - [Definitions](#definitions) @@ -18,11 +19,16 @@ - [Example 3](#example-3) - [Example 4](#example-4) - [Loading Configuration](#loading-configuration) + - [Log Format](#log-format) - [Request Fields](#request-fields) - [Statistics](#statistics) +- [HTTP Port](#http-port) + - [/json endpoint](#json-endpoint) - [Debug Port](#debug-port) - [Local Cache](#local-cache) - [Redis](#redis) + - [Redis type](#redis-type) + - [Pipelining](#pipelining) - [One Redis Instance](#one-redis-instance) - [Two Redis Instances](#two-redis-instances) - [Contact](#contact) @@ -36,27 +42,27 @@ applications. Applications request a rate limit decision based on a domain and a reads the configuration from disk via [runtime](https://github.com/lyft/goruntime), composes a cache key, and talks to the Redis cache. A decision is then returned to the caller. -# Deprecation of Legacy Ratelimit Proto - -Envoy's data-plane-api defines a ratelimit service proto [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). -Logically the data-plane-api [rls](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) -is equivalent to the [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) -defined in this repo. However, due -to the namespace differences and how gRPC routing works it is not possible to transparently route the -legacy ratelimit (ones based in the [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) -defined in this repo) requests to the data-plane-api -definitions. Therefore, the ratelimit service will upgrade the requests, process them internally as it would -process a data-plane-api ratelimit request, and then downgrade the response to send back to the client. This means that, +# Deprecation of Legacy Ratelimit Proto and v2 Ratelimit proto + +Envoy's data-plane-api defines a ratelimit service proto v3 [rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto). +Logically the data-plane-api rls [v3](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto) +is equivalent to the rls [v2](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). +However, due to the namespace differences and how gRPC routing works, it is not possible to transparently route the +legacy v2 ratelimit requests to the v3 definitions.
Therefore, the ratelimit service will upgrade the requests, process them internally as it would +process a v3 ratelimit request, and then downgrade the response to send back to the client. This means that, for a slight performance hit for clients using the legacy proto, ratelimit is backwards compatible with the legacy proto. +Prior to version 2.0.0 the ratelimit service contained a protocol definition that was supported in a legacy mode, but support for it was removed in 2.0.0. ## Deprecation Schedule -1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production -use at Lyft for over 2 years. +1. `v1.0.0` tagged on commit `0ded92a2af8261d43096eba4132e45b99a3b8b14`. Ratelimit has been in production use at Lyft for over 2 years. 2. `v1.1.0` introduces the data-plane-api proto and initiates the deprecation of the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). -3. `v2.0.0` deletes support for the legacy [ratelimit.proto](https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). This version will be tagged by the end of 2018Q3 (~September 2018) -to give time to community members running ratelimit off of `master`. - +3. `v2.0.0` deleted support for the legacy [ratelimit.proto](https://github.com/envoyproxy/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto). +The current version of the ratelimit protocol is [v3 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto), +while [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) is still supported +as a legacy protocol. +4. `v3.0.0` deletes support for the legacy [v2 rls.proto](https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto). # Building and Testing @@ -97,7 +103,6 @@ go [here](https://golang.org/doc/install). The docker-compose setup has three containers: redis, ratelimit-build, and ratelimit. In order to run the docker-compose setup from the root of the repo, run ```bash -glide install docker-compose up ``` @@ -108,6 +113,23 @@ If you want to run with [two redis instances](#two-redis-instances), you will ne the docker-compose.yaml file to run a second redis container, and change the environment variables as explained in the [two redis instances](#two-redis-instances) section. +## Full test environment +To run a fully configured environment to demo Envoy based rate limiting, run: +```bash +docker-compose -f docker-compose-example.yml up +``` +This will run ratelimit, redis, prom-statsd-exporter and two Envoy containers such that you can demo rate limiting by hitting the below endpoints. +```bash +curl localhost:8888/test +curl localhost:8888/header -H "foo: foo" # Header based +curl localhost:8888/twoheader -H "foo: foo" -H "bar: bar" # Two headers +curl localhost:8888/twoheader -H "foo: foo" -H "baz: baz" +curl localhost:8888/twoheader -H "foo: foo" -H "bar: banned" # Ban a particular header value +``` +Edit `examples/ratelimit/config/example.yaml` to test different rate limit configs. Hot reloading is enabled. + +The descriptors in `example.yaml` and the actions in `examples/envoy/proxy.yaml` should give you a good idea of how to configure rate limits.
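As an illustration of what to expect from the full test environment (a hedged sketch; exact counts depend on whatever limits are currently in `examples/ratelimit/config/example.yaml`), repeatedly hitting one of the rate limited routes shows the 200 -> 429 transition once the configured per-minute budget is spent:

```bash
# Hypothetical walkthrough against the environment started above.
# With the sample config, the "foo"/"bar" descriptor pair is limited per
# minute, so the first few requests return 200 and the rest return 429
# until the minute window rolls over.
for i in $(seq 1 6); do
  curl -s -o /dev/null -w "%{http_code}\n" localhost:8888/twoheader \
    -H "foo: foo" -H "bar: bar"
done
```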
+ # Configuration ## The configuration format @@ -308,7 +330,7 @@ descriptors: The Ratelimit service uses a library written by Lyft called [goruntime](https://github.com/lyft/goruntime) to do configuration loading. Goruntime monitors a designated path, and watches for symlink swaps to files in the directory tree to reload configuration files. -The path to watch can be configured via the [settings](https://github.com/lyft/ratelimit/blob/master/src/settings/settings.go) +The path to watch can be configured via the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go) package with the following environment variables: ``` @@ -319,12 +341,52 @@ RUNTIME_IGNOREDOTFILES default:"false" **Configuration files are loaded from RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/\*.yaml** +There are two methods for triggering a configuration reload: +1. Symlink RUNTIME_ROOT to a different directory. +2. Update the contents inside `RUNTIME_ROOT/RUNTIME_SUBDIRECTORY/config/` directly. + +The former is the default behavior. To use the latter method, set the `RUNTIME_WATCH_ROOT` environment variable to `false`. + For more information on how runtime works you can read its [README](https://github.com/lyft/goruntime). +## Log Format + +A centralized log collection system works better with logs in json format. JSON format avoids the need for custom parsing rules. +The Ratelimit service produces logs in a text format by default. For Example: + +``` +time="2020-09-10T17:22:35Z" level=debug msg="loading domain: messaging" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.message_type_marketing" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.message_type_marketing.to_number ratelimit={requests_per_unit=5, unit=DAY}" +time="2020-09-10T17:22:35Z" level=debug msg="loading descriptor: key=messaging.to_number ratelimit={requests_per_unit=100, unit=DAY}" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for debug on ':6070'" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for HTTP on ':8080'" +time="2020-09-10T17:21:55Z" level=debug msg="waiting for runtime update" +time="2020-09-10T17:21:55Z" level=warning msg="Listening for gRPC on ':8081'" +``` + +JSON Log format can be configured using the following environment variables: + +``` +LOG_FORMAT=json +``` + +Output example: +``` +{"@message":"loading domain: messaging","@timestamp":"2020-09-10T17:22:44.926010192Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.message_type_marketing","@timestamp":"2020-09-10T17:22:44.926019315Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.message_type_marketing.to_number ratelimit={requests_per_unit=5, unit=DAY}","@timestamp":"2020-09-10T17:22:44.926037174Z","level":"debug"} +{"@message":"loading descriptor: key=messaging.to_number ratelimit={requests_per_unit=100, unit=DAY}","@timestamp":"2020-09-10T17:22:44.926048993Z","level":"debug"} +{"@message":"Listening for debug on ':6070'","@timestamp":"2020-09-10T17:22:44.926113905Z","level":"warning"} +{"@message":"Listening for gRPC on ':8081'","@timestamp":"2020-09-10T17:22:44.926182006Z","level":"warning"} +{"@message":"Listening for HTTP on ':8080'","@timestamp":"2020-09-10T17:22:44.926227031Z","level":"warning"} +{"@message":"waiting for runtime update","@timestamp":"2020-09-10T17:22:44.926267808Z","level":"debug"} +``` + # Request Fields For information on the fields of a Ratelimit gRPC request please read the information -on the 
RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/lyft/ratelimit/blob/master/proto/ratelimit/ratelimit.proto) +on the RateLimitRequest message type in the Ratelimit [proto file.](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v3/rls.proto) # Statistics @@ -361,6 +423,54 @@ ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.over_lim ratelimit.service.rate_limit.messaging.message_type_marketing.to_number.total_hits: 0 ``` +# HTTP Port + +The ratelimit service listens to HTTP 1.1 (by default on port 8080) with two endpoints: +1. /healthcheck → return a 200 if this service is healthy +1. /json → HTTP 1.1 endpoint for interacting with ratelimit service + +## /json endpoint + +Takes an HTTP POST with a JSON body of the form e.g. +```json +{ + "domain": "dummy", + "descriptors": [ + {"entries": [ + {"key": "one_per_day", + "value": "something"} + ]} + ] +} +``` +The service will return an http 200 if this request is allowed (if no ratelimits exceeded) or 429 if one or more +ratelimits were exceeded. + +The response is a RateLimitResponse encoded with +[proto3-to-json mapping](https://developers.google.com/protocol-buffers/docs/proto3#json): +```json +{ + "overallCode": "OVER_LIMIT", + "statuses": [ + { + "code": "OVER_LIMIT", + "currentLimit": { + "requestsPerUnit": 1, + "unit": "MINUTE" + } + }, + { + "code": "OK", + "currentLimit": { + "requestsPerUnit": 2, + "unit": "MINUTE" + }, + "limitRemaining": 1 + } + ] +} +``` + # Debug Port The debug port can be used to interact with the running process. @@ -376,8 +486,8 @@ You can specify the debug port with the `DEBUG_PORT` environment variable. It de # Local Cache -Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys, and thus avoids reading the -redis cache again for the already over-the-limit keys. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/lyft/ratelimit/blob/master/src/settings/settings.go). +Ratelimit optionally uses [freecache](https://github.com/coocood/freecache) as its local caching layer, which stores the over-the-limit cache keys, and thus avoids reading the +redis cache again for the already over-the-limit keys. The local cache size can be configured via `LocalCacheSizeInBytes` in the [settings](https://github.com/envoyproxy/ratelimit/blob/master/src/settings/settings.go). If `LocalCacheSizeInBytes` is 0, local cache is disabled. # Redis @@ -392,6 +502,31 @@ As well Ratelimit supports TLS connections and authentication. These can be conf 1. `REDIS_TLS` & `REDIS_PERSECOND_TLS`: set to `"true"` to enable a TLS connection for the specific connection type. 1. `REDIS_AUTH` & `REDIS_PERSECOND_AUTH`: set to `"password"` to enable authentication to the redis host. +## Redis type + +Ratelimit supports different types of redis deployments: + +1. Single instance (default): Talk to a single instance of redis, or a redis proxy (e.g. https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/other_protocols/redis) +1. Sentinel: Talk to a redis deployment with sentinel instances (see https://redis.io/topics/sentinel) +1. Cluster: Talk to a redis in cluster mode (see https://redis.io/topics/cluster-spec) + +The deployment type can be specified with the `REDIS_TYPE` / `REDIS_PERSECOND_TYPE` environment variables. 
Depending on the type defined, the `REDIS_URL` and `REDIS_PERSECOND_URL` are expected to have the following formats: + +1. "single": Depending on the socket type defined, either a single hostname:port pair or a unix domain socket reference. +2. "sentinel": A comma separated list with the first string as the master name of the sentinel cluster followed by hostname:port pairs. The list size should be >= 2. The first item is the name of the master and the rest are the sentinels. +3. "cluster": A comma separated list of hostname:port pairs with all the nodes in the cluster. + +## Pipelining + +By default, for each request, ratelimit will pick up a connection from the pool, write multiple redis commands in a single write and then read their responses in a single read. This reduces network delay. + +For high throughput scenarios, ratelimit also supports [implicit pipelining](https://github.com/mediocregopher/radix/blob/v3.5.1/pool.go#L238). It can be configured using the following environment variables: + +1. `REDIS_PIPELINE_WINDOW` & `REDIS_PERSECOND_PIPELINE_WINDOW`: sets the duration after which internal pipelines will be flushed. +If window is zero then implicit pipelining will be disabled. +1. `REDIS_PIPELINE_LIMIT` & `REDIS_PERSECOND_PIPELINE_LIMIT`: sets the maximum number of commands that can be pipelined before flushing. +If limit is zero then no limit will be used and pipelines will only be limited by the specified time window. + ## One Redis Instance To configure one Redis instance use the following environment variables: @@ -399,6 +534,7 @@ To configure one Redis instance use the following environment variables: 1. `REDIS_SOCKET_TYPE` 1. `REDIS_URL` 1. `REDIS_POOL_SIZE` +1. `REDIS_TYPE` (optional) This setup will use the same Redis server for all limits. @@ -413,6 +549,7 @@ To configure two Redis instances use the following environment variables: 1. `REDIS_PERSECOND_SOCKET_TYPE` 1. `REDIS_PERSECOND_URL` 1. `REDIS_PERSECOND_POOL_SIZE` +1. `REDIS_PERSECOND_TYPE` (optional) This setup will use the Redis server configured with the `_PERSECOND_` vars for per second limits, and the other Redis server for all other limits.
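To make the combinations above concrete, here is a minimal sketch of an environment for the two-instance setup with implicit pipelining enabled. Host names, pool sizes, and the window value are purely illustrative, and the `REDIS_PERSECOND` flag that enables the second instance is assumed from the settings package rather than listed in the excerpt above:

```bash
# Illustrative values only; see src/settings/settings.go for the full set of knobs.
export REDIS_SOCKET_TYPE=tcp
export REDIS_TYPE=single              # optional; single-instance is the default
export REDIS_URL=redis:6379
export REDIS_POOL_SIZE=10
export REDIS_PIPELINE_WINDOW=75us     # non-zero window turns on implicit pipelining
export REDIS_PIPELINE_LIMIT=8

export REDIS_PERSECOND=true           # assumed flag enabling the per-second instance
export REDIS_PERSECOND_SOCKET_TYPE=tcp
export REDIS_PERSECOND_TYPE=single
export REDIS_PERSECOND_URL=redis-persecond:6379
export REDIS_PERSECOND_POOL_SIZE=10
```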
diff --git a/docker-compose-example.yml b/docker-compose-example.yml new file mode 100644 index 000000000..f96879ee4 --- /dev/null +++ b/docker-compose-example.yml @@ -0,0 +1,87 @@ +version: "3" +services: + redis: + image: redis:alpine + expose: + - 6379 + ports: + - 6379:6379 + networks: + - ratelimit-network + + statsd: + image: prom/statsd-exporter:v0.18.0 + expose: + - 9125 + ports: + - 9125:9125 + networks: + - ratelimit-network + + ratelimit: + image: envoyproxy/ratelimit:master + command: /bin/ratelimit + ports: + - 8080:8080 + - 8081:8081 + - 6070:6070 + depends_on: + - redis + - statsd + networks: + - ratelimit-network + volumes: + - ./examples/ratelimit/config:/data/ratelimit/config + environment: + - USE_STATSD=true + - STATSD_HOST=statsd + - STATSD_PORT=9125 + - LOG_LEVEL=debug + - REDIS_SOCKET_TYPE=tcp + - REDIS_URL=redis:6379 + - RUNTIME_ROOT=/data + - RUNTIME_SUBDIRECTORY=ratelimit + - RUNTIME_WATCH_ROOT=false + + envoy-proxy: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node proxy" + - "--service-cluster proxy" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + volumes: + - ./examples/envoy/proxy.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "8888" + - "8001" + ports: + - "8888:8888" + - "8001:8001" + + envoy-mock: + image: envoyproxy/envoy-dev:latest + entrypoint: "/usr/local/bin/envoy" + command: + - "--service-node mock" + - "--service-cluster mock" + - "--config-path /etc/envoy/envoy.yaml" + - "--concurrency 1" + - "--mode serve" + - "--log-level info" + volumes: + - ./examples/envoy/mock.yaml:/etc/envoy/envoy.yaml + networks: + - ratelimit-network + expose: + - "9999" + ports: + - "9999:9999" + +networks: + ratelimit-network: diff --git a/docker-compose.yml b/docker-compose.yml index d2af278c2..ac1ab9063 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,16 +12,25 @@ services: # minimal container that builds the ratelimit service binary and exits. 
ratelimit-build: - image: golang:1.10-alpine - working_dir: /go/src/github.com/lyft/ratelimit - command: go build -o /usr/local/bin/ratelimit /go/src/github.com/lyft/ratelimit/src/service_cmd/main.go + image: golang:1.14-alpine + working_dir: /go/src/github.com/envoyproxy/ratelimit + command: go build -o /usr/local/bin/ratelimit ./src/service_cmd/main.go volumes: - - .:/go/src/github.com/lyft/ratelimit + - .:/go/src/github.com/envoyproxy/ratelimit + - binary:/usr/local/bin/ + + ratelimit-client-build: + image: golang:1.14-alpine + working_dir: /go/src/github.com/envoyproxy/ratelimit + command: go build -o /usr/local/bin/ratelimit_client ./src/client_cmd/main.go + volumes: + - .:/go/src/github.com/envoyproxy/ratelimit - binary:/usr/local/bin/ ratelimit: image: alpine:3.6 - command: /usr/local/bin/ratelimit + command: > + sh -c "until test -f /usr/local/bin/ratelimit; do sleep 5; done; /usr/local/bin/ratelimit" ports: - 8080:8080 - 8081:8081 @@ -29,6 +38,7 @@ services: depends_on: - redis - ratelimit-build + - ratelimit-client-build networks: - ratelimit-network volumes: diff --git a/examples/envoy/mock.yaml b/examples/envoy/mock.yaml new file mode 100644 index 000000000..bd85fc3d2 --- /dev/null +++ b/examples/envoy/mock.yaml @@ -0,0 +1,34 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 9999 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress + route_config: + name: ingress + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: "/" + direct_response: + status: "200" + body: + inline_string: "Hello World" + http_filters: + - name: envoy.router + config: {} +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 diff --git a/examples/envoy/proxy.yaml b/examples/envoy/proxy.yaml new file mode 100644 index 000000000..bb45503f9 --- /dev/null +++ b/examples/envoy/proxy.yaml @@ -0,0 +1,104 @@ +admin: + access_log_path: "/dev/null" + address: + socket_address: + address: 0.0.0.0 + port_value: 8001 +static_resources: + clusters: + - name: ratelimit + type: STRICT_DNS + connect_timeout: 1s + lb_policy: ROUND_ROBIN + protocol_selection: USE_CONFIGURED_PROTOCOL + http2_protocol_options: {} + load_assignment: + cluster_name: ratelimit + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: ratelimit + port_value: 8081 + - name: mock + connect_timeout: 1s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: mock + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: envoy-mock + port_value: 9999 + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 8888 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: auto + stat_prefix: ingress + http_filters: + - name: envoy.rate_limit + config: + domain: rl + request_type: external + stage: 0 + rate_limited_as_resource_exhausted: true + failure_mode_deny: false + rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit + - name: envoy.router + config: {} + route_config: + name: route + virtual_hosts: + - name: backend + domains: + - "*" + routes: + - match: + prefix: /test + route: + cluster: mock + rate_limits: + - actions: + - source_cluster: {} + - destination_cluster: {} + - match: + prefix: /header + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + 
descriptor_key: "foo" + - match: + prefix: /twoheader + route: + cluster: mock + rate_limits: + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "bar" + descriptor_key: "bar" + - actions: + - request_headers: + header_name: "foo" + descriptor_key: "foo" + - request_headers: + header_name: "baz" + descriptor_key: "baz" diff --git a/examples/ratelimit/config/example.yaml b/examples/ratelimit/config/example.yaml new file mode 100644 index 000000000..03e2f7839 --- /dev/null +++ b/examples/ratelimit/config/example.yaml @@ -0,0 +1,29 @@ +--- +domain: rl +descriptors: + - key: source_cluster + value: proxy + descriptors: + - key: destination_cluster + value: mock + rate_limit: + unit: minute + requests_per_unit: 1 + - key: foo + rate_limit: + unit: minute + requests_per_unit: 2 + descriptors: + - key: bar + rate_limit: + unit: minute + requests_per_unit: 3 + - key: bar + value: banned + rate_limit: + unit: minute + requests_per_unit: 0 + - key: baz + rate_limit: + unit: second + requests_per_unit: 1 diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 2d7ff379f..000000000 --- a/glide.lock +++ /dev/null @@ -1,142 +0,0 @@ -hash: 8cc8fb031b0204aa915eaa8947e364aa1ce4362a3a9eede8086ffa97ced02b4c -updated: 2019-12-19T15:33:02.225239-08:00 -imports: -- name: github.com/cespare/xxhash - version: d7df74196a9e781ede915320c11c378c1b2f3a1f -- name: github.com/coocood/freecache - version: 3c79a0a23c1940ab4479332fb3e0127265650ce3 -- name: github.com/envoyproxy/go-control-plane - version: 0ad6fa1cf0b9b6ca8f3617a7188a568e81f40b87 - subpackages: - - envoy/api/v2/core - - envoy/api/v2/ratelimit - - envoy/service/ratelimit/v2 - - envoy/type -- name: github.com/envoyproxy/protoc-gen-validate - version: ff6f7a9bc2e5fe006509b9f8c7594c41a953d50f -- name: github.com/fsnotify/fsnotify - version: 629574ca2a5df945712d3079857300b5e4da0236 -- name: github.com/gogo/protobuf - version: 5628607bb4c51c3157aacc3a50f0ab707582b805 - subpackages: - - gogoproto - - proto - - protoc-gen-gogo/descriptor - - protoc-gen-gogofast - - sortkeys - - types -- name: github.com/golang/mock - version: 41e7e9a91aa20115266b326233308d17079ea51c - subpackages: - - gomock -- name: github.com/golang/protobuf - version: b5d812f8a3706043e23a9cd5babf2e5423744d30 - subpackages: - - jsonpb - - proto - - protoc-gen-go/descriptor - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/struct - - ptypes/timestamp -- name: github.com/google/protobuf - version: 6973c3a5041636c1d8dc5f7f6c8c1f3c15bc63d6 -- name: github.com/gorilla/mux - version: 49c01487a141b49f8ffe06277f3dca3ee80a55fa -- name: github.com/kavu/go_reuseport - version: 3d6c1e425f717ee59152524e73b904b67705eeb8 -- name: github.com/kelseyhightower/envconfig - version: ac12b1f15efba734211a556d8b125110dc538016 -- name: github.com/lyft/goruntime - version: a0d6acf20fcfd48f53e623ed62b87ffb7fe17038 - subpackages: - - loader - - snapshot - - snapshot/entry -- name: github.com/lyft/gostats - version: 943f43ede7b2dbf1d7162587689cb484d49ecd15 -- name: github.com/lyft/protoc-gen-validate - version: f9d2b11e44149635b23a002693b76512b01ae515 - subpackages: - - validate -- name: github.com/mediocregopher/radix.v2 - version: b67df6e626f993b64b3ca9f4b8630900e61002e3 - subpackages: - - pool - - redis -- name: github.com/sirupsen/logrus - version: d682213848ed68c0a260ca37d6dd5ace8423f5ba -- name: github.com/stretchr/testify - version: f390dcf405f7b83c997eac1b06768bb9f44dec18 - subpackages: - - assert -- name: golang.org/x/crypto - version: 
becbf705a91575484002d598f87d74f0002801e7 - subpackages: - - ssh/terminal -- name: golang.org/x/net - version: c0dbc17a35534bf2e581d7a942408dc936316da4 - subpackages: - - context - - http/httpguts - - http2 - - http2/hpack - - idna - - internal/timeseries - - trace -- name: golang.org/x/sys - version: acbc56fc7007d2a01796d5bde54f39e3b3e95945 - subpackages: - - unix - - windows -- name: golang.org/x/text - version: cbf43d21aaebfdfeb81d91a5f444d13a3046e686 - subpackages: - - secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: google.golang.org/genproto - version: b31c10ee225f87dbb9f5f878ead9d64f34f5cbbb - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 41344da2231b913fa3d983840a57a6b1b7b631a1 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - channelz - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - grpclb/grpc_lb_v1/messages - - grpclog - - health - - health/grpc_health_v1 - - internal - - keepalive - - metadata - - naming - - peer - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/yaml.v2 - version: 1f64d6156d11335c3f22d9330b0ad14fc1e789ce -testImports: -- name: github.com/davecgh/go-spew - version: 2df174808ee097f90d259e432cc04442cf60be21 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index 82fbcdc97..000000000 --- a/glide.yaml +++ /dev/null @@ -1,41 +0,0 @@ -package: github.com/lyft/ratelimit -import: -- package: github.com/golang/mock - version: master - subpackages: - - gomock -- package: github.com/kelseyhightower/envconfig - version: 1.1.0 -- package: github.com/lyft/gostats - version: v0.2.6 -- package: github.com/lyft/goruntime - version: v0.2.1 -- package: github.com/mediocregopher/radix.v2 - version: master - subpackages: - - pool - - redis -- package: github.com/sirupsen/logrus - version: ^1.0 -- package: golang.org/x/net - version: master - subpackages: - - context -- package: gopkg.in/yaml.v2 - version: master -- package: github.com/stretchr/testify - version: v1.1.3 -- package: google.golang.org/grpc - version: v1.12.0 -- package: github.com/kavu/go_reuseport - version: v1.2.0 -- package: github.com/envoyproxy/go-control-plane - version: v0.6.9 -- package: github.com/envoyproxy/protoc-gen-validate - version: v0.0.14 -- package: github.com/google/protobuf - version: v3.7.1 -- package: github.com/golang/protobuf/proto - version: v1.3.1 -- package: github.com/coocood/freecache - version: v1.1.0 diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..52db99472 --- /dev/null +++ b/go.mod @@ -0,0 +1,29 @@ +module github.com/envoyproxy/ratelimit + +go 1.14 + +require ( + github.com/alicebob/miniredis/v2 v2.11.4 + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 // indirect + github.com/coocood/freecache v1.1.0 + github.com/envoyproxy/go-control-plane v0.9.6 + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/golang/mock v1.4.1 + github.com/golang/protobuf v1.4.2 + github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 + github.com/kavu/go_reuseport v1.2.0 + github.com/kelseyhightower/envconfig v1.1.0 + github.com/lyft/goruntime v0.2.5 + github.com/lyft/gostats v0.4.0 + github.com/mediocregopher/radix/v3 v3.5.1 + github.com/sirupsen/logrus v1.6.0 + github.com/stretchr/objx 
v0.2.0 // indirect + github.com/stretchr/testify v1.5.1 + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e // indirect + golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb // indirect + google.golang.org/grpc v1.27.0 + google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/yaml.v2 v2.3.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..a077d1811 --- /dev/null +++ b/go.sum @@ -0,0 +1,159 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= +github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.11.4 h1:GsuyeunTx7EllZBU3/6Ji3dhMQZDpC9rLf1luJ+6M5M= +github.com/alicebob/miniredis/v2 v2.11.4/go.mod h1:VL3UDEfAH59bSa7MuHMuFToxkqyHh69s/WUbYlOAuyg= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coocood/freecache v1.1.0 h1:ENiHOsWdj1BrrlPwblhbn4GdAsMymK3pZORJ+bJGAjA= +github.com/coocood/freecache v1.1.0/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.6 h1:GgblEiDzxf5ajlAZY4aC8xp7DwkrGfauFNMGdB2bBv0= +github.com/envoyproxy/go-control-plane v0.9.6/go.mod h1:GFqM7v0B62MraO4PWRedIbhThr/Rf7ev6aHOOPXeaDA= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.1 h1:ocYkMQY5RrXTYgXl7ICpV0IXwlEQGwKIsery4gyXa1U= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3 h1:6amM4HsNPOvMLVc2ZnyqrjeQ92YAVWn7T4WBKK87inY= +github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141 h1:VQjjMh+uElTfioy6GnUrVrTMAiLTNF3xsrAlSwC+g8o= +github.com/gorilla/mux v1.7.4-0.20191121170500-49c01487a141/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/kavu/go_reuseport v1.2.0 h1:YO+pt6m5Z3WkVH9DjaDJzoSS/0FO2Q8x3CfObxk/i2E= +github.com/kavu/go_reuseport v1.2.0/go.mod h1:CG8Ee7ceMFSMnx/xr25Vm0qXaj2Z4i5PWoUx+JZ5/CU= +github.com/kelseyhightower/envconfig v1.1.0 h1:4htXR8ameS6KBfrNBoqEgpg0IK2D6rozN9ATOPwRfM0= +github.com/kelseyhightower/envconfig v1.1.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/lyft/goruntime v0.2.5 h1:yRmwOXl3Zns3+Z03fDMWt5+p609rfhIErh7HYCayODg= +github.com/lyft/goruntime v0.2.5/go.mod h1:8rUh5gwIPQtyIkIXHbLN1j45HOb8cMgDhrw5GA7DF4g= +github.com/lyft/gostats v0.4.0 h1:PbRWmwidTPk6Y80S6itBWDa+XVt1hGvqFM88TBJYdOo= +github.com/lyft/gostats 
v0.4.0/go.mod h1:Tpx2xRzz4t+T2Tx0xdVgIoBdR2UMVz+dKnE3X01XSd8= +github.com/mediocregopher/radix/v3 v3.5.1 h1:IOYgQUMA380N4khaL5eNT4v/P2LnHa8b0wnVdwZMFsY= +github.com/mediocregopher/radix/v3 v3.5.1/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 
h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb h1:MsKWO3hK1h941VWsQ8dKJqIdb3r3XP9/cDw8n/B95SM= +golang.org/x/text v0.3.3-0.20191122225017-cbf43d21aaeb/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/proto/ratelimit/ratelimit.pb.go b/proto/ratelimit/ratelimit.pb.go deleted file mode 100644 index 77f41dbbd..000000000 --- a/proto/ratelimit/ratelimit.pb.go +++ /dev/null @@ -1,538 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto/ratelimit/ratelimit.proto - -package ratelimit - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RateLimit_Unit int32 - -const ( - RateLimit_UNKNOWN RateLimit_Unit = 0 - RateLimit_SECOND RateLimit_Unit = 1 - RateLimit_MINUTE RateLimit_Unit = 2 - RateLimit_HOUR RateLimit_Unit = 3 - RateLimit_DAY RateLimit_Unit = 4 -) - -var RateLimit_Unit_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SECOND", - 2: "MINUTE", - 3: "HOUR", - 4: "DAY", -} -var RateLimit_Unit_value = map[string]int32{ - "UNKNOWN": 0, - "SECOND": 1, - "MINUTE": 2, - "HOUR": 3, - "DAY": 4, -} - -func (x RateLimit_Unit) String() string { - return proto.EnumName(RateLimit_Unit_name, int32(x)) -} -func (RateLimit_Unit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2, 0} -} - -type RateLimitResponse_Code int32 - -const ( - RateLimitResponse_UNKNOWN RateLimitResponse_Code = 0 - RateLimitResponse_OK RateLimitResponse_Code = 1 - RateLimitResponse_OVER_LIMIT RateLimitResponse_Code = 2 -) - -var RateLimitResponse_Code_name = map[int32]string{ - 0: "UNKNOWN", - 1: "OK", - 2: "OVER_LIMIT", -} -var RateLimitResponse_Code_value = map[string]int32{ - "UNKNOWN": 0, - "OK": 1, - "OVER_LIMIT": 2, -} - -func (x RateLimitResponse_Code) String() string { - return proto.EnumName(RateLimitResponse_Code_name, int32(x)) -} -func (RateLimitResponse_Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -type RateLimitRequest struct { - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - Domain string `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. - Descriptors []*RateLimitDescriptor `protobuf:"bytes,2,rep,name=descriptors" json:"descriptors,omitempty"` - // Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the - // value is not set in the message, a request increases the matched limit by 1. 
- HitsAddend uint32 `protobuf:"varint,3,opt,name=hits_addend,json=hitsAddend" json:"hits_addend,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitRequest) Reset() { *m = RateLimitRequest{} } -func (m *RateLimitRequest) String() string { return proto.CompactTextString(m) } -func (*RateLimitRequest) ProtoMessage() {} -func (*RateLimitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{0} -} -func (m *RateLimitRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitRequest.Unmarshal(m, b) -} -func (m *RateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitRequest.Marshal(b, m, deterministic) -} -func (dst *RateLimitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitRequest.Merge(dst, src) -} -func (m *RateLimitRequest) XXX_Size() int { - return xxx_messageInfo_RateLimitRequest.Size(m) -} -func (m *RateLimitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitRequest proto.InternalMessageInfo - -func (m *RateLimitRequest) GetDomain() string { - if m != nil { - return m.Domain - } - return "" -} - -func (m *RateLimitRequest) GetDescriptors() []*RateLimitDescriptor { - if m != nil { - return m.Descriptors - } - return nil -} - -func (m *RateLimitRequest) GetHitsAddend() uint32 { - if m != nil { - return m.HitsAddend - } - return 0 -} - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// 1) ["authenticated": "false"], ["ip_address": "10.0.0.1"] -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The -// configuration supplies a default limit for the ip_address field. If there is a desire to raise -// the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// 2) ["authenticated": "false"], ["path": "/foo/bar"] -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// 3) ["authenticated": "false"], ["path": "/foo/bar"], ["ip_address": "10.0.0.1"] -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// 4) ["authenticated": "true"], ["client_id": "foo"] -// What it does: Limits all traffic for an authenticated client "foo" -// 5) ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. 
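The semantics described in this comment carry over unchanged to the v3 API that the rest of this diff migrates to. As a purely illustrative sketch (the domain and entry values are made up), the snippet below builds one request that combines cases (1), (2) and (3), so the service evaluates all three descriptors and returns OVER_LIMIT if any one of them is over its limit:

package main

import (
	"fmt"

	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
)

func main() {
	// One request, three descriptors: unauthenticated-per-IP, unauthenticated-per-path,
	// and unauthenticated-per-path-per-IP, as in examples (1), (2) and (3) above.
	req := &pb.RateLimitRequest{
		Domain: "envoy",
		Descriptors: []*pb_struct.RateLimitDescriptor{
			{Entries: []*pb_struct.RateLimitDescriptor_Entry{
				{Key: "authenticated", Value: "false"},
				{Key: "ip_address", Value: "10.0.0.1"},
			}},
			{Entries: []*pb_struct.RateLimitDescriptor_Entry{
				{Key: "authenticated", Value: "false"},
				{Key: "path", Value: "/foo/bar"},
			}},
			{Entries: []*pb_struct.RateLimitDescriptor_Entry{
				{Key: "authenticated", Value: "false"},
				{Key: "path", Value: "/foo/bar"},
				{Key: "ip_address", Value: "10.0.0.1"},
			}},
		},
		// Leaving HitsAddend unset counts the request as a single hit.
	}
	fmt.Println(req.Domain, len(req.Descriptors))
}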
-type RateLimitDescriptor struct { - Entries []*RateLimitDescriptor_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitDescriptor) Reset() { *m = RateLimitDescriptor{} } -func (m *RateLimitDescriptor) String() string { return proto.CompactTextString(m) } -func (*RateLimitDescriptor) ProtoMessage() {} -func (*RateLimitDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1} -} -func (m *RateLimitDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitDescriptor.Unmarshal(m, b) -} -func (m *RateLimitDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitDescriptor.Marshal(b, m, deterministic) -} -func (dst *RateLimitDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor.Merge(dst, src) -} -func (m *RateLimitDescriptor) XXX_Size() int { - return xxx_messageInfo_RateLimitDescriptor.Size(m) -} -func (m *RateLimitDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitDescriptor proto.InternalMessageInfo - -func (m *RateLimitDescriptor) GetEntries() []*RateLimitDescriptor_Entry { - if m != nil { - return m.Entries - } - return nil -} - -type RateLimitDescriptor_Entry struct { - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitDescriptor_Entry) Reset() { *m = RateLimitDescriptor_Entry{} } -func (m *RateLimitDescriptor_Entry) String() string { return proto.CompactTextString(m) } -func (*RateLimitDescriptor_Entry) ProtoMessage() {} -func (*RateLimitDescriptor_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1, 0} -} -func (m *RateLimitDescriptor_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitDescriptor_Entry.Unmarshal(m, b) -} -func (m *RateLimitDescriptor_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitDescriptor_Entry.Marshal(b, m, deterministic) -} -func (dst *RateLimitDescriptor_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor_Entry.Merge(dst, src) -} -func (m *RateLimitDescriptor_Entry) XXX_Size() int { - return xxx_messageInfo_RateLimitDescriptor_Entry.Size(m) -} -func (m *RateLimitDescriptor_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitDescriptor_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitDescriptor_Entry proto.InternalMessageInfo - -func (m *RateLimitDescriptor_Entry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *RateLimitDescriptor_Entry) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Defines an actual rate limit in terms of requests per unit of time and the unit itself. 
-type RateLimit struct { - RequestsPerUnit uint32 `protobuf:"varint,1,opt,name=requests_per_unit,json=requestsPerUnit" json:"requests_per_unit,omitempty"` - Unit RateLimit_Unit `protobuf:"varint,2,opt,name=unit,enum=pb.lyft.ratelimit.RateLimit_Unit" json:"unit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimit) Reset() { *m = RateLimit{} } -func (m *RateLimit) String() string { return proto.CompactTextString(m) } -func (*RateLimit) ProtoMessage() {} -func (*RateLimit) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2} -} -func (m *RateLimit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimit.Unmarshal(m, b) -} -func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) -} -func (dst *RateLimit) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimit.Merge(dst, src) -} -func (m *RateLimit) XXX_Size() int { - return xxx_messageInfo_RateLimit.Size(m) -} -func (m *RateLimit) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimit.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimit proto.InternalMessageInfo - -func (m *RateLimit) GetRequestsPerUnit() uint32 { - if m != nil { - return m.RequestsPerUnit - } - return 0 -} - -func (m *RateLimit) GetUnit() RateLimit_Unit { - if m != nil { - return m.Unit - } - return RateLimit_UNKNOWN -} - -// A response from a ShouldRateLimit call. -type RateLimitResponse struct { - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - OverallCode RateLimitResponse_Code `protobuf:"varint,1,opt,name=overall_code,json=overallCode,enum=pb.lyft.ratelimit.RateLimitResponse_Code" json:"overall_code,omitempty"` - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. 
- Statuses []*RateLimitResponse_DescriptorStatus `protobuf:"bytes,2,rep,name=statuses" json:"statuses,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitResponse) Reset() { *m = RateLimitResponse{} } -func (m *RateLimitResponse) String() string { return proto.CompactTextString(m) } -func (*RateLimitResponse) ProtoMessage() {} -func (*RateLimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3} -} -func (m *RateLimitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitResponse.Unmarshal(m, b) -} -func (m *RateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitResponse.Marshal(b, m, deterministic) -} -func (dst *RateLimitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse.Merge(dst, src) -} -func (m *RateLimitResponse) XXX_Size() int { - return xxx_messageInfo_RateLimitResponse.Size(m) -} -func (m *RateLimitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitResponse proto.InternalMessageInfo - -func (m *RateLimitResponse) GetOverallCode() RateLimitResponse_Code { - if m != nil { - return m.OverallCode - } - return RateLimitResponse_UNKNOWN -} - -func (m *RateLimitResponse) GetStatuses() []*RateLimitResponse_DescriptorStatus { - if m != nil { - return m.Statuses - } - return nil -} - -type RateLimitResponse_DescriptorStatus struct { - // The response code for an individual descriptor. - Code RateLimitResponse_Code `protobuf:"varint,1,opt,name=code,enum=pb.lyft.ratelimit.RateLimitResponse_Code" json:"code,omitempty"` - // The current limit as configured by the server. Useful for debugging, etc. - CurrentLimit *RateLimit `protobuf:"bytes,2,opt,name=current_limit,json=currentLimit" json:"current_limit,omitempty"` - // The limit remaining in the current time unit. 
- LimitRemaining uint32 `protobuf:"varint,3,opt,name=limit_remaining,json=limitRemaining" json:"limit_remaining,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitResponse_DescriptorStatus) Reset() { *m = RateLimitResponse_DescriptorStatus{} } -func (m *RateLimitResponse_DescriptorStatus) String() string { return proto.CompactTextString(m) } -func (*RateLimitResponse_DescriptorStatus) ProtoMessage() {} -func (*RateLimitResponse_DescriptorStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Unmarshal(m, b) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Marshal(b, m, deterministic) -} -func (dst *RateLimitResponse_DescriptorStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse_DescriptorStatus.Merge(dst, src) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_Size() int { - return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Size(m) -} -func (m *RateLimitResponse_DescriptorStatus) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitResponse_DescriptorStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitResponse_DescriptorStatus proto.InternalMessageInfo - -func (m *RateLimitResponse_DescriptorStatus) GetCode() RateLimitResponse_Code { - if m != nil { - return m.Code - } - return RateLimitResponse_UNKNOWN -} - -func (m *RateLimitResponse_DescriptorStatus) GetCurrentLimit() *RateLimit { - if m != nil { - return m.CurrentLimit - } - return nil -} - -func (m *RateLimitResponse_DescriptorStatus) GetLimitRemaining() uint32 { - if m != nil { - return m.LimitRemaining - } - return 0 -} - -func init() { - proto.RegisterType((*RateLimitRequest)(nil), "pb.lyft.ratelimit.RateLimitRequest") - proto.RegisterType((*RateLimitDescriptor)(nil), "pb.lyft.ratelimit.RateLimitDescriptor") - proto.RegisterType((*RateLimitDescriptor_Entry)(nil), "pb.lyft.ratelimit.RateLimitDescriptor.Entry") - proto.RegisterType((*RateLimit)(nil), "pb.lyft.ratelimit.RateLimit") - proto.RegisterType((*RateLimitResponse)(nil), "pb.lyft.ratelimit.RateLimitResponse") - proto.RegisterType((*RateLimitResponse_DescriptorStatus)(nil), "pb.lyft.ratelimit.RateLimitResponse.DescriptorStatus") - proto.RegisterEnum("pb.lyft.ratelimit.RateLimit_Unit", RateLimit_Unit_name, RateLimit_Unit_value) - proto.RegisterEnum("pb.lyft.ratelimit.RateLimitResponse_Code", RateLimitResponse_Code_name, RateLimitResponse_Code_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for RateLimitService service - -type RateLimitServiceClient interface { - // Determine whether rate limiting should take place. 
- ShouldRateLimit(ctx context.Context, in *RateLimitRequest, opts ...grpc.CallOption) (*RateLimitResponse, error) -} - -type rateLimitServiceClient struct { - cc *grpc.ClientConn -} - -func NewRateLimitServiceClient(cc *grpc.ClientConn) RateLimitServiceClient { - return &rateLimitServiceClient{cc} -} - -func (c *rateLimitServiceClient) ShouldRateLimit(ctx context.Context, in *RateLimitRequest, opts ...grpc.CallOption) (*RateLimitResponse, error) { - out := new(RateLimitResponse) - err := grpc.Invoke(ctx, "/pb.lyft.ratelimit.RateLimitService/ShouldRateLimit", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for RateLimitService service - -type RateLimitServiceServer interface { - // Determine whether rate limiting should take place. - ShouldRateLimit(context.Context, *RateLimitRequest) (*RateLimitResponse, error) -} - -func RegisterRateLimitServiceServer(s *grpc.Server, srv RateLimitServiceServer) { - s.RegisterService(&_RateLimitService_serviceDesc, srv) -} - -func _RateLimitService_ShouldRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RateLimitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RateLimitServiceServer).ShouldRateLimit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/pb.lyft.ratelimit.RateLimitService/ShouldRateLimit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RateLimitServiceServer).ShouldRateLimit(ctx, req.(*RateLimitRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _RateLimitService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "pb.lyft.ratelimit.RateLimitService", - HandlerType: (*RateLimitServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ShouldRateLimit", - Handler: _RateLimitService_ShouldRateLimit_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/ratelimit/ratelimit.proto", -} - -func init() { - proto.RegisterFile("proto/ratelimit/ratelimit.proto", fileDescriptor_ratelimit_8ec600a45de499be) -} - -var fileDescriptor_ratelimit_8ec600a45de499be = []byte{ - // 532 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x8e, 0xd2, 0x40, - 0x14, 0xde, 0xa1, 0x5d, 0x58, 0x4e, 0x17, 0x28, 0xa3, 0x31, 0x84, 0x98, 0x2c, 0x56, 0xa3, 0xf8, - 0x93, 0x6e, 0x82, 0xd9, 0x4b, 0x4d, 0x70, 0xc1, 0x2c, 0x59, 0x16, 0x74, 0x58, 0x34, 0x7a, 0x61, - 0xd3, 0xa5, 0x47, 0xb7, 0xb1, 0xdb, 0xe2, 0xcc, 0x94, 0x84, 0x3b, 0x9f, 0xc0, 0x3b, 0x1f, 0xc0, - 0x17, 0xf0, 0x0d, 0x7c, 0x37, 0xd3, 0xa1, 0x14, 0xfc, 0x09, 0x21, 0x7b, 0x77, 0xfe, 0xbe, 0xef, - 0x9c, 0x9e, 0xef, 0x4c, 0xe1, 0x60, 0xca, 0x23, 0x19, 0x1d, 0x72, 0x57, 0x62, 0xe0, 0x5f, 0xf9, - 0x72, 0x65, 0xd9, 0x2a, 0x43, 0xab, 0xd3, 0x0b, 0x3b, 0x98, 0x7f, 0x94, 0x76, 0x96, 0xb0, 0xbe, - 0x13, 0x30, 0x99, 0x2b, 0xb1, 0x9f, 0x78, 0x0c, 0xbf, 0xc4, 0x28, 0x24, 0xbd, 0x05, 0x79, 0x2f, - 0xba, 0x72, 0xfd, 0xb0, 0x46, 0x1a, 0xa4, 0x59, 0x64, 0xa9, 0x47, 0x4f, 0xc0, 0xf0, 0x50, 0x4c, - 0xb8, 0x3f, 0x95, 0x11, 0x17, 0xb5, 0x5c, 0x43, 0x6b, 0x1a, 0xad, 0xfb, 0xf6, 0x3f, 0xac, 0x76, - 0xc6, 0xd8, 0xc9, 0xca, 0xd9, 0x3a, 0x94, 0x1e, 0x80, 0x71, 0xe9, 0x4b, 0xe1, 0xb8, 0x9e, 0x87, - 0xa1, 0x57, 0xd3, 0x1a, 0xa4, 0x59, 0x62, 0x90, 0x84, 0xda, 0x2a, 0x62, 0x7d, 0x23, 0x70, 0xe3, - 0x3f, 0x2c, 0xf4, 0x25, 0x14, 0x30, 0x94, 0xdc, 0x47, 0x51, 
0x23, 0xaa, 0xfd, 0x93, 0xed, 0xda, - 0xdb, 0xdd, 0x50, 0xf2, 0x39, 0x5b, 0x82, 0xeb, 0x87, 0xb0, 0xab, 0x22, 0xd4, 0x04, 0xed, 0x33, - 0xce, 0xd3, 0x0f, 0x4d, 0x4c, 0x7a, 0x13, 0x76, 0x67, 0x6e, 0x10, 0x63, 0x2d, 0xa7, 0x62, 0x0b, - 0xc7, 0xfa, 0x49, 0xa0, 0x98, 0xf1, 0xd2, 0x47, 0x50, 0xe5, 0x8b, 0x65, 0x09, 0x67, 0x8a, 0xdc, - 0x89, 0x43, 0x5f, 0x2a, 0x8e, 0x12, 0xab, 0x2c, 0x13, 0xaf, 0x90, 0x8f, 0x43, 0x5f, 0xd2, 0x23, - 0xd0, 0x55, 0x3a, 0xa1, 0x2b, 0xb7, 0xee, 0x6c, 0x9a, 0xd7, 0x4e, 0x00, 0x4c, 0x95, 0x5b, 0xcf, - 0x41, 0x57, 0x70, 0x03, 0x0a, 0xe3, 0xc1, 0xe9, 0x60, 0xf8, 0x76, 0x60, 0xee, 0x50, 0x80, 0xfc, - 0xa8, 0x7b, 0x3c, 0x1c, 0x74, 0x4c, 0x92, 0xd8, 0x67, 0xbd, 0xc1, 0xf8, 0xbc, 0x6b, 0xe6, 0xe8, - 0x1e, 0xe8, 0x27, 0xc3, 0x31, 0x33, 0x35, 0x5a, 0x00, 0xad, 0xd3, 0x7e, 0x67, 0xea, 0xd6, 0x0f, - 0x0d, 0xaa, 0x6b, 0xca, 0x8a, 0x69, 0x14, 0x0a, 0xa4, 0x7d, 0xd8, 0x8f, 0x66, 0xc8, 0xdd, 0x20, - 0x70, 0x26, 0x91, 0x87, 0x6a, 0xe6, 0x72, 0xeb, 0xe1, 0xa6, 0xa1, 0x96, 0x58, 0xfb, 0x38, 0xf2, - 0x90, 0x19, 0x29, 0x3c, 0x71, 0xe8, 0x6b, 0xd8, 0x13, 0xd2, 0x95, 0xb1, 0xc0, 0xe5, 0x35, 0x1c, - 0x6d, 0xc5, 0xb4, 0xd2, 0x65, 0xa4, 0xe0, 0x2c, 0xa3, 0xa9, 0xff, 0x22, 0x60, 0xfe, 0x9d, 0xa6, - 0xcf, 0x40, 0xbf, 0xde, 0xb4, 0x0a, 0x46, 0xdb, 0x50, 0x9a, 0xc4, 0x9c, 0x63, 0x28, 0x1d, 0x55, - 0xad, 0xa4, 0x30, 0x5a, 0xb7, 0x37, 0xf2, 0xec, 0xa7, 0x90, 0x85, 0xe0, 0x0f, 0xa0, 0xa2, 0x0a, - 0x1c, 0x8e, 0xc9, 0x53, 0xf0, 0xc3, 0x4f, 0xe9, 0xd1, 0x96, 0x83, 0x45, 0xd7, 0x34, 0x6a, 0x3d, - 0x06, 0x5d, 0xad, 0xe6, 0x0f, 0xd9, 0xf2, 0x90, 0x1b, 0x9e, 0x9a, 0x84, 0x96, 0x01, 0x86, 0x6f, - 0xba, 0xcc, 0xe9, 0xf7, 0xce, 0x7a, 0xe7, 0x66, 0xae, 0xc5, 0xd7, 0x1e, 0xdf, 0x08, 0xf9, 0xcc, - 0x9f, 0x20, 0xfd, 0x00, 0x95, 0xd1, 0x65, 0x14, 0x07, 0xde, 0xea, 0xda, 0xee, 0x6e, 0xfe, 0x60, - 0x75, 0x6e, 0xf5, 0x7b, 0xdb, 0x6c, 0xc5, 0xda, 0x79, 0x51, 0x7e, 0x5f, 0xcc, 0x0a, 0xbe, 0x12, - 0x72, 0x91, 0x57, 0xff, 0x86, 0xa7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x0e, 0xa0, 0x99, - 0x3e, 0x04, 0x00, 0x00, -} diff --git a/proto/ratelimit/ratelimit.proto b/proto/ratelimit/ratelimit.proto deleted file mode 100644 index 7e1ee60fa..000000000 --- a/proto/ratelimit/ratelimit.proto +++ /dev/null @@ -1,102 +0,0 @@ -syntax = "proto3"; - -option go_package = "ratelimit"; - -option cc_generic_services = true; - -package pb.lyft.ratelimit; - -service RateLimitService { - // Determine whether rate limiting should take place. - rpc ShouldRateLimit (RateLimitRequest) returns (RateLimitResponse) {} -} - -// Main message for a rate limit request. The rate limit service is designed to be fully generic -// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded -// configuration will parse the request and find the most specific limit to apply. In addition, -// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors -// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any -// of them are over limit. This enables more complex application level rate limiting scenarios -// if desired. -message RateLimitRequest { - // All rate limit requests must specify a domain. This enables the configuration to be per - // application without fear of overlap. E.g., "envoy". - string domain = 1; - // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is - // processed by the service (see below). If any of the descriptors are over limit, the entire - // request is considered to be over limit. 
- repeated RateLimitDescriptor descriptors = 2; - // Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the - // value is not set in the message, a request increases the matched limit by 1. - uint32 hits_addend = 3; -} - -// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to -// determine the final rate limit key and overall allowed limit. Here are some examples of how -// they might be used for the domain "envoy". -// 1) ["authenticated": "false"], ["ip_address": "10.0.0.1"] -// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The -// configuration supplies a default limit for the ip_address field. If there is a desire to raise -// the limit for 10.0.0.1 or block it entirely it can be specified directly in the -// configuration. -// 2) ["authenticated": "false"], ["path": "/foo/bar"] -// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if -// configured that way in the service). -// 3) ["authenticated": "false"], ["path": "/foo/bar"], ["ip_address": "10.0.0.1"] -// What it does: Limits unauthenticated traffic to a specific path for a specific IP address. -// Like (1) we can raise/block specific IP addresses if we want with an override configuration. -// 4) ["authenticated": "true"], ["client_id": "foo"] -// What it does: Limits all traffic for an authenticated client "foo" -// 5) ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"] -// What it does: Limits traffic to a specific path for an authenticated client "foo" -// -// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired. -// This enables building complex application scenarios with a generic backend. -message RateLimitDescriptor { - message Entry { - string key = 1; - string value = 2; - } - - repeated Entry entries = 1; -} - -// Defines an actual rate limit in terms of requests per unit of time and the unit itself. -message RateLimit { - enum Unit { - UNKNOWN = 0; - SECOND = 1; - MINUTE = 2; - HOUR = 3; - DAY = 4; - } - - uint32 requests_per_unit = 1; - Unit unit = 2; -} - -// A response from a ShouldRateLimit call. -message RateLimitResponse { - enum Code { - UNKNOWN = 0; - OK = 1; - OVER_LIMIT = 2; - } - - message DescriptorStatus { - // The response code for an individual descriptor. - Code code = 1; - // The current limit as configured by the server. Useful for debugging, etc. - RateLimit current_limit = 2; - // The limit remaining in the current time unit. - uint32 limit_remaining = 3; - } - - // The overall response code which takes into account all of the descriptors that were passed - // in the RateLimitRequest message. - Code overall_code = 1; - // A list of DescriptorStatus messages which matches the length of the descriptor list passed - // in the RateLimitRequest. This can be used by the caller to determine which individual - // descriptors failed and/or what the currently configured limits are for all of them. 
- repeated DescriptorStatus statuses = 2; -} \ No newline at end of file diff --git a/script/install-glide b/script/install-glide deleted file mode 100755 index 8b450e880..000000000 --- a/script/install-glide +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -e - -which glide > /dev/null 2>&1 && exit 0 - -if test "Darwin" == "$(uname)" - then brew install glide -fi - -which glide > /dev/null 2>&1 || { - mkdir -p ./glide - - curl -L https://github.com/Masterminds/glide/releases/download/v0.12.2/glide-v0.12.2-linux-amd64.tar.gz | tar xz -C ./glide --strip-components=1 - chmod 755 -R ./glide - - if which sudo >/dev/null; - then sudo mv ./glide/glide /usr/local/bin/ - else - mv ./glide/glide /usr/local/bin/ - fi -} - -which glide > /dev/null 2>&1 \ No newline at end of file diff --git a/src/client_cmd/main.go b/src/client_cmd/main.go index 37d1f5fd0..d65b025ff 100644 --- a/src/client_cmd/main.go +++ b/src/client_cmd/main.go @@ -7,8 +7,8 @@ import ( "os" "strings" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "golang.org/x/net/context" "google.golang.org/grpc" ) diff --git a/src/config/config.go b/src/config/config.go index 70f3b9730..8f94715c9 100644 --- a/src/config/config.go +++ b/src/config/config.go @@ -1,8 +1,8 @@ package config import ( - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" "golang.org/x/net/context" ) diff --git a/src/config/config_impl.go b/src/config/config_impl.go index a0ad1ff4f..e19b5ce09 100644 --- a/src/config/config_impl.go +++ b/src/config/config_impl.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -39,7 +39,8 @@ type rateLimitDomain struct { } type rateLimitConfigImpl struct { - domains map[string]*rateLimitDomain + domains map[string]*rateLimitDomain + statsScope stats.Scope } var validKeys = map[string]bool{ @@ -197,8 +198,7 @@ func validateYamlKeys(config RateLimitConfigToLoad, config_map map[interface{}]i // Load a single YAML config file into the global config. // @param config specifies the file contents to load. -// @param statsScope supplies the owning scope. 
-func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad, statsScope stats.Scope) { +func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad) { // validate keys in config with generic map any := map[interface{}]interface{}{} err := yaml.Unmarshal([]byte(config.FileBytes), &any) @@ -228,10 +228,24 @@ func (this *rateLimitConfigImpl) loadConfig(config RateLimitConfigToLoad, statsS logger.Debugf("loading domain: %s", root.Domain) newDomain := &rateLimitDomain{rateLimitDescriptor{map[string]*rateLimitDescriptor{}, nil}} - newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, statsScope) + newDomain.loadDescriptors(config, root.Domain+".", root.Descriptors, this.statsScope) this.domains[root.Domain] = newDomain } +func (this *rateLimitConfigImpl) descriptorToKey(descriptor *pb_struct.RateLimitDescriptor) string { + rateLimitKey := "" + for _, entry := range descriptor.Entries { + if rateLimitKey != "" { + rateLimitKey += "." + } + rateLimitKey += entry.Key + if entry.Value != "" { + rateLimitKey += "_" + entry.Value + } + } + return rateLimitKey +} + func (this *rateLimitConfigImpl) Dump() string { ret := "" for _, domain := range this.domains { @@ -252,6 +266,17 @@ func (this *rateLimitConfigImpl) GetLimit( return rateLimit } + if descriptor.GetLimit() != nil { + rateLimitKey := domain + "." + this.descriptorToKey(descriptor) + rateLimitOverrideUnit := pb.RateLimitResponse_RateLimit_Unit(descriptor.GetLimit().GetUnit()) + rateLimit = NewRateLimit( + descriptor.GetLimit().GetRequestsPerUnit(), + rateLimitOverrideUnit, + rateLimitKey, + this.statsScope) + return rateLimit + } + descriptorsMap := value.descriptors for i, entry := range descriptor.Entries { // First see if key_value is in the map. If that isn't in the map we look for just key @@ -292,9 +317,9 @@ func (this *rateLimitConfigImpl) GetLimit( func NewRateLimitConfigImpl( configs []RateLimitConfigToLoad, statsScope stats.Scope) RateLimitConfig { - ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}} + ret := &rateLimitConfigImpl{map[string]*rateLimitDomain{}, statsScope} for _, config := range configs { - ret.loadConfig(config, statsScope) + ret.loadConfig(config) } return ret diff --git a/src/config_check_cmd/main.go b/src/config_check_cmd/main.go index d2defbfab..f9f3c7426 100644 --- a/src/config_check_cmd/main.go +++ b/src/config_check_cmd/main.go @@ -7,8 +7,8 @@ import ( "os" "path/filepath" + "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" ) func loadConfigs(allConfigs []config.RateLimitConfigToLoad) { diff --git a/src/redis/cache.go b/src/limiter/cache.go similarity index 95% rename from src/redis/cache.go rename to src/limiter/cache.go index e5617c552..9408126ca 100644 --- a/src/redis/cache.go +++ b/src/limiter/cache.go @@ -1,8 +1,8 @@ -package redis +package limiter import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - "github.com/lyft/ratelimit/src/config" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" "golang.org/x/net/context" ) diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go new file mode 100644 index 000000000..a06087056 --- /dev/null +++ b/src/limiter/cache_key.go @@ -0,0 +1,90 @@ +package limiter + +import ( + "bytes" + "strconv" + "sync" + + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" +) + +type CacheKeyGenerator struct { + // bytes.Buffer pool used to efficiently generate cache keys. + bufferPool sync.Pool +} + +func NewCacheKeyGenerator() CacheKeyGenerator { + return CacheKeyGenerator{bufferPool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }} +} + +type CacheKey struct { + Key string + // True if the key corresponds to a limit with a SECOND unit. False otherwise. + PerSecond bool +} + +func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { + return unit == pb.RateLimitResponse_RateLimit_SECOND +} + +// Convert a rate limit into a time divider. +// @param unit supplies the unit to convert. +// @return the divider to use in time computations. +func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { + switch unit { + case pb.RateLimitResponse_RateLimit_SECOND: + return 1 + case pb.RateLimitResponse_RateLimit_MINUTE: + return 60 + case pb.RateLimitResponse_RateLimit_HOUR: + return 60 * 60 + case pb.RateLimitResponse_RateLimit_DAY: + return 60 * 60 * 24 + } + + panic("should not get here") +} + +// Generate a cache key for a limit lookup. +// @param domain supplies the cache key domain. +// @param descriptor supplies the descriptor to generate the key for. +// @param limit supplies the rate limit to generate the key for (may be nil). +// @param now supplies the current unix time. +// @return CacheKey struct. +func (this *CacheKeyGenerator) GenerateCacheKey( + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { + + if limit == nil { + return CacheKey{ + Key: "", + PerSecond: false, + } + } + + b := this.bufferPool.Get().(*bytes.Buffer) + defer this.bufferPool.Put(b) + b.Reset() + + b.WriteString(domain) + b.WriteByte('_') + + for _, entry := range descriptor.Entries { + b.WriteString(entry.Key) + b.WriteByte('_') + b.WriteString(entry.Value) + b.WriteByte('_') + } + + divider := UnitToDivider(limit.Limit.Unit) + b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) + + return CacheKey{ + Key: b.String(), + PerSecond: isPerSecondLimit(limit.Limit.Unit)} +} diff --git a/src/redis/local_cache_stats.go b/src/limiter/local_cache_stats.go similarity index 98% rename from src/redis/local_cache_stats.go rename to src/limiter/local_cache_stats.go index 60a94194f..d0d59dc27 100644 --- a/src/redis/local_cache_stats.go +++ b/src/limiter/local_cache_stats.go @@ -1,4 +1,4 @@ -package redis +package limiter import ( "github.com/coocood/freecache" diff --git a/src/limiter/time.go b/src/limiter/time.go new file mode 100644 index 000000000..e6a779e70 --- /dev/null +++ b/src/limiter/time.go @@ -0,0 +1,40 @@ +package limiter + +import ( + "math/rand" + "sync" + "time" +) + +type timeSourceImpl struct{} + +func NewTimeSourceImpl() TimeSource { + return &timeSourceImpl{} +} + +func (this *timeSourceImpl) UnixNow() int64 { + return time.Now().Unix() +} + +// rand for jitter. 
+type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func NewLockedSource(seed int64) JitterRandSource { + return &lockedSource{src: rand.NewSource(seed)} +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index b3a34f49a..c76ef067c 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,95 +1,32 @@ package redis import ( - "bytes" "math" "math/rand" - "strconv" - "sync" - "time" "github.com/coocood/freecache" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - "github.com/lyft/ratelimit/src/assert" - "github.com/lyft/ratelimit/src/config" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/server" + "github.com/envoyproxy/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) type rateLimitCacheImpl struct { - pool Pool - // Optional Pool for a dedicated cache of per second limits. - // If this pool is nil, then the Cache will use the pool for all - // limits regardless of unit. If this pool is not nil, then it + client Client + // Optional Client for a dedicated cache of per second limits. + // If this client is nil, then the Cache will use the client for all + // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. - perSecondPool Pool - timeSource TimeSource + perSecondClient Client + timeSource limiter.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - // bytes.Buffer pool used to efficiently generate cache keys. - bufferPool sync.Pool - localCache *freecache.Cache -} - -// Convert a rate limit into a time divider. -// @param unit supplies the unit to convert. -// @return the divider to use in time computations. -func unitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { - switch unit { - case pb.RateLimitResponse_RateLimit_SECOND: - return 1 - case pb.RateLimitResponse_RateLimit_MINUTE: - return 60 - case pb.RateLimitResponse_RateLimit_HOUR: - return 60 * 60 - case pb.RateLimitResponse_RateLimit_DAY: - return 60 * 60 * 24 - } - - panic("should not get here") -} - -// Generate a cache key for a limit lookup. -// @param domain supplies the cache key domain. -// @param descriptor supplies the descriptor to generate the key for. -// @param limit supplies the rate limit to generate the key for (may be nil). -// @param now supplies the current unix time. -// @return cacheKey struct. 
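The locked source above lets one *rand.Rand be shared safely across concurrent requests when expiration jitter is enabled. A minimal sketch of how it is wired together, mirroring the jittered EXPIRE computation in the redis cache (the seed and the 300-second maximum are illustrative, not the service defaults):

package main

import (
	"fmt"
	"math/rand"
	"time"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"github.com/envoyproxy/ratelimit/src/limiter"
)

func main() {
	// Shared, mutex-protected source; safe for concurrent Int63n calls
	// from multiple DoLimit goroutines.
	jitterRand := rand.New(limiter.NewLockedSource(time.Now().Unix()))

	// Base TTL for the unit plus up to expirationJitterMaxSeconds of jitter,
	// so keys for the same window do not all expire at the same instant.
	var expirationJitterMaxSeconds int64 = 300 // illustrative value
	expirationSeconds := limiter.UnitToDivider(pb.RateLimitResponse_RateLimit_MINUTE)
	if expirationJitterMaxSeconds > 0 {
		expirationSeconds += jitterRand.Int63n(expirationJitterMaxSeconds)
	}
	fmt.Println("EXPIRE seconds:", expirationSeconds)
}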
-func (this *rateLimitCacheImpl) generateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) cacheKey { - - if limit == nil { - return cacheKey{ - key: "", - perSecond: false, - } - } - - b := this.bufferPool.Get().(*bytes.Buffer) - defer this.bufferPool.Put(b) - b.Reset() - - b.WriteString(domain) - b.WriteByte('_') - - for _, entry := range descriptor.Entries { - b.WriteString(entry.Key) - b.WriteByte('_') - b.WriteString(entry.Value) - b.WriteByte('_') - } - - divider := unitToDivider(limit.Limit.Unit) - b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) - - return cacheKey{ - key: b.String(), - perSecond: isPerSecondLimit(limit.Limit.Unit)} -} - -func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { - return unit == pb.RateLimitResponse_RateLimit_SECOND + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache } func max(a uint32, b uint32) uint32 { @@ -99,22 +36,9 @@ func max(a uint32, b uint32) uint32 { return b } -type cacheKey struct { - key string - // True if the key corresponds to a limit with a SECOND unit. False otherwise. - perSecond bool -} - -func pipelineAppend(conn Connection, key string, hitsAddend uint32, expirationSeconds int64) { - conn.PipeAppend("INCRBY", key, hitsAddend) - conn.PipeAppend("EXPIRE", key, expirationSeconds) -} - -func pipelineFetch(conn Connection) uint32 { - ret := uint32(conn.PipeResponse().Int()) - // Pop off EXPIRE response and check for error. - conn.PipeResponse() - return ret +func pipelineAppend(client Client, pipeline *Pipeline, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) { + *pipeline = client.PipeAppend(*pipeline, result, "INCRBY", key, hitsAddend) + *pipeline = client.PipeAppend(*pipeline, nil, "EXPIRE", key, expirationSeconds) } func (this *rateLimitCacheImpl) DoLimit( @@ -124,28 +48,18 @@ func (this *rateLimitCacheImpl) DoLimit( logger.Debugf("starting cache lookup") - conn := this.pool.Get() - defer this.pool.Put(conn) - - // Optional connection for per second limits. If the cache has a perSecondPool setup, - // then use a connection from the pool for per second limits. - var perSecondConn Connection = nil - if this.perSecondPool != nil { - perSecondConn = this.perSecondPool.Get() - defer this.perSecondPool.Put(perSecondConn) - } - // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. hitsAddend := max(1, request.HitsAddend) - // First build a list of all cache keys that we are actually going to hit. generateCacheKey() + // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() // returns an empty string in the key if there is no limit so that we can keep the arrays // all the same size. assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]cacheKey, len(request.Descriptors)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) now := this.timeSource.UnixNow() for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.generateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. 
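The new pipelineAppend above captures the shape of the radix-based driver: commands are queued into a Pipeline alongside a receiver for each result, and nothing hits the network until PipeDo executes the whole batch. A standalone sketch of the same INCRBY/EXPIRE round trip (it assumes a Redis reachable on localhost:6379; the key, pool size and null stats sink are placeholders):

package main

import (
	"fmt"

	"github.com/envoyproxy/ratelimit/src/redis"
	stats "github.com/lyft/gostats"
)

func main() {
	scope := stats.NewStore(stats.NewNullSink(), false).Scope("redis_pool")

	// Standalone node, no TLS or auth; a zero pipeline window and limit keep
	// implicit pipelining off, so PipeDo sends one explicit radix pipeline.
	client := redis.NewClientImpl(scope, false, "", "single", "localhost:6379", 2, 0, 0)
	defer client.Close()

	// Queue INCRBY and EXPIRE for one cache key, then execute both in a single
	// round trip and read back the incremented hit count.
	var hits uint32
	var pipeline redis.Pipeline
	pipeline = client.PipeAppend(pipeline, &hits, "INCRBY", "envoy_key_value_1234567860", 1)
	pipeline = client.PipeAppend(pipeline, nil, "EXPIRE", "envoy_key_value_1234567860", 60)
	if err := client.PipeDo(pipeline); err != nil {
		panic(err)
	}
	fmt.Println("hits after increase:", hits)
}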
if limits[i] != nil { @@ -154,43 +68,58 @@ func (this *rateLimitCacheImpl) DoLimit( } isOverLimitWithLocalCache := make([]bool, len(request.Descriptors)) + results := make([]uint32, len(request.Descriptors)) + var pipeline, perSecondPipeline Pipeline // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { continue } if this.localCache != nil { // Get returns the value or not found error. - _, err := this.localCache.Get([]byte(cacheKey.key)) + _, err := this.localCache.Get([]byte(cacheKey.Key)) if err == nil { isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.key) + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) continue } } - logger.Debugf("looking up cache key: %s", cacheKey.key) + logger.Debugf("looking up cache key: %s", cacheKey.Key) - expirationSeconds := unitToDivider(limits[i].Limit.Unit) + expirationSeconds := limiter.UnitToDivider(limits[i].Limit.Unit) if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. - if perSecondConn != nil && cacheKey.perSecond { - pipelineAppend(perSecondConn, cacheKey.key, hitsAddend, expirationSeconds) + if this.perSecondClient != nil && cacheKey.PerSecond { + if perSecondPipeline == nil { + perSecondPipeline = Pipeline{} + } + pipelineAppend(this.perSecondClient, &perSecondPipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } else { - pipelineAppend(conn, cacheKey.key, hitsAddend, expirationSeconds) + if pipeline == nil { + pipeline = Pipeline{} + } + pipelineAppend(this.client, &pipeline, cacheKey.Key, hitsAddend, &results[i], expirationSeconds) } } + if pipeline != nil { + checkError(this.client.PipeDo(pipeline)) + } + if perSecondPipeline != nil { + checkError(this.perSecondClient.PipeDo(perSecondPipeline)) + } + // Now fetch the pipeline. responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -212,21 +141,14 @@ func (this *rateLimitCacheImpl) DoLimit( continue } - var limitAfterIncrease uint32 - // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. - if this.perSecondPool != nil && cacheKey.perSecond { - limitAfterIncrease = pipelineFetch(perSecondConn) - } else { - limitAfterIncrease = pipelineFetch(conn) - } - + limitAfterIncrease := results[i] limitBeforeIncrease := limitAfterIncrease - hitsAddend overLimitThreshold := limits[i].Limit.RequestsPerUnit // The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio. // We need to know it in both the OK and OVER_LIMIT scenarios. nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * config.NearLimitRatio))) - logger.Debugf("cache key: %s current: %d", cacheKey.key, limitAfterIncrease) + logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease) if limitAfterIncrease > overLimitThreshold { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ @@ -257,9 +179,9 @@ func (this *rateLimitCacheImpl) DoLimit( // similar to mongo_1h, mongo_2h, etc. 
In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(cacheKey.key), []byte{}, int(unitToDivider(limits[i].Limit.Unit))) + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(limiter.UnitToDivider(limits[i].Limit.Unit))) if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.key) + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) } } } else { @@ -288,55 +210,33 @@ func (this *rateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func NewRateLimitCacheImpl(pool Pool, perSecondPool Pool, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache { +func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) limiter.RateLimitCache { return &rateLimitCacheImpl{ - pool: pool, - perSecondPool: perSecondPool, + client: client, + perSecondClient: perSecondClient, timeSource: timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, - bufferPool: newBufferPool(), + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), localCache: localCache, } } -func newBufferPool() sync.Pool { - return sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { + var perSecondPool Client + if s.RedisPerSecond { + perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + s.RedisPerSecondType, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } -} - -type timeSourceImpl struct{} - -func NewTimeSourceImpl() TimeSource { - return &timeSourceImpl{} -} - -func (this *timeSourceImpl) UnixNow() int64 { - return time.Now().Unix() -} - -// rand for jitter. -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func NewLockedSource(seed int64) JitterRandSource { - return &lockedSource{src: rand.NewSource(seed)} -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() + var otherPool Client + otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisType, s.RedisUrl, s.RedisPoolSize, + s.RedisPipelineWindow, s.RedisPipelineLimit) + + return NewRateLimitCacheImpl( + otherPool, + perSecondPool, + timeSource, + jitterRand, + expirationJitterMaxSeconds, + localCache) } diff --git a/src/redis/driver.go b/src/redis/driver.go index 1f4bea32f..7ffc0c7b7 100644 --- a/src/redis/driver.go +++ b/src/redis/driver.go @@ -1,5 +1,7 @@ package redis +import "github.com/mediocregopher/radix/v3" + // Errors that may be raised during config parsing. type RedisError string @@ -7,33 +9,41 @@ func (e RedisError) Error() string { return string(e) } -// Interface for a redis connection pool. -type Pool interface { - // Get a connection from the pool. Call Put() on the connection when done. 
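The local-cache branch above stores only an empty marker: once a key is known to be over limit, later requests in the same time window can be rejected without another Redis round trip. A small sketch of that idea in isolation, using freecache directly (the cache size and key are arbitrary; the service sizes the cache from its settings):

package main

import (
	"fmt"

	"github.com/coocood/freecache"
	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"github.com/envoyproxy/ratelimit/src/limiter"
)

func main() {
	localCache := freecache.NewCache(10 * 1024 * 1024)
	key := []byte("mongo_cps_database_users_1234567860")

	// Mark the key as over limit for the remainder of its unit, exactly like
	// DoLimit does: an empty value with a TTL of one unit (here, one minute).
	ttl := int(limiter.UnitToDivider(pb.RateLimitResponse_RateLimit_MINUTE))
	if err := localCache.Set(key, []byte{}, ttl); err != nil {
		fmt.Println("failed to set local cache key:", err)
	}

	// A later request checks the marker first and skips Redis when it is found.
	if _, err := localCache.Get(key); err == nil {
		fmt.Println("over limit, short-circuited by the local cache")
	}
}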
- // Throws RedisError if a connection can not be obtained. - Get() Connection - - // Put a connection back into the pool. - // @param c supplies the connection to put back. - Put(c Connection) -} +// Interface for a redis client. +type Client interface { + // DoCmd is used to perform a redis command and retrieve a result. + // + // @param rcv supplies receiver for the result. + // @param cmd supplies the command to append. + // @param key supplies the key to append. + // @param args supplies the additional arguments. + DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error -// Interface for a redis connection. -type Connection interface { - // Append a command onto the pipeline queue. - // @param command supplies the command to append. + // PipeAppend append a command onto the pipeline queue. + // + // @param pipeline supplies the queue for pending commands. + // @param rcv supplies receiver for the result. + // @param cmd supplies the command to append. + // @param key supplies the key to append. // @param args supplies the additional arguments. - PipeAppend(command string, args ...interface{}) + PipeAppend(pipeline Pipeline, rcv interface{}, cmd, key string, args ...interface{}) Pipeline - // Execute the pipeline queue and wait for a response. - // @return a response object. - // Throws a RedisError if there was an error fetching the response. - PipeResponse() Response -} + // PipeDo writes multiple commands to a Conn in + // a single write, then reads their responses in a single read. This reduces + // network delay into a single round-trip. + // + // @param pipeline supplies the queue for pending commands. + PipeDo(pipeline Pipeline) error + + // Once Close() is called all future method calls on the Client will return + // an error + Close() error -// Interface for a redis response. -type Response interface { - // @return the response as an integer. - // Throws a RedisError if the response is not convertable to an integer. - Int() int64 + // NumActiveConns return number of active connections, used in testing. + NumActiveConns() int + + // ImplicitPipeliningEnabled return true if implicit pipelining is enabled. 
+ ImplicitPipeliningEnabled() bool } + +type Pipeline []radix.CmdAction diff --git a/src/redis/driver_impl.go b/src/redis/driver_impl.go index 5405bc8c5..18e213f1b 100644 --- a/src/redis/driver_impl.go +++ b/src/redis/driver_impl.go @@ -2,12 +2,14 @@ package redis import ( "crypto/tls" - "net" + "fmt" + "strings" + "time" + + "github.com/mediocregopher/radix/v3/trace" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/assert" - "github.com/mediocregopher/radix.v2/pool" - "github.com/mediocregopher/radix.v2/redis" + "github.com/mediocregopher/radix/v3" logger "github.com/sirupsen/logrus" ) @@ -25,18 +27,23 @@ func newPoolStats(scope stats.Scope) poolStats { return ret } -type poolImpl struct { - pool *pool.Pool - stats poolStats -} - -type connectionImpl struct { - client *redis.Client - pending uint +func poolTrace(ps *poolStats) trace.PoolTrace { + return trace.PoolTrace{ + ConnCreated: func(_ trace.PoolConnCreated) { + ps.connectionTotal.Add(1) + ps.connectionActive.Add(1) + }, + ConnClosed: func(_ trace.PoolConnClosed) { + ps.connectionActive.Sub(1) + ps.connectionClose.Add(1) + }, + } } -type responseImpl struct { - response *redis.Resp +type clientImpl struct { + client radix.Client + stats poolStats + implicitPipelining bool } func checkError(err error) { @@ -45,78 +52,113 @@ func checkError(err error) { } } -func (this *poolImpl) Get() Connection { - client, err := this.pool.Get() - checkError(err) - this.stats.connectionActive.Inc() - this.stats.connectionTotal.Inc() - return &connectionImpl{client, 0} -} +func NewClientImpl(scope stats.Scope, useTls bool, auth string, redisType string, url string, poolSize int, + pipelineWindow time.Duration, pipelineLimit int) Client { + logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) -func (this *poolImpl) Put(c Connection) { - impl := c.(*connectionImpl) - this.stats.connectionActive.Dec() - if impl.pending == 0 { - this.pool.Put(impl.client) - } else { - // radix does not appear to track if we attempt to put a connection back with pipelined - // responses that have not been flushed. If we are in this state, just kill the connection - // and don't put it back in the pool. - impl.client.Close() - this.stats.connectionClose.Inc() - } -} + df := func(network, addr string) (radix.Conn, error) { + var dialOpts []radix.DialOpt -func NewPoolImpl(scope stats.Scope, useTls bool, auth string, url string, poolSize int) Pool { - logger.Warnf("connecting to redis on %s with pool size %d", url, poolSize) - df := func(network, addr string) (*redis.Client, error) { - var conn net.Conn var err error if useTls { - conn, err = tls.Dial("tcp", addr, &tls.Config{}) - } else { - conn, err = net.Dial("tcp", addr) + dialOpts = append(dialOpts, radix.DialUseTLS(&tls.Config{})) } - if err != nil { - return nil, err - } - client, err := redis.NewClient(conn) if err != nil { return nil, err } if auth != "" { logger.Warnf("enabling authentication to redis on %s", url) - if err = client.Cmd("AUTH", auth).Err; err != nil { - client.Close() - return nil, err - } + + dialOpts = append(dialOpts, radix.DialAuthPass(auth)) } - return client, nil + + return radix.Dial(network, addr, dialOpts...) 
+ } + + stats := newPoolStats(scope) + + opts := []radix.PoolOpt{radix.PoolConnFunc(df), radix.PoolWithTrace(poolTrace(&stats))} + + implicitPipelining := true + if pipelineWindow == 0 && pipelineLimit == 0 { + implicitPipelining = false + } else { + opts = append(opts, radix.PoolPipelineWindow(pipelineWindow, pipelineLimit)) } - pool, err := pool.NewCustom("tcp", url, poolSize, df) + logger.Debugf("Implicit pipelining enabled: %v", implicitPipelining) + + poolFunc := func(network, addr string) (radix.Client, error) { + return radix.NewPool(network, addr, poolSize, opts...) + } + + var client radix.Client + var err error + switch strings.ToLower(redisType) { + case "single": + client, err = poolFunc("tcp", url) + case "cluster": + urls := strings.Split(url, ",") + if implicitPipelining == false { + panic(RedisError("Implicit Pipelining must be enabled to work with Redis Cluster Mode. Set values for REDIS_PIPELINE_WINDOW or REDIS_PIPELINE_LIMIT to enable implicit pipelining")) + } + logger.Warnf("Creating cluster with urls %v", urls) + client, err = radix.NewCluster(urls, radix.ClusterPoolFunc(poolFunc)) + case "sentinel": + urls := strings.Split(url, ",") + if len(urls) < 2 { + panic(RedisError("Expected master name and a list of urls for the sentinels, in the format: ,,...,")) + } + client, err = radix.NewSentinel(urls[0], urls[1:], radix.SentinelPoolFunc(poolFunc)) + default: + panic(RedisError("Unrecognized redis type " + redisType)) + } + checkError(err) - return &poolImpl{ - pool: pool, - stats: newPoolStats(scope)} + + // Check if connection is good + var pingResponse string + checkError(client.Do(radix.Cmd(&pingResponse, "PING"))) + if pingResponse != "PONG" { + checkError(fmt.Errorf("connecting redis error: %s", pingResponse)) + } + + return &clientImpl{ + client: client, + stats: stats, + implicitPipelining: implicitPipelining, + } } -func (this *connectionImpl) PipeAppend(cmd string, args ...interface{}) { - this.client.PipeAppend(cmd, args...) - this.pending++ +func (c *clientImpl) DoCmd(rcv interface{}, cmd, key string, args ...interface{}) error { + return c.client.Do(radix.FlatCmd(rcv, cmd, key, args...)) } -func (this *connectionImpl) PipeResponse() Response { - assert.Assert(this.pending > 0) - this.pending-- +func (c *clientImpl) Close() error { + return c.client.Close() +} - resp := this.client.PipeResp() - checkError(resp.Err) - return &responseImpl{resp} +func (c *clientImpl) NumActiveConns() int { + return int(c.stats.connectionActive.Value()) } -func (this *responseImpl) Int() int64 { - i, err := this.response.Int64() - checkError(err) - return i +func (c *clientImpl) PipeAppend(pipeline Pipeline, rcv interface{}, cmd, key string, args ...interface{}) Pipeline { + return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) +} + +func (c *clientImpl) PipeDo(pipeline Pipeline) error { + if c.implicitPipelining { + for _, action := range pipeline { + if err := c.client.Do(action); err != nil { + return err + } + } + return nil + } + + return c.client.Do(radix.Pipeline(pipeline...)) +} + +func (c *clientImpl) ImplicitPipeliningEnabled() bool { + return c.implicitPipelining } diff --git a/src/server/server.go b/src/server/server.go index 820085744..d0570868b 100644 --- a/src/server/server.go +++ b/src/server/server.go @@ -1,6 +1,7 @@ package server import ( + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "net/http" "github.com/lyft/goruntime/loader" @@ -25,6 +26,7 @@ type Server interface { * Add an HTTP endpoint to the local debug port. 
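NewClientImpl above selects the underlying radix client from redisType: with "sentinel" the url is expected to be the master name followed by a comma-separated list of sentinel addresses, and with "cluster" implicit pipelining must be enabled through the pipeline window/limit. A minimal sketch of a direct call, assuming a gostats scope named scope and placeholder addresses (the runner normally wires this up from Settings rather than calling NewClientImpl directly):

    // Sketch only; addresses are placeholders.
    func newSentinelClientSketch(scope stats.Scope) redis.Client {
        return redis.NewClientImpl(
            scope.Scope("redis_pool"),
            false,      // useTls
            "",         // auth
            "sentinel", // redisType
            "mymaster,localhost:26379,localhost:26380,localhost:26381", // master name, then sentinel addresses
            10, // poolSize
            0,  // pipelineWindow: 0 together with pipelineLimit 0 disables implicit pipelining
            0,  // pipelineLimit
        )
    }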
*/ AddDebugHttpEndpoint(path string, help string, handler http.HandlerFunc) + AddJsonHandler(pb.RateLimitServiceServer) /** * Returns the embedded gRPC server to be used for registering gRPC endpoints. diff --git a/src/server/server_impl.go b/src/server/server_impl.go index d685f847c..8652bf5d8 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -1,15 +1,15 @@ package server import ( + "bytes" "expvar" "fmt" "io" "net/http" "net/http/pprof" + "path/filepath" "sort" - "github.com/lyft/ratelimit/src/redis" - "os" "os/signal" "syscall" @@ -17,11 +17,14 @@ import ( "net" "github.com/coocood/freecache" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/settings" + "github.com/golang/protobuf/jsonpb" "github.com/gorilla/mux" reuseport "github.com/kavu/go_reuseport" "github.com/lyft/goruntime/loader" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/health" @@ -52,6 +55,54 @@ func (server *server) AddDebugHttpEndpoint(path string, help string, handler htt server.debugListener.endpoints[path] = help } +// create an http/1 handler at the /json endpoint which allows this ratelimit service to work with +// clients that cannot use the gRPC interface (e.g. lua) +// example usage from cURL with domain "dummy" and descriptor "perday": +// echo '{"domain": "dummy", "descriptors": [{"entries": [{"key": "perday"}]}]}' | curl -vvvXPOST --data @/dev/stdin localhost:8080/json +func NewJsonHandler(svc pb.RateLimitServiceServer) func(http.ResponseWriter, *http.Request) { + // Default options include enums as strings and no indentation.
+ m := &jsonpb.Marshaler{} + + return func(writer http.ResponseWriter, request *http.Request) { + var req pb.RateLimitRequest + + if err := jsonpb.Unmarshal(request.Body, &req); err != nil { + logger.Warnf("error: %s", err.Error()) + http.Error(writer, err.Error(), http.StatusBadRequest) + return + } + + resp, err := svc.ShouldRateLimit(nil, &req) + if err != nil { + logger.Warnf("error: %s", err.Error()) + http.Error(writer, err.Error(), http.StatusBadRequest) + return + } + + logger.Debugf("resp:%s", resp) + + buf := bytes.NewBuffer(nil) + err = m.Marshal(buf, resp) + if err != nil { + logger.Errorf("error marshaling proto3 to json: %s", err.Error()) + http.Error(writer, "error marshaling proto3 to json: "+err.Error(), http.StatusInternalServerError) + return + } + + writer.Header().Set("Content-Type", "application/json") + if resp == nil || resp.OverallCode == pb.RateLimitResponse_UNKNOWN { + writer.WriteHeader(http.StatusInternalServerError) + } else if resp.OverallCode == pb.RateLimitResponse_OVER_LIMIT { + writer.WriteHeader(http.StatusTooManyRequests) + } + writer.Write(buf.Bytes()) + } +} + +func (server *server) AddJsonHandler(svc pb.RateLimitServiceServer) { + server.router.HandleFunc("/json", NewJsonHandler(svc)) +} + func (server *server) GrpcServer() *grpc.Server { return server.grpcServer } @@ -126,7 +177,7 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts ret.scope = ret.store.Scope(name) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { - ret.store.AddStatGenerator(redis.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } // setup runtime @@ -137,12 +188,22 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts loaderOpts = append(loaderOpts, loader.AllowDotFiles) } - ret.runtime = loader.New( - s.RuntimePath, - s.RuntimeSubdirectory, - ret.store.Scope("runtime"), - &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, - loaderOpts...) + if s.RuntimeWatchRoot { + ret.runtime = loader.New( + s.RuntimePath, + s.RuntimeSubdirectory, + ret.store.Scope("runtime"), + &loader.SymlinkRefresher{RuntimePath: s.RuntimePath}, + loaderOpts...) + + } else { + ret.runtime = loader.New( + filepath.Join(s.RuntimePath, s.RuntimeSubdirectory), + "config", + ret.store.Scope("runtime"), + &loader.DirectoryRefresher{}, + loaderOpts...) 
+ } // setup http router ret.router = mux.NewRouter() diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index d9817721f..08b392d21 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -4,12 +4,13 @@ import ( "strings" "sync" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/assert" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" - "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/assert" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" + stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -52,10 +53,11 @@ type service struct { configLoader config.RateLimitConfigLoader config config.RateLimitConfig runtimeUpdateEvent chan int - cache redis.RateLimitCache + cache limiter.RateLimitCache stats serviceStats rlStatsScope stats.Scope legacy *legacyService + runtimeWatchRoot bool } func (this *service) reloadConfig() { @@ -74,7 +76,7 @@ func (this *service) reloadConfig() { files := []config.RateLimitConfigToLoad{} snapshot := this.runtime.Snapshot() for _, key := range snapshot.Keys() { - if !strings.HasPrefix(key, "config.") { + if this.runtimeWatchRoot && !strings.HasPrefix(key, "config.") { continue } @@ -174,8 +176,8 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { return this.config } -func NewService(runtime loader.IFace, cache redis.RateLimitCache, - configLoader config.RateLimitConfigLoader, stats stats.Scope) RateLimitServiceServer { +func NewService(runtime loader.IFace, cache limiter.RateLimitCache, + configLoader config.RateLimitConfigLoader, stats stats.Scope, runtimeWatchRoot bool) RateLimitServiceServer { newService := &service{ runtime: runtime, @@ -186,6 +188,7 @@ func NewService(runtime loader.IFace, cache redis.RateLimitCache, cache: cache, stats: newServiceStats(stats), rlStatsScope: stats.Scope("rate_limit"), + runtimeWatchRoot: runtimeWatchRoot, } newService.legacy = &legacyService{ s: newService, diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index a8218279b..17112675c 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -1,10 +1,11 @@ package ratelimit import ( - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - "github.com/golang/protobuf/jsonpb" + core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" "github.com/lyft/gostats" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" "golang.org/x/net/context" ) @@ -12,7 +13,7 @@ type RateLimitLegacyServiceServer interface { pb_legacy.RateLimitServiceServer } -// legacyService is used to implement ratelimit.proto (https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto) +// legacyService is used to implement v2 rls.proto (https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto) // the legacyService receives RateLimitRequests, converts the request, and calls the service's ShouldRateLimit 
method. type legacyService struct { s *service @@ -62,20 +63,32 @@ func ConvertLegacyRequest(legacyRequest *pb_legacy.RateLimitRequest) (*pb.RateLi if legacyRequest == nil { return nil, nil } - - m := &jsonpb.Marshaler{} - s, err := m.MarshalToString(legacyRequest) - if err != nil { - return nil, err + request := &pb.RateLimitRequest{ + Domain: legacyRequest.GetDomain(), + HitsAddend: legacyRequest.GetHitsAddend(), } - - req := &pb.RateLimitRequest{} - err = jsonpb.UnmarshalString(s, req) - if err != nil { - return nil, err + if legacyRequest.GetDescriptors() != nil { + descriptors := make([]*pb_struct.RateLimitDescriptor, len(legacyRequest.GetDescriptors())) + for i, descriptor := range legacyRequest.GetDescriptors() { + if descriptor != nil { + descriptors[i] = &pb_struct.RateLimitDescriptor{} + if descriptor.GetEntries() != nil { + entries := make([]*pb_struct.RateLimitDescriptor_Entry, len(descriptor.GetEntries())) + for j, entry := range descriptor.GetEntries() { + if entry != nil { + entries[j] = &pb_struct.RateLimitDescriptor_Entry{ + Key: entry.GetKey(), + Value: entry.GetValue(), + } + } + } + descriptors[i].Entries = entries + } + } + } + request.Descriptors = descriptors } - - return req, nil + return request, nil } func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitResponse, error) { @@ -83,17 +96,55 @@ func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitRespon return nil, nil } - m := &jsonpb.Marshaler{} - s, err := m.MarshalToString(response) - if err != nil { - return nil, err + legacyResponse := &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_Code(response.GetOverallCode()), } - resp := &pb_legacy.RateLimitResponse{} - err = jsonpb.UnmarshalString(s, resp) - if err != nil { - return nil, err + if response.GetStatuses() != nil { + statuses := make([]*pb_legacy.RateLimitResponse_DescriptorStatus, len(response.GetStatuses())) + for i, status := range response.GetStatuses() { + if status != nil { + statuses[i] = &pb_legacy.RateLimitResponse_DescriptorStatus{ + Code: pb_legacy.RateLimitResponse_Code(status.GetCode()), + LimitRemaining: status.GetLimitRemaining(), + } + if status.GetCurrentLimit() != nil { + statuses[i].CurrentLimit = &pb_legacy.RateLimitResponse_RateLimit{ + Name: status.GetCurrentLimit().GetName(), + RequestsPerUnit: status.GetCurrentLimit().GetRequestsPerUnit(), + Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(status.GetCurrentLimit().GetUnit()), + } + } + } + } + legacyResponse.Statuses = statuses + } + + if response.GetRequestHeadersToAdd() != nil { + requestHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetRequestHeadersToAdd())) + for i, header := range response.GetRequestHeadersToAdd() { + if header != nil { + requestHeadersToAdd[i] = &core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.RequestHeadersToAdd = requestHeadersToAdd + } + + if response.GetResponseHeadersToAdd() != nil { + responseHeadersToAdd := make([]*core_legacy.HeaderValue, len(response.GetResponseHeadersToAdd())) + for i, header := range response.GetResponseHeadersToAdd() { + if header != nil { + responseHeadersToAdd[i] = &core_legacy.HeaderValue{ + Key: header.GetKey(), + Value: header.GetValue(), + } + } + } + legacyResponse.Headers = responseHeadersToAdd } - return resp, nil + return legacyResponse, nil } diff --git a/src/service_cmd/main.go b/src/service_cmd/main.go index 5df1ab032..a53b362b6 100644 --- a/src/service_cmd/main.go +++ 
b/src/service_cmd/main.go @@ -1,6 +1,6 @@ package main -import "github.com/lyft/ratelimit/src/service_cmd/runner" +import "github.com/envoyproxy/ratelimit/src/service_cmd/runner" func main() { runner := runner.NewRunner() diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 21c07d47d..80e8e7814 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -4,20 +4,22 @@ import ( "io" "math/rand" "net/http" + "strings" "time" stats "github.com/lyft/gostats" "github.com/coocood/freecache" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" - "github.com/lyft/ratelimit/src/server" - ratelimit "github.com/lyft/ratelimit/src/service" - "github.com/lyft/ratelimit/src/settings" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis" + "github.com/envoyproxy/ratelimit/src/server" + ratelimit "github.com/envoyproxy/ratelimit/src/service" + "github.com/envoyproxy/ratelimit/src/settings" logger "github.com/sirupsen/logrus" ) @@ -42,6 +44,16 @@ func (runner *Runner) Run() { } else { logger.SetLevel(logLevel) } + if strings.ToLower(s.LogFormat) == "json" { + logger.SetFormatter(&logger.JSONFormatter{ + TimestampFormat: time.RFC3339Nano, + FieldMap: logger.FieldMap{ + logger.FieldKeyTime: "@timestamp", + logger.FieldKeyMsg: "@message", + }, + }) + } + var localCache *freecache.Cache if s.LocalCacheSizeInBytes != 0 { localCache = freecache.NewCache(s.LocalCacheSizeInBytes) @@ -49,24 +61,19 @@ func (runner *Runner) Run() { srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) - var perSecondPool redis.Pool - if s.RedisPerSecond { - perSecondPool = redis.NewPoolImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, s.RedisPerSecondUrl, s.RedisPerSecondPoolSize) - } - var otherPool redis.Pool - otherPool = redis.NewPoolImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize) - service := ratelimit.NewService( srv.Runtime(), - redis.NewRateLimitCacheImpl( - otherPool, - perSecondPool, - redis.NewTimeSourceImpl(), - rand.New(redis.NewLockedSource(time.Now().Unix())), - s.ExpirationJitterMaxSeconds, - localCache), + redis.NewRateLimiterCacheImplFromSettings( + s, + localCache, + srv, + limiter.NewTimeSourceImpl(), + rand.New(limiter.NewLockedSource(time.Now().Unix())), + s.ExpirationJitterMaxSeconds), config.NewRateLimitConfigLoaderImpl(), - srv.Scope().Scope("service")) + srv.Scope().Scope("service"), + s.RuntimeWatchRoot, + ) srv.AddDebugHttpEndpoint( "/rlconfig", @@ -75,10 +82,12 @@ func (runner *Runner) Run() { io.WriteString(writer, service.GetCurrentConfig().Dump()) }) + srv.AddJsonHandler(service) + // Ratelimit is compatible with two proto definitions - // 1. data-plane-api rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto + // 1. data-plane-api v3 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v3/rls.proto pb.RegisterRateLimitServiceServer(srv.GrpcServer(), service) - // 2. 
ratelimit.proto defined in this repository: https://github.com/lyft/ratelimit/blob/0ded92a2af8261d43096eba4132e45b99a3b8b14/proto/ratelimit/ratelimit.proto + // 1. data-plane-api v2 rls.proto: https://github.com/envoyproxy/data-plane-api/blob/master/envoy/service/ratelimit/v2/rls.proto pb_legacy.RegisterRateLimitServiceServer(srv.GrpcServer(), service.GetLegacyService()) // (1) is the current definition, and (2) is the legacy definition. diff --git a/src/settings/settings.go b/src/settings/settings.go index 8d1335bbb..5073477cb 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -1,6 +1,8 @@ package settings import ( + "time" + "github.com/kelseyhightower/envconfig" "google.golang.org/grpc" ) @@ -9,29 +11,37 @@ type Settings struct { // runtime options GrpcUnaryInterceptor grpc.ServerOption // env config - Port int `envconfig:"PORT" default:"8080"` - GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` - DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` - UseStatsd bool `envconfig:"USE_STATSD" default:"true"` - StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` - StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` - RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` - RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` - RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` - LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` - RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` - RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` - RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` - RedisAuth string `envconfig:"REDIS_AUTH" default:""` - RedisTls bool `envconfig:"REDIS_TLS" default:"false"` - RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` - RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` - RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` - RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` - RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` - RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` - ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` - LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` + Port int `envconfig:"PORT" default:"8080"` + GrpcPort int `envconfig:"GRPC_PORT" default:"8081"` + DebugPort int `envconfig:"DEBUG_PORT" default:"6070"` + UseStatsd bool `envconfig:"USE_STATSD" default:"true"` + StatsdHost string `envconfig:"STATSD_HOST" default:"localhost"` + StatsdPort int `envconfig:"STATSD_PORT" default:"8125"` + RuntimePath string `envconfig:"RUNTIME_ROOT" default:"/srv/runtime_data/current"` + RuntimeSubdirectory string `envconfig:"RUNTIME_SUBDIRECTORY"` + RuntimeIgnoreDotFiles bool `envconfig:"RUNTIME_IGNOREDOTFILES" default:"false"` + RuntimeWatchRoot bool `envconfig:"RUNTIME_WATCH_ROOT" default:"true"` + LogLevel string `envconfig:"LOG_LEVEL" default:"WARN"` + LogFormat string `envconfig:"LOG_FORMAT" default:"text"` + RedisSocketType string `envconfig:"REDIS_SOCKET_TYPE" default:"unix"` + RedisType string `envconfig:"REDIS_TYPE" default:"SINGLE"` + RedisUrl string `envconfig:"REDIS_URL" default:"/var/run/nutcracker/ratelimit.sock"` + RedisPoolSize int `envconfig:"REDIS_POOL_SIZE" default:"10"` + RedisAuth string `envconfig:"REDIS_AUTH" 
default:""` + RedisTls bool `envconfig:"REDIS_TLS" default:"false"` + RedisPipelineWindow time.Duration `envconfig:"REDIS_PIPELINE_WINDOW" default:"0"` + RedisPipelineLimit int `envconfig:"REDIS_PIPELINE_LIMIT" default:"0"` + RedisPerSecond bool `envconfig:"REDIS_PERSECOND" default:"false"` + RedisPerSecondSocketType string `envconfig:"REDIS_PERSECOND_SOCKET_TYPE" default:"unix"` + RedisPerSecondType string `envconfig:"REDIS_PERSECOND_TYPE" default:"SINGLE"` + RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` + RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` + RedisPerSecondAuth string `envconfig:"REDIS_PERSECOND_AUTH" default:""` + RedisPerSecondTls bool `envconfig:"REDIS_PERSECOND_TLS" default:"false"` + RedisPerSecondPipelineWindow time.Duration `envconfig:"REDIS_PERSECOND_PIPELINE_WINDOW" default:"0"` + RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"` + ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` + LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"` } type Option func(*Settings) diff --git a/test/common/common.go b/test/common/common.go index ba3e79045..b15c41d5c 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -1,11 +1,15 @@ package common import ( + "fmt" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" "sync" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" + pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" ) type TestStatSink struct { @@ -57,14 +61,19 @@ func NewRateLimitRequestLegacy(domain string, descriptors [][][2]string, hitsAdd request := &pb_legacy.RateLimitRequest{} request.Domain = domain for _, descriptor := range descriptors { - newDescriptor := &pb_legacy.RateLimitDescriptor{} + newDescriptor := &pb_struct_legacy.RateLimitDescriptor{} for _, entry := range descriptor { newDescriptor.Entries = append( newDescriptor.Entries, - &pb_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) + &pb_struct_legacy.RateLimitDescriptor_Entry{Key: entry[0], Value: entry[1]}) } request.Descriptors = append(request.Descriptors, newDescriptor) } request.HitsAddend = hitsAddend return request } + +func AssertProtoEqual(assert *assert.Assertions, expected proto.Message, actual proto.Message) { + assert.True(proto.Equal(expected, actual), + fmt.Sprintf("These two protobuf messages are not equal:\nexpected: %v\nactual: %v", expected, actual)) +} diff --git a/test/config/config_test.go b/test/config/config_test.go index be5ff3473..966381658 100644 --- a/test/config/config_test.go +++ b/test/config/config_test.go @@ -1,13 +1,15 @@ package config_test import ( + "github.com/envoyproxy/ratelimit/test/common" "io/ioutil" "testing" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + pb 
"github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + pb_type "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "github.com/envoyproxy/ratelimit/src/config" "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" "github.com/stretchr/testify/assert" ) @@ -150,6 +152,80 @@ func TestBasicConfig(t *testing.T) { assert.EqualValues(1, stats.NewCounter("test-domain.key4.near_limit").Value()) } +func TestConfigLimitOverride(t *testing.T) { + assert := assert.New(t) + stats := stats.NewStore(stats.NewNullSink(), false) + rlConfig := config.NewRateLimitConfigImpl(loadFile("basic_config.yaml"), stats) + rlConfig.Dump() + // No matching domain + assert.Nil(rlConfig.GetLimit(nil, "foo_domain", &pb_struct.RateLimitDescriptor{ + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 10, Unit: pb_type.RateLimitUnit_DAY, + }, + })) + rl := rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 10, Unit: pb_type.RateLimitUnit_DAY, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something", rl.FullKey) + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 10, + Unit: pb.RateLimitResponse_RateLimit_DAY, + }, rl.Limit) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + + // Change in override value doesn't erase stats + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 42, Unit: pb_type.RateLimitUnit_HOUR, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something", rl.FullKey) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 42, + Unit: pb.RateLimitResponse_RateLimit_HOUR, + }, rl.Limit) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.total_hits").Value()) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.over_limit").Value()) + assert.EqualValues(2, stats.NewCounter("test-domain.key1_value1.subkey1_something.near_limit").Value()) + + // Different value creates a different counter + rl = rlConfig.GetLimit( + nil, "test-domain", + &pb_struct.RateLimitDescriptor{ + Entries: []*pb_struct.RateLimitDescriptor_Entry{{Key: "key1", Value: "value1"}, {Key: "subkey1", Value: "something_else"}}, + Limit: &pb_struct.RateLimitDescriptor_RateLimitOverride{ + RequestsPerUnit: 42, Unit: pb_type.RateLimitUnit_HOUR, + }, + }) + assert.Equal("test-domain.key1_value1.subkey1_something_else", rl.FullKey) + common.AssertProtoEqual(assert, &pb.RateLimitResponse_RateLimit{ + RequestsPerUnit: 42, + Unit: pb.RateLimitResponse_RateLimit_HOUR, + }, rl.Limit) + rl.Stats.TotalHits.Inc() + rl.Stats.OverLimit.Inc() + rl.Stats.NearLimit.Inc() + assert.EqualValues(1, 
stats.NewCounter("test-domain.key1_value1.subkey1_something_else.total_hits").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.over_limit").Value()) + assert.EqualValues(1, stats.NewCounter("test-domain.key1_value1.subkey1_something_else.near_limit").Value()) +} + func expectConfigPanic(t *testing.T, call func(), expectedError string) { assert := assert.New(t) defer func() { diff --git a/test/integration/conf/sentinel-pre-second.conf b/test/integration/conf/sentinel-pre-second.conf new file mode 100644 index 000000000..51188eeb8 --- /dev/null +++ b/test/integration/conf/sentinel-pre-second.conf @@ -0,0 +1,3 @@ +sentinel monitor mymaster 127.0.0.1 6397 2 +sentinel auth-pass mymaster password123 +sentinel down-after-milliseconds mymaster 3000 diff --git a/test/integration/conf/sentinel.conf b/test/integration/conf/sentinel.conf new file mode 100644 index 000000000..b56cefc9a --- /dev/null +++ b/test/integration/conf/sentinel.conf @@ -0,0 +1,3 @@ +sentinel monitor mymaster 127.0.0.1 6392 2 +sentinel auth-pass mymaster password123 +sentinel down-after-milliseconds mymaster 3000 diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 697cf896c..01a2c950e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -3,17 +3,21 @@ package integration_test import ( + "bytes" "fmt" + "io" + "io/ioutil" "math/rand" + "net/http" "os" "strconv" "testing" "time" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" - "github.com/lyft/ratelimit/src/service_cmd/runner" - "github.com/lyft/ratelimit/test/common" + pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/service_cmd/runner" + "github.com/envoyproxy/ratelimit/test/common" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" @@ -32,16 +36,16 @@ func newDescriptorStatus( func newDescriptorStatusLegacy( status pb_legacy.RateLimitResponse_Code, requestsPerUnit uint32, - unit pb_legacy.RateLimit_Unit, limitRemaining uint32) *pb_legacy.RateLimitResponse_DescriptorStatus { + unit pb_legacy.RateLimitResponse_RateLimit_Unit, limitRemaining uint32) *pb_legacy.RateLimitResponse_DescriptorStatus { return &pb_legacy.RateLimitResponse_DescriptorStatus{ Code: status, - CurrentLimit: &pb_legacy.RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, + CurrentLimit: &pb_legacy.RateLimitResponse_RateLimit{RequestsPerUnit: requestsPerUnit, Unit: unit}, LimitRemaining: limitRemaining, } } -// TODO: Once adding the ability of stopping the server in the runner (https://github.com/lyft/ratelimit/issues/119), +// TODO: Once adding the ability of stopping the server in the runner (https://github.com/envoyproxy/ratelimit/issues/119), // stop the server at the end of each test, thus we can reuse the grpc port among these integration tests. 
func TestBasicConfig(t *testing.T) { t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) @@ -64,6 +68,35 @@ func TestBasicAuthConfig(t *testing.T) { t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuth("18093", "true", "1000")) } +func TestBasicAuthConfigWithRedisCluster(t *testing.T) { + t.Run("WithoutPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8191", "false", "0")) + t.Run("WithPerSecondRedisAuth", testBasicConfigAuthWithRedisCluster("8193", "true", "0")) + t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18191", "false", "1000")) + t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicConfigAuthWithRedisCluster("18193", "true", "1000")) +} + +func TestBasicAuthConfigWithRedisSentinel(t *testing.T) { + t.Run("WithoutPerSecondRedisAuth", testBasicAuthConfigWithRedisSentinel("8291", "false", "0")) + t.Run("WithPerSecondRedisAuth", testBasicAuthConfigWithRedisSentinel("8293", "true", "0")) + t.Run("WithoutPerSecondRedisAuthWithLocalCache", testBasicAuthConfigWithRedisSentinel("18291", "false", "1000")) + t.Run("WithPerSecondRedisAuthWithLocalCache", testBasicAuthConfigWithRedisSentinel("18293", "true", "1000")) +} + +func TestBasicReloadConfig(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRoot("8095", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReload("8097", "false", "0", "false")) +} + +func TestBasicReloadConfigWithRedisCluster(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisCluster("8096", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisCluster("8098", "false", "0", "false")) +} + +func TestBasicReloadConfigWithRedisSentinel(t *testing.T) { + t.Run("BasicWithoutWatchRoot", testBasicConfigWithoutWatchRootWithRedisSentinel("8296", "false", "0")) + t.Run("ReloadWithoutWatchRoot", testBasicConfigReloadWithRedisSentinel("8298", "false", "0", "false")) +} + func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { os.Setenv("REDIS_PERSECOND_URL", "localhost:16382") os.Setenv("REDIS_URL", "localhost:16381") @@ -71,6 +104,9 @@ func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) os.Setenv("REDIS_TLS", "true") os.Setenv("REDIS_PERSECOND_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "true") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -81,6 +117,9 @@ func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(* os.Setenv("REDIS_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } @@ -91,9 +130,122 @@ func testBasicConfigAuth(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_AUTH", "password123") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) } +func testBasicConfigAuthWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + 
os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicAuthConfigWithRedisSentinel(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") + os.Setenv("REDIS_TLS", "false") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigWithoutWatchRoot(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + + +func testBasicConfigWithoutWatchRootWithRedisCluster(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigWithoutWatchRootWithRedisSentinel(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", "false") + + return testBasicBaseConfig(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReload(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_URL", "localhost:6380") + os.Setenv("REDIS_URL", "localhost:6379") + os.Setenv("REDIS_AUTH", "") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") + + return testConfigReload(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReloadWithRedisCluster(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) 
func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "cluster") + os.Setenv("REDIS_PERSECOND_URL", "localhost:6389,localhost:6390,localhost:6391") + os.Setenv("REDIS_TYPE", "cluster") + os.Setenv("REDIS_URL", "localhost:6386,localhost:6387,localhost:6388") + os.Setenv("REDIS_PERSECOND_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_PIPELINE_LIMIT", "8") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_AUTH", "password123") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("REDIS_PERSECOND_AUTH", "password123") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + + return testConfigReload(grpcPort, perSecond, local_cache_size) +} + +func testBasicConfigReloadWithRedisSentinel(grpcPort, perSecond string, local_cache_size, runtimeWatchRoot string) func(*testing.T) { + os.Setenv("REDIS_PERSECOND_TYPE", "sentinel") + os.Setenv("REDIS_PERSECOND_URL", "mymaster,localhost:26399,localhost:26400,localhost:26401") + os.Setenv("REDIS_TYPE", "sentinel") + os.Setenv("REDIS_URL", "mymaster,localhost:26394,localhost:26395,localhost:26396") + os.Setenv("REDIS_TLS", "false") + os.Setenv("REDIS_PERSECOND_TLS", "false") + os.Setenv("RUNTIME_WATCH_ROOT", runtimeWatchRoot) + + return testConfigReload(grpcPort, perSecond, local_cache_size) +} + func getCacheKey(cacheKey string, enableLocalCache bool) string { if enableLocalCache { return cacheKey + "_local" @@ -113,6 +265,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp") os.Setenv("REDIS_SOCKET_TYPE", "tcp") os.Setenv("LOCAL_CACHE_SIZE_IN_BYTES", local_cache_size) + os.Setenv("USE_STATSD", "false") local_cache_size_val, _ := strconv.Atoi(local_cache_size) enable_local_cache := local_cache_size_val > 0 @@ -134,7 +287,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu response, err := c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("foo", [][][2]string{{{getCacheKey("hello", enable_local_cache), "world"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, @@ -152,7 +306,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu response, err = c.ShouldRateLimit( context.Background(), common.NewRateLimitRequest("basic", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: pb.RateLimitResponse_OK, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -192,7 +347,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu limitRemaining = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -254,8 +410,9 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu status = pb.RateLimitResponse_OVER_LIMIT limitRemaining2 = 0 } - - assert.Equal( + + common.AssertProtoEqual( + assert, &pb.RateLimitResponse{ OverallCode: status, Statuses: []*pb.RateLimitResponse_DescriptorStatus{ @@ -263,6 +420,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) assert.NoError(err) + key2HitCounter := 
runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) assert.Equal(i+26, int(key2HitCounter.Value())) key2OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key2", enable_local_cache))) @@ -333,6 +491,8 @@ func TestBasicConfigLegacy(t *testing.T) { os.Setenv("REDIS_AUTH", "") os.Setenv("REDIS_PERSECOND_TLS", "false") os.Setenv("REDIS_PERSECOND_AUTH", "") + os.Setenv("REDIS_TYPE", "single") + os.Setenv("REDIS_PERSECOND_TYPE", "single") runner := runner.NewRunner() go func() { @@ -344,6 +504,7 @@ func TestBasicConfigLegacy(t *testing.T) { assert := assert.New(t) conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure()) + assert.NoError(err) defer conn.Close() c := pb_legacy.NewRateLimitServiceClient(conn) @@ -351,21 +512,51 @@ func TestBasicConfigLegacy(t *testing.T) { response, err := c.ShouldRateLimit( context.Background(), common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}}, response) assert.NoError(err) + json_body := []byte(`{ + "domain": "basic", + "descriptors": [ + { + "entries": [ + { + "key": "one_per_minute" + } + ] + } + ] + }`) + http_resp, _ := http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) + assert.Equal(http_resp.StatusCode, 200) + body, _ := ioutil.ReadAll(http_resp.Body) + http_resp.Body.Close() + assert.Equal(`{"overallCode":"OK","statuses":[{"code":"OK","currentLimit":{"requestsPerUnit":1,"unit":"MINUTE"}}]}`, string(body)) + + http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(json_body)) + assert.Equal(http_resp.StatusCode, 429) + body, _ = ioutil.ReadAll(http_resp.Body) + http_resp.Body.Close() + assert.Equal(`{"overallCode":"OVER_LIMIT","statuses":[{"code":"OVER_LIMIT","currentLimit":{"requestsPerUnit":1,"unit":"MINUTE"}}]}`, string(body)) + + invalid_json := []byte(`{"unclosed quote: []}`) + http_resp, _ = http.Post("http://localhost:8082/json", "application/json", bytes.NewBuffer(invalid_json)) + assert.Equal(http_resp.StatusCode, 400) + response, err = c.ShouldRateLimit( context.Background(), common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1)) - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: pb_legacy.RateLimitResponse_OK, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}}, + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimitResponse_RateLimit_SECOND, 49)}}, response) assert.NoError(err) @@ -385,11 +576,12 @@ func TestBasicConfigLegacy(t *testing.T) { limitRemaining = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}}, + newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}}, response) assert.NoError(err) } @@ -413,13 +605,118 @@ func TestBasicConfigLegacy(t 
*testing.T) { limitRemaining2 = 0 } - assert.Equal( + common.AssertProtoEqual( + assert, &pb_legacy.RateLimitResponse{ OverallCode: status, Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{ - newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1), - newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}}, + newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimitResponse_RateLimit_MINUTE, limitRemaining1), + newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) assert.NoError(err) } } + +func testConfigReload(grpcPort, perSecond string, local_cache_size string) func(*testing.T) { + return func(t *testing.T) { + os.Setenv("REDIS_PERSECOND", perSecond) + os.Setenv("PORT", "8082") + os.Setenv("GRPC_PORT", grpcPort) + os.Setenv("DEBUG_PORT", "8084") + os.Setenv("RUNTIME_ROOT", "runtime/current") + os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit") + os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp") + os.Setenv("REDIS_SOCKET_TYPE", "tcp") + os.Setenv("LOCAL_CACHE_SIZE_IN_BYTES", local_cache_size) + os.Setenv("USE_STATSD", "false") + + local_cache_size_val, _ := strconv.Atoi(local_cache_size) + enable_local_cache := local_cache_size_val > 0 + runner := runner.NewRunner() + + go func() { + runner.Run() + }() + + // HACK: Wait for the server to come up. Make a hook that we can wait on. + time.Sleep(1 * time.Second) + + assert := assert.New(t) + conn, err := grpc.Dial(fmt.Sprintf("localhost:%s", grpcPort), grpc.WithInsecure()) + assert.NoError(err) + defer conn.Close() + c := pb.NewRateLimitServiceClient(conn) + + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("block", enable_local_cache), "foo"}}}, 1)) + common.AssertProtoEqual( + assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK}}}, + response) + assert.NoError(err) + + runner.GetStatsStore().Flush() + loadCount1 := runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + + // Copy a new file to config folder to test config reload functionality + in, err := os.Open("runtime/current/ratelimit/reload.yaml") + if err != nil { + panic(err) + } + defer in.Close() + out, err := os.Create("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + defer out.Close() + _, err = io.Copy(out, in) + if err != nil { + panic(err) + } + err = out.Close() + if err != nil { + panic(err) + } + + // Need to wait for config reload to take place and new descriptors to be loaded. + // Shouldn't take more than 5 seconds but wait 120 at most just to be safe. + wait := 120 + reloaded := false + loadCount2 := uint64(0) + + for i := 0; i < wait; i++ { + time.Sleep(1 * time.Second) + runner.GetStatsStore().Flush() + loadCount2 = runner.GetStatsStore().NewCounter("ratelimit.service.config_load_success").Value() + + // Check that successful loads count has increased before continuing. 
+ if loadCount2 > loadCount1 { + reloaded = true + break + } + } + + assert.True(reloaded) + assert.Greater(loadCount2, loadCount1) + + response, err = c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest("reload", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1)) + common.AssertProtoEqual( + assert, + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{ + newDescriptorStatus(pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}}, + response) + assert.NoError(err) + + err = os.Remove("runtime/current/ratelimit/config/reload.yaml") + if err != nil { + panic(err) + } + } +} diff --git a/test/integration/runtime/current/ratelimit/config/basic.yaml b/test/integration/runtime/current/ratelimit/config/basic.yaml index 41cd5a31a..843b98873 100644 --- a/test/integration/runtime/current/ratelimit/config/basic.yaml +++ b/test/integration/runtime/current/ratelimit/config/basic.yaml @@ -9,3 +9,8 @@ descriptors: rate_limit: unit: second requests_per_unit: 50 + + - key: one_per_minute + rate_limit: + unit: minute + requests_per_unit: 1 \ No newline at end of file diff --git a/test/integration/runtime/current/ratelimit/reload.yaml b/test/integration/runtime/current/ratelimit/reload.yaml new file mode 100644 index 000000000..5da29e52d --- /dev/null +++ b/test/integration/runtime/current/ratelimit/reload.yaml @@ -0,0 +1,16 @@ +domain: reload +descriptors: + - key: key1 + rate_limit: + unit: second + requests_per_unit: 50 + + - key: block + rate_limit: + unit: second + requests_per_unit: 0 + + - key: one_per_minute + rate_limit: + unit: minute + requests_per_unit: 1 diff --git a/test/mocks/config/config.go b/test/mocks/config/config.go index b58f42687..38d5b347b 100644 --- a/test/mocks/config/config.go +++ b/test/mocks/config/config.go @@ -1,15 +1,15 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/lyft/ratelimit/src/config (interfaces: RateLimitConfig,RateLimitConfigLoader) +// Source: github.com/envoyproxy/ratelimit/src/config (interfaces: RateLimitConfig,RateLimitConfigLoader) // Package mock_config is a generated GoMock package. 
package mock_config import ( context "context" - ratelimit "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + envoy_extensions_common_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3" + config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" - gostats "github.com/lyft/gostats" - config "github.com/lyft/ratelimit/src/config" + stats "github.com/lyft/gostats" reflect "reflect" ) @@ -38,6 +38,7 @@ func (m *MockRateLimitConfig) EXPECT() *MockRateLimitConfigMockRecorder { // Dump mocks base method func (m *MockRateLimitConfig) Dump() string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Dump") ret0, _ := ret[0].(string) return ret0 @@ -45,11 +46,13 @@ func (m *MockRateLimitConfig) Dump() string { // Dump indicates an expected call of Dump func (mr *MockRateLimitConfigMockRecorder) Dump() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dump", reflect.TypeOf((*MockRateLimitConfig)(nil).Dump)) } // GetLimit mocks base method -func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 *ratelimit.RateLimitDescriptor) *config.RateLimit { +func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 *envoy_extensions_common_ratelimit_v3.RateLimitDescriptor) *config.RateLimit { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLimit", arg0, arg1, arg2) ret0, _ := ret[0].(*config.RateLimit) return ret0 @@ -57,6 +60,7 @@ func (m *MockRateLimitConfig) GetLimit(arg0 context.Context, arg1 string, arg2 * // GetLimit indicates an expected call of GetLimit func (mr *MockRateLimitConfigMockRecorder) GetLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLimit", reflect.TypeOf((*MockRateLimitConfig)(nil).GetLimit), arg0, arg1, arg2) } @@ -84,7 +88,8 @@ func (m *MockRateLimitConfigLoader) EXPECT() *MockRateLimitConfigLoaderMockRecor } // Load mocks base method -func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 gostats.Scope) config.RateLimitConfig { +func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, arg1 stats.Scope) config.RateLimitConfig { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load", arg0, arg1) ret0, _ := ret[0].(config.RateLimitConfig) return ret0 @@ -92,5 +97,6 @@ func (m *MockRateLimitConfigLoader) Load(arg0 []config.RateLimitConfigToLoad, ar // Load indicates an expected call of Load func (mr *MockRateLimitConfigLoaderMockRecorder) Load(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockRateLimitConfigLoader)(nil).Load), arg0, arg1) } diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go new file mode 100644 index 000000000..7e9f3e5b3 --- /dev/null +++ b/test/mocks/limiter/limiter.go @@ -0,0 +1,136 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/limiter (interfaces: RateLimitCache,TimeSource,JitterRandSource) + +// Package mock_limiter is a generated GoMock package. 
+package mock_limiter + +import ( + context "context" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + config "github.com/envoyproxy/ratelimit/src/config" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRateLimitCache is a mock of RateLimitCache interface +type MockRateLimitCache struct { + ctrl *gomock.Controller + recorder *MockRateLimitCacheMockRecorder +} + +// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache +type MockRateLimitCacheMockRecorder struct { + mock *MockRateLimitCache +} + +// NewMockRateLimitCache creates a new mock instance +func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { + mock := &MockRateLimitCache{ctrl: ctrl} + mock.recorder = &MockRateLimitCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { + return m.recorder +} + +// DoLimit mocks base method +func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *envoy_service_ratelimit_v3.RateLimitRequest, arg2 []*config.RateLimit) []*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) + ret0, _ := ret[0].([]*envoy_service_ratelimit_v3.RateLimitResponse_DescriptorStatus) + return ret0 +} + +// DoLimit indicates an expected call of DoLimit +func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) +} + +// MockTimeSource is a mock of TimeSource interface +type MockTimeSource struct { + ctrl *gomock.Controller + recorder *MockTimeSourceMockRecorder +} + +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { + mock *MockTimeSource +} + +// NewMockTimeSource creates a new mock instance +func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { + mock := &MockTimeSource{ctrl: ctrl} + mock.recorder = &MockTimeSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder +} + +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) +} + +// MockJitterRandSource is a mock of JitterRandSource interface +type MockJitterRandSource struct { + ctrl *gomock.Controller + recorder *MockJitterRandSourceMockRecorder +} + +// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource +type MockJitterRandSourceMockRecorder struct { + mock *MockJitterRandSource +} + +// NewMockJitterRandSource creates a new mock instance +func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { + mock := &MockJitterRandSource{ctrl: ctrl} + mock.recorder = &MockJitterRandSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m 
*MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { + return m.recorder +} + +// Int63 mocks base method +func (m *MockJitterRandSource) Int63() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Int63") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Int63 indicates an expected call of Int63 +func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) +} + +// Seed mocks base method +func (m *MockJitterRandSource) Seed(arg0 int64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Seed", arg0) +} + +// Seed indicates an expected call of Seed +func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) +} diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 9b224c88b..9f8b18cec 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -1,6 +1,8 @@ package mocks -//go:generate mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace -//go:generate mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace -//go:generate mockgen -destination ./config/config.go github.com/lyft/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate mockgen -destination ./redis/redis.go github.com/lyft/ratelimit/src/redis RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace +//go:generate go run github.com/golang/mock/mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace +//go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader +//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis Client +//go:generate go run github.com/golang/mock/mockgen -destination ./limiter/limiter.go github.com/envoyproxy/ratelimit/src/limiter RateLimitCache,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./rls/rls.go github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3 RateLimitServiceServer diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index ad8f4ec96..032b500dc 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -1,227 +1,128 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: github.com/lyft/ratelimit/src/redis (interfaces: RateLimitCache,Pool,Connection,Response,TimeSource,JitterRandSource) +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: Client) +// Package mock_redis is a generated GoMock package. 
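The regenerated redis mock above collapses the old Pool/Connection/Response mocks into a single MockClient over the new redis.Client interface, so the cache tests below stub PipeAppend and PipeDo directly. A minimal sketch of that expectation pattern, mirroring the pipeAppend helper defined later in cache_impl_test.go (the key and counter value are illustrative):

package redis_sketch_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/mediocregopher/radix/v3"

	"github.com/envoyproxy/ratelimit/src/redis"
	mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis"
)

// pipeAppend mimics what the real client does: append a radix FlatCmd to the pipeline.
func pipeAppend(pipeline redis.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis.Pipeline {
	return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...))
}

func TestClientMockSketch(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()

	client := mock_redis.NewMockClient(controller)

	// SetArg(1, ...) writes the simulated INCRBY result into the rcv argument,
	// while DoAndReturn keeps the pipeline-building semantics intact.
	client.EXPECT().
		PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).
		SetArg(1, uint32(5)).
		DoAndReturn(pipeAppend)
	client.EXPECT().PipeDo(gomock.Any()).Return(nil)

	var count uint32
	pipeline := client.PipeAppend(redis.Pipeline{}, &count, "INCRBY", "domain_key_value_1234", uint32(1))
	if err := client.PipeDo(pipeline); err != nil {
		t.Fatal(err)
	}
	if count != 5 {
		t.Fatalf("expected counter 5, got %d", count)
	}
}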
package mock_redis import ( - ratelimit "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + redis "github.com/envoyproxy/ratelimit/src/redis" gomock "github.com/golang/mock/gomock" - config "github.com/lyft/ratelimit/src/config" - redis "github.com/lyft/ratelimit/src/redis" - context "golang.org/x/net/context" + reflect "reflect" ) -// Mock of RateLimitCache interface -type MockRateLimitCache struct { +// MockClient is a mock of Client interface +type MockClient struct { ctrl *gomock.Controller - recorder *_MockRateLimitCacheRecorder + recorder *MockClientMockRecorder } -// Recorder for MockRateLimitCache (not exported) -type _MockRateLimitCacheRecorder struct { - mock *MockRateLimitCache +// MockClientMockRecorder is the mock recorder for MockClient +type MockClientMockRecorder struct { + mock *MockClient } -func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { - mock := &MockRateLimitCache{ctrl: ctrl} - mock.recorder = &_MockRateLimitCacheRecorder{mock} +// NewMockClient creates a new mock instance +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} return mock } -func (_m *MockRateLimitCache) EXPECT() *_MockRateLimitCacheRecorder { - return _m.recorder +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder } -func (_m *MockRateLimitCache) DoLimit(_param0 context.Context, _param1 *ratelimit.RateLimitRequest, _param2 []*config.RateLimit) []*ratelimit.RateLimitResponse_DescriptorStatus { - ret := _m.ctrl.Call(_m, "DoLimit", _param0, _param1, _param2) - ret0, _ := ret[0].([]*ratelimit.RateLimitResponse_DescriptorStatus) +// Close mocks base method +func (m *MockClient) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) return ret0 } -func (_mr *_MockRateLimitCacheRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "DoLimit", arg0, arg1, arg2) +// Close indicates an expected call of Close +func (mr *MockClientMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClient)(nil).Close)) } -// Mock of Pool interface -type MockPool struct { - ctrl *gomock.Controller - recorder *_MockPoolRecorder -} - -// Recorder for MockPool (not exported) -type _MockPoolRecorder struct { - mock *MockPool -} - -func NewMockPool(ctrl *gomock.Controller) *MockPool { - mock := &MockPool{ctrl: ctrl} - mock.recorder = &_MockPoolRecorder{mock} - return mock -} - -func (_m *MockPool) EXPECT() *_MockPoolRecorder { - return _m.recorder -} - -func (_m *MockPool) Get() redis.Connection { - ret := _m.ctrl.Call(_m, "Get") - ret0, _ := ret[0].(redis.Connection) - return ret0 -} - -func (_mr *_MockPoolRecorder) Get() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Get") -} - -func (_m *MockPool) Put(_param0 redis.Connection) { - _m.ctrl.Call(_m, "Put", _param0) -} - -func (_mr *_MockPoolRecorder) Put(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Put", arg0) -} - -// Mock of Connection interface -type MockConnection struct { - ctrl *gomock.Controller - recorder *_MockConnectionRecorder -} - -// Recorder for MockConnection (not exported) -type _MockConnectionRecorder struct { - mock *MockConnection -} - -func NewMockConnection(ctrl *gomock.Controller) *MockConnection { - 
mock := &MockConnection{ctrl: ctrl} - mock.recorder = &_MockConnectionRecorder{mock} - return mock -} - -func (_m *MockConnection) EXPECT() *_MockConnectionRecorder { - return _m.recorder -} - -func (_m *MockConnection) PipeAppend(_param0 string, _param1 ...interface{}) { - _s := []interface{}{_param0} - for _, _x := range _param1 { - _s = append(_s, _x) +// DoCmd mocks base method +func (m *MockClient) DoCmd(arg0 interface{}, arg1, arg2 string, arg3 ...interface{}) error { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) } - _m.ctrl.Call(_m, "PipeAppend", _s...) -} - -func (_mr *_MockConnectionRecorder) PipeAppend(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0}, arg1...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "PipeAppend", _s...) -} - -func (_m *MockConnection) PipeResponse() redis.Response { - ret := _m.ctrl.Call(_m, "PipeResponse") - ret0, _ := ret[0].(redis.Response) + ret := m.ctrl.Call(m, "DoCmd", varargs...) + ret0, _ := ret[0].(error) return ret0 } -func (_mr *_MockConnectionRecorder) PipeResponse() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "PipeResponse") +// DoCmd indicates an expected call of DoCmd +func (mr *MockClientMockRecorder) DoCmd(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoCmd", reflect.TypeOf((*MockClient)(nil).DoCmd), varargs...) } -// Mock of Response interface -type MockResponse struct { - ctrl *gomock.Controller - recorder *_MockResponseRecorder -} - -// Recorder for MockResponse (not exported) -type _MockResponseRecorder struct { - mock *MockResponse -} - -func NewMockResponse(ctrl *gomock.Controller) *MockResponse { - mock := &MockResponse{ctrl: ctrl} - mock.recorder = &_MockResponseRecorder{mock} - return mock -} - -func (_m *MockResponse) EXPECT() *_MockResponseRecorder { - return _m.recorder -} - -func (_m *MockResponse) Int() int64 { - ret := _m.ctrl.Call(_m, "Int") - ret0, _ := ret[0].(int64) +// ImplicitPipeliningEnabled mocks base method +func (m *MockClient) ImplicitPipeliningEnabled() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ImplicitPipeliningEnabled") + ret0, _ := ret[0].(bool) return ret0 } -func (_mr *_MockResponseRecorder) Int() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Int") -} - -// Mock of TimeSource interface -type MockTimeSource struct { - ctrl *gomock.Controller - recorder *_MockTimeSourceRecorder -} - -// Recorder for MockTimeSource (not exported) -type _MockTimeSourceRecorder struct { - mock *MockTimeSource -} - -func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { - mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &_MockTimeSourceRecorder{mock} - return mock -} - -func (_m *MockTimeSource) EXPECT() *_MockTimeSourceRecorder { - return _m.recorder +// ImplicitPipeliningEnabled indicates an expected call of ImplicitPipeliningEnabled +func (mr *MockClientMockRecorder) ImplicitPipeliningEnabled() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImplicitPipeliningEnabled", reflect.TypeOf((*MockClient)(nil).ImplicitPipeliningEnabled)) } -func (_m *MockTimeSource) UnixNow() int64 { - ret := _m.ctrl.Call(_m, "UnixNow") - ret0, _ := ret[0].(int64) +// NumActiveConns mocks base method +func (m *MockClient) NumActiveConns() int { + m.ctrl.T.Helper() + ret := 
m.ctrl.Call(m, "NumActiveConns") + ret0, _ := ret[0].(int) return ret0 } -func (_mr *_MockTimeSourceRecorder) UnixNow() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "UnixNow") -} - -// Mock of JitterRandSource interface -type MockJitterRandSource struct { - ctrl *gomock.Controller - recorder *_MockJitterRandSourceRecorder -} - -// Recorder for MockJitterRandSource (not exported) -type _MockJitterRandSourceRecorder struct { - mock *MockJitterRandSource -} - -func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { - mock := &MockJitterRandSource{ctrl: ctrl} - mock.recorder = &_MockJitterRandSourceRecorder{mock} - return mock -} - -func (_m *MockJitterRandSource) EXPECT() *_MockJitterRandSourceRecorder { - return _m.recorder +// NumActiveConns indicates an expected call of NumActiveConns +func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } -func (_m *MockJitterRandSource) Int63() int64 { - ret := _m.ctrl.Call(_m, "Int63") - ret0, _ := ret[0].(int64) +// PipeAppend mocks base method +func (m *MockClient) PipeAppend(arg0 redis.Pipeline, arg1 interface{}, arg2, arg3 string, arg4 ...interface{}) redis.Pipeline { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1, arg2, arg3} + for _, a := range arg4 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PipeAppend", varargs...) + ret0, _ := ret[0].(redis.Pipeline) return ret0 } -func (_mr *_MockJitterRandSourceRecorder) Int63() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Int63") +// PipeAppend indicates an expected call of PipeAppend +func (mr *MockClientMockRecorder) PipeAppend(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeAppend", reflect.TypeOf((*MockClient)(nil).PipeAppend), varargs...) } -func (_m *MockJitterRandSource) Seed(_param0 int64) { - _m.ctrl.Call(_m, "Seed", _param0) +// PipeDo mocks base method +func (m *MockClient) PipeDo(arg0 redis.Pipeline) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PipeDo", arg0) + ret0, _ := ret[0].(error) + return ret0 } -func (_mr *_MockJitterRandSourceRecorder) Seed(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Seed", arg0) +// PipeDo indicates an expected call of PipeDo +func (mr *MockClientMockRecorder) PipeDo(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PipeDo", reflect.TypeOf((*MockClient)(nil).PipeDo), arg0) } diff --git a/test/mocks/rls/rls.go b/test/mocks/rls/rls.go new file mode 100644 index 000000000..92d79b9ab --- /dev/null +++ b/test/mocks/rls/rls.go @@ -0,0 +1,50 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3 (interfaces: RateLimitServiceServer) + +// Package mock_v3 is a generated GoMock package. 
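The MockRateLimitServiceServer above backs the new JSON handler test added at the end of this diff (test/server/server_impl_test.go). A condensed sketch of that wiring, assuming server.NewJsonHandler returns a handler callable as handler(w, req) exactly as the full test exercises it (the request body and status code mirror its "not rate limited" case):

package rls_sketch_test

import (
	"net/http/httptest"
	"strings"
	"testing"

	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
	"github.com/golang/mock/gomock"

	"github.com/envoyproxy/ratelimit/src/server"
	mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls"
)

func TestJsonHandlerSketch(t *testing.T) {
	controller := gomock.NewController(t)
	defer controller.Finish()

	// The handler decodes the JSON body into a RateLimitRequest, forwards it to
	// ShouldRateLimit, and maps an OK overall code to HTTP 200.
	rls := mock_v3.NewMockRateLimitServiceServer(controller)
	rls.EXPECT().ShouldRateLimit(gomock.Any(), gomock.Any()).
		Return(&pb.RateLimitResponse{OverallCode: pb.RateLimitResponse_OK}, nil)

	handler := server.NewJsonHandler(rls)
	w := httptest.NewRecorder()
	handler(w, httptest.NewRequest("POST", "/json", strings.NewReader(`{"domain": "foo"}`)))

	if got := w.Result().StatusCode; got != 200 {
		t.Fatalf("expected 200, got %d", got)
	}
}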
+package mock_v3 + +import ( + context "context" + envoy_service_ratelimit_v3 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRateLimitServiceServer is a mock of RateLimitServiceServer interface +type MockRateLimitServiceServer struct { + ctrl *gomock.Controller + recorder *MockRateLimitServiceServerMockRecorder +} + +// MockRateLimitServiceServerMockRecorder is the mock recorder for MockRateLimitServiceServer +type MockRateLimitServiceServerMockRecorder struct { + mock *MockRateLimitServiceServer +} + +// NewMockRateLimitServiceServer creates a new mock instance +func NewMockRateLimitServiceServer(ctrl *gomock.Controller) *MockRateLimitServiceServer { + mock := &MockRateLimitServiceServer{ctrl: ctrl} + mock.recorder = &MockRateLimitServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitServiceServer) EXPECT() *MockRateLimitServiceServerMockRecorder { + return m.recorder +} + +// ShouldRateLimit mocks base method +func (m *MockRateLimitServiceServer) ShouldRateLimit(arg0 context.Context, arg1 *envoy_service_ratelimit_v3.RateLimitRequest) (*envoy_service_ratelimit_v3.RateLimitResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldRateLimit", arg0, arg1) + ret0, _ := ret[0].(*envoy_service_ratelimit_v3.RateLimitResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ShouldRateLimit indicates an expected call of ShouldRateLimit +func (mr *MockRateLimitServiceServerMockRecorder) ShouldRateLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldRateLimit", reflect.TypeOf((*MockRateLimitServiceServer)(nil).ShouldRateLimit), arg0, arg1) +} diff --git a/test/mocks/runtime/loader/loader.go b/test/mocks/runtime/loader/loader.go index 16bbc1099..da00c6498 100644 --- a/test/mocks/runtime/loader/loader.go +++ b/test/mocks/runtime/loader/loader.go @@ -35,16 +35,19 @@ func (m *MockIFace) EXPECT() *MockIFaceMockRecorder { // AddUpdateCallback mocks base method func (m *MockIFace) AddUpdateCallback(arg0 chan<- int) { + m.ctrl.T.Helper() m.ctrl.Call(m, "AddUpdateCallback", arg0) } // AddUpdateCallback indicates an expected call of AddUpdateCallback func (mr *MockIFaceMockRecorder) AddUpdateCallback(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUpdateCallback", reflect.TypeOf((*MockIFace)(nil).AddUpdateCallback), arg0) } // Snapshot mocks base method func (m *MockIFace) Snapshot() snapshot.IFace { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Snapshot") ret0, _ := ret[0].(snapshot.IFace) return ret0 @@ -52,5 +55,6 @@ func (m *MockIFace) Snapshot() snapshot.IFace { // Snapshot indicates an expected call of Snapshot func (mr *MockIFaceMockRecorder) Snapshot() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockIFace)(nil).Snapshot)) } diff --git a/test/mocks/runtime/snapshot/snapshot.go b/test/mocks/runtime/snapshot/snapshot.go index 432e34693..a56fe5a5b 100644 --- a/test/mocks/runtime/snapshot/snapshot.go +++ b/test/mocks/runtime/snapshot/snapshot.go @@ -36,6 +36,7 @@ func (m *MockIFace) EXPECT() *MockIFaceMockRecorder { // Entries mocks base method func (m *MockIFace) Entries() map[string]*entry.Entry { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Entries") ret0, _ := 
ret[0].(map[string]*entry.Entry) return ret0 @@ -43,11 +44,13 @@ func (m *MockIFace) Entries() map[string]*entry.Entry { // Entries indicates an expected call of Entries func (mr *MockIFaceMockRecorder) Entries() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Entries", reflect.TypeOf((*MockIFace)(nil).Entries)) } // FeatureEnabled mocks base method func (m *MockIFace) FeatureEnabled(arg0 string, arg1 uint64) bool { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FeatureEnabled", arg0, arg1) ret0, _ := ret[0].(bool) return ret0 @@ -55,11 +58,13 @@ func (m *MockIFace) FeatureEnabled(arg0 string, arg1 uint64) bool { // FeatureEnabled indicates an expected call of FeatureEnabled func (mr *MockIFaceMockRecorder) FeatureEnabled(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeatureEnabled", reflect.TypeOf((*MockIFace)(nil).FeatureEnabled), arg0, arg1) } // FeatureEnabledForID mocks base method func (m *MockIFace) FeatureEnabledForID(arg0 string, arg1 uint64, arg2 uint32) bool { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FeatureEnabledForID", arg0, arg1, arg2) ret0, _ := ret[0].(bool) return ret0 @@ -67,11 +72,13 @@ func (m *MockIFace) FeatureEnabledForID(arg0 string, arg1 uint64, arg2 uint32) b // FeatureEnabledForID indicates an expected call of FeatureEnabledForID func (mr *MockIFaceMockRecorder) FeatureEnabledForID(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FeatureEnabledForID", reflect.TypeOf((*MockIFace)(nil).FeatureEnabledForID), arg0, arg1, arg2) } // Get mocks base method func (m *MockIFace) Get(arg0 string) string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0) ret0, _ := ret[0].(string) return ret0 @@ -79,11 +86,13 @@ func (m *MockIFace) Get(arg0 string) string { // Get indicates an expected call of Get func (mr *MockIFaceMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockIFace)(nil).Get), arg0) } // GetInteger mocks base method func (m *MockIFace) GetInteger(arg0 string, arg1 uint64) uint64 { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInteger", arg0, arg1) ret0, _ := ret[0].(uint64) return ret0 @@ -91,11 +100,13 @@ func (m *MockIFace) GetInteger(arg0 string, arg1 uint64) uint64 { // GetInteger indicates an expected call of GetInteger func (mr *MockIFaceMockRecorder) GetInteger(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInteger", reflect.TypeOf((*MockIFace)(nil).GetInteger), arg0, arg1) } // GetModified mocks base method func (m *MockIFace) GetModified(arg0 string) time.Time { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetModified", arg0) ret0, _ := ret[0].(time.Time) return ret0 @@ -103,11 +114,13 @@ func (m *MockIFace) GetModified(arg0 string) time.Time { // GetModified indicates an expected call of GetModified func (mr *MockIFaceMockRecorder) GetModified(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModified", reflect.TypeOf((*MockIFace)(nil).GetModified), arg0) } // Keys mocks base method func (m *MockIFace) Keys() []string { + m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Keys") ret0, _ := ret[0].([]string) return ret0 @@ -115,15 +128,18 @@ func (m *MockIFace) Keys() []string { // Keys indicates an expected call of Keys 
func (mr *MockIFaceMockRecorder) Keys() *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Keys", reflect.TypeOf((*MockIFace)(nil).Keys)) } // SetEntry mocks base method func (m *MockIFace) SetEntry(arg0 string, arg1 *entry.Entry) { + m.ctrl.T.Helper() m.ctrl.Call(m, "SetEntry", arg0, arg1) } // SetEntry indicates an expected call of SetEntry func (mr *MockIFaceMockRecorder) SetEntry(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEntry", reflect.TypeOf((*MockIFace)(nil).SetEntry), arg0, arg1) } diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go new file mode 100644 index 000000000..ac40631e8 --- /dev/null +++ b/test/redis/bench_test.go @@ -0,0 +1,94 @@ +package redis_test + +import ( + "context" + "runtime" + "testing" + "time" + + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis" + stats "github.com/lyft/gostats" + + "math/rand" + + "github.com/envoyproxy/ratelimit/test/common" +) + +func BenchmarkParallelDoLimit(b *testing.B) { + b.Skip("Skip benchmark") + + b.ReportAllocs() + + // See https://github.com/mediocregopher/radix/blob/v3.5.1/bench/bench_test.go#L176 + parallel := runtime.GOMAXPROCS(0) + poolSize := parallel * runtime.GOMAXPROCS(0) + + do := func(b *testing.B, fn func() error) { + b.ResetTimer() + b.SetParallelism(parallel) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := fn(); err != nil { + b.Fatal(err) + } + } + }) + } + + mkDoLimitBench := func(pipelineWindow time.Duration, pipelineLimit int) func(*testing.B) { + return func(b *testing.B) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) + defer client.Close() + + cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil) + request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) + limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} + + // wait for the pool to fill up + for { + time.Sleep(50 * time.Millisecond) + if client.NumActiveConns() >= poolSize { + break + } + } + + b.ResetTimer() + + do(b, func() error { + cache.DoLimit(context.Background(), request, limits) + return nil + }) + } + } + + b.Run("no pipeline", mkDoLimitBench(0, 0)) + + b.Run("pipeline 35us 1", mkDoLimitBench(35*time.Microsecond, 1)) + b.Run("pipeline 75us 1", mkDoLimitBench(75*time.Microsecond, 1)) + b.Run("pipeline 150us 1", mkDoLimitBench(150*time.Microsecond, 1)) + b.Run("pipeline 300us 1", mkDoLimitBench(300*time.Microsecond, 1)) + + b.Run("pipeline 35us 2", mkDoLimitBench(35*time.Microsecond, 2)) + b.Run("pipeline 75us 2", mkDoLimitBench(75*time.Microsecond, 2)) + b.Run("pipeline 150us 2", mkDoLimitBench(150*time.Microsecond, 2)) + b.Run("pipeline 300us 2", mkDoLimitBench(300*time.Microsecond, 2)) + + b.Run("pipeline 35us 4", mkDoLimitBench(35*time.Microsecond, 4)) + b.Run("pipeline 75us 4", mkDoLimitBench(75*time.Microsecond, 4)) + b.Run("pipeline 150us 4", mkDoLimitBench(150*time.Microsecond, 4)) + b.Run("pipeline 300us 4", mkDoLimitBench(300*time.Microsecond, 4)) + + b.Run("pipeline 35us 8", 
mkDoLimitBench(35*time.Microsecond, 8)) + b.Run("pipeline 75us 8", mkDoLimitBench(75*time.Microsecond, 8)) + b.Run("pipeline 150us 8", mkDoLimitBench(150*time.Microsecond, 8)) + b.Run("pipeline 300us 8", mkDoLimitBench(300*time.Microsecond, 8)) + + b.Run("pipeline 35us 16", mkDoLimitBench(35*time.Microsecond, 16)) + b.Run("pipeline 75us 16", mkDoLimitBench(75*time.Microsecond, 16)) + b.Run("pipeline 150us 16", mkDoLimitBench(150*time.Microsecond, 16)) + b.Run("pipeline 300us 16", mkDoLimitBench(300*time.Microsecond, 16)) +} diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index 57581f38d..6b9a4d631 100644 --- a/test/redis/cache_impl_test.go +++ b/test/redis/cache_impl_test.go @@ -2,19 +2,24 @@ package redis_test import ( "testing" + "time" "github.com/coocood/freecache" + "github.com/mediocregopher/radix/v3" - pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3" + "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" - "github.com/lyft/ratelimit/src/config" - "github.com/lyft/ratelimit/src/redis" "math/rand" + "github.com/alicebob/miniredis/v2" + "github.com/envoyproxy/ratelimit/test/common" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" "github.com/golang/mock/gomock" - "github.com/lyft/ratelimit/test/common" - mock_redis "github.com/lyft/ratelimit/test/mocks/redis" "github.com/stretchr/testify/assert" ) @@ -23,46 +28,38 @@ func TestRedis(t *testing.T) { t.Run("WithPerSecondRedis", testRedis(true)) } +func pipeAppend(pipeline redis.Pipeline, rcv interface{}, cmd, key string, args ...interface{}) redis.Pipeline { + return append(pipeline, radix.FlatCmd(rcv, cmd, key, args...)) +} + func testRedis(usePerSecondRedis bool) func(*testing.T) { return func(t *testing.T) { assert := assert.New(t) controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) - perSecondPool := mock_redis.NewMockPool(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - perSecondConnection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) - var cache redis.RateLimitCache + client := mock_redis.NewMockClient(controller) + perSecondClient := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + var cache limiter.RateLimitCache if usePerSecondRedis { - cache = redis.NewRateLimitCacheImpl(pool, perSecondPool, timeSource, rand.New(rand.NewSource(1)), 0, nil) + cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil) } else { - cache = redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) + cache = redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) } statsStore := stats.NewStore(stats.NewNullSink(), false) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - var connUsed *mock_redis.MockConnection + var clientUsed *mock_redis.MockClient if usePerSecondRedis { - connUsed = perSecondConnection + clientUsed = perSecondClient } else { - connUsed = 
connection + clientUsed = client } - connUsed.EXPECT().PipeAppend("INCRBY", "domain_key_value_1234", uint32(1)) - connUsed.EXPECT().PipeAppend("EXPIRE", "domain_key_value_1234", int64(1)) - connUsed.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) - connUsed.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) + + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(1)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -74,21 +71,12 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) + clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key2_value2_subkey2_subvalue2_1200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key2_value2_subkey2_subvalue2_1200", int64(60)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest( "domain", @@ -107,27 +95,15 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { assert.Equal(uint64(1), limits[1].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[1].Stats.NearLimit.Value()) - if usePerSecondRedis { - perSecondPool.EXPECT().Get().Return(perSecondConnection) - } - pool.EXPECT().Get().Return(connection) + clientUsed = client timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key3_value3_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key3_value3_997200", int64(3600)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - if usePerSecondRedis { - perSecondPool.EXPECT().Put(perSecondConnection) - } - pool.EXPECT().Put(connection) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_997200", uint32(1)).SetArg(1, 
uint32(11)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key3_value3_997200", int64(3600)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key3_value3_subkey3_subvalue3_950400", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key3_value3_subkey3_subvalue3_950400", int64(86400)).DoAndReturn(pipeAppend) + clientUsed.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest( "domain", @@ -193,26 +169,20 @@ func TestOverLimitWithLocalCache(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. Under Near Limit Ratio - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -232,15 +202,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0) // Test Near Limit Stats. 
At Near Limit Ratio, still OK - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -255,15 +221,11 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0) // Test Over limit stats - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(16)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -278,14 +240,10 @@ func TestOverLimitWithLocalCache(t *testing.T) { testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1) // Test Over limit stats with local cache - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) - connection.EXPECT().PipeAppend( + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).Times(0) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key4_value4_997200", int64(3600)).Times(0) - connection.EXPECT().PipeResponse().Times(0) - response.EXPECT().Int().Times(0) - pool.EXPECT().Put(connection) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ {Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0}}, @@ -304,23 +262,17 @@ func TestNearLimit(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) // Test Near Limit Stats. 
Under Near Limit Ratio - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(11)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(11)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key4", "value4"}}}, 1) @@ -336,15 +288,11 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // Test Near Limit Stats. At Near Limit Ratio, still OK - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(13)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(13)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -356,15 +304,11 @@ func TestNearLimit(t *testing.T) { // Test Near Limit Stats. We went OVER_LIMIT, but the near_limit counter only increases // when we are near limit, not after we have passed the limit. 
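For reference on the assertions that follow: a hit counts toward near_limit only when it lands above the near-limit threshold (currently 80% of the configured limit, so 16 for a limit of 20) but not past the limit itself. For example, when a request with hitsAddend 3 pushes the counter to 22 against a limit of 20, hits 21 and 22 are recorded as over_limit and only hit 20 as near_limit, which is why the "Some of it over limit, all of it over near limit" case below expects over_limit == 2 and near_limit == 1.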
- pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1000000)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key4_value4_997200", uint32(1)) - connection.EXPECT().PipeAppend( - "EXPIRE", "domain_key4_value4_997200", int64(3600)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(16)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key4_value4_997200", uint32(1)).SetArg(1, uint32(16)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), + "EXPIRE", "domain_key4_value4_997200", int64(3600)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) assert.Equal( []*pb.RateLimitResponse_DescriptorStatus{ @@ -376,14 +320,10 @@ func TestNearLimit(t *testing.T) { // Now test hitsAddend that is greater than 1 // All of it under limit, under near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key5_value5_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key5_value5_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key5_value5_1234", uint32(3)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key5_value5_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key5", "value5"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key5_value5", statsStore)} @@ -396,14 +336,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) // All of it under limit, some over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key6_value6_1234", uint32(2)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key6_value6_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(7)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key6_value6_1234", uint32(2)).SetArg(1, uint32(7)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key6_value6_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key6", "value6"}}}, 2) limits = []*config.RateLimit{config.NewRateLimit(8, pb.RateLimitResponse_RateLimit_SECOND, "key6_value6", statsStore)} @@ -416,14 +352,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // All of it under limit, all of it over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key7_value7_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key7_value7_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - 
response.EXPECT().Int().Return(int64(19)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key7_value7_1234", uint32(3)).SetArg(1, uint32(19)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key7_value7_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key7", "value7"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key7_value7", statsStore)} @@ -436,14 +368,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(3), limits[0].Stats.NearLimit.Value()) // Some of it over limit, all of it over near limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key8_value8_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key8_value8_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(22)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key8_value8_1234", uint32(3)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key8_value8_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key8", "value8"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key8_value8", statsStore)} @@ -456,14 +384,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value()) // Some of it in all three places - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key9_value9_1234", uint32(7)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key9_value9_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(22)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key9_value9_1234", uint32(7)).SetArg(1, uint32(22)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key9_value9_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key9", "value9"}}}, 7) limits = []*config.RateLimit{config.NewRateLimit(20, pb.RateLimitResponse_RateLimit_SECOND, "key9_value9", statsStore)} @@ -476,14 +400,10 @@ func TestNearLimit(t *testing.T) { assert.Equal(uint64(4), limits[0].Stats.NearLimit.Value()) // all of it over limit - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key10_value10_1234", uint32(3)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key10_value10_1234", int64(1)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(30)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key10_value10_1234", 
uint32(3)).SetArg(1, uint32(30)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key10_value10_1234", int64(1)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request = common.NewRateLimitRequest("domain", [][][2]string{{{"key10", "value10"}}}, 3) limits = []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key10_value10", statsStore)} @@ -501,23 +421,17 @@ func TestRedisWithJitter(t *testing.T) { controller := gomock.NewController(t) defer controller.Finish() - pool := mock_redis.NewMockPool(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - connection := mock_redis.NewMockConnection(controller) - response := mock_redis.NewMockResponse(controller) - jitterSource := mock_redis.NewMockJitterRandSource(controller) - cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(jitterSource), 3600, nil) + client := mock_redis.NewMockClient(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + jitterSource := mock_limiter.NewMockJitterRandSource(controller) + cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) - pool.EXPECT().Get().Return(connection) timeSource.EXPECT().UnixNow().Return(int64(1234)) jitterSource.EXPECT().Int63().Return(int64(100)) - connection.EXPECT().PipeAppend("INCRBY", "domain_key_value_1234", uint32(1)) - connection.EXPECT().PipeAppend("EXPIRE", "domain_key_value_1234", int64(101)) - connection.EXPECT().PipeResponse().Return(response) - response.EXPECT().Int().Return(int64(5)) - connection.EXPECT().PipeResponse() - pool.EXPECT().Put(connection) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "INCRBY", "domain_key_value_1234", uint32(1)).SetArg(1, uint32(5)).DoAndReturn(pipeAppend) + client.EXPECT().PipeAppend(gomock.Any(), gomock.Any(), "EXPIRE", "domain_key_value_1234", int64(101)).DoAndReturn(pipeAppend) + client.EXPECT().PipeDo(gomock.Any()).Return(nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} @@ -529,3 +443,197 @@ func TestRedisWithJitter(t *testing.T) { assert.Equal(uint64(0), limits[0].Stats.OverLimit.Value()) assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value()) } + +func mustNewRedisServer() *miniredis.Miniredis { + srv, err := miniredis.Run() + if err != nil { + panic(err) + } + + return srv +} + +func expectPanicError(t *testing.T, f assert.PanicTestFunc) (result error) { + t.Helper() + defer func() { + panicResult := recover() + assert.NotNil(t, panicResult, "Expected a panic") + result = panicResult.(error) + }() + f() + return +} + +func testNewClientImpl(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) func(t *testing.T) { + return func(t *testing.T) { + redisAuth := "123" + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(auth, addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, auth, "single", addr, 1, pipelineWindow, pipelineLimit) + } + + t.Run("connection refused", func(t *testing.T) { + // It's possible there is a redis server listening on 6379 in ci environment, so + // use a random port. 
+ panicErr := expectPanicError(t, func() { mkRedisClient("", "localhost:12345") }) + assert.Contains(t, panicErr.Error(), "connection refused") + }) + + t.Run("ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + var client redis.Client + assert.NotPanics(t, func() { + client = mkRedisClient("", redisSrv.Addr()) + }) + assert.NotNil(t, client) + }) + + t.Run("auth fail", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + redisSrv.RequireAuth(redisAuth) + + assert.PanicsWithError(t, "NOAUTH Authentication required.", func() { + mkRedisClient("", redisSrv.Addr()) + }) + }) + + t.Run("auth pass", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + redisSrv.RequireAuth(redisAuth) + + assert.NotPanics(t, func() { + mkRedisClient(redisAuth, redisSrv.Addr()) + }) + }) + + t.Run("ImplicitPipeliningEnabled() return expected value", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient("", redisSrv.Addr()) + + if pipelineWindow == 0 && pipelineLimit == 0 { + assert.False(t, client.ImplicitPipeliningEnabled()) + } else { + assert.True(t, client.ImplicitPipeliningEnabled()) + } + }) + } +} + +func TestNewClientImpl(t *testing.T) { + t.Run("ImplicitPipeliningEnabled", testNewClientImpl(t, 2*time.Millisecond, 2)) + t.Run("ImplicitPipeliningDisabled", testNewClientImpl(t, 0, 0)) +} + +func TestDoCmd(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, 0, 0) + } + + t.Run("SETGET ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res string + + assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) + assert.Nil(t, client.DoCmd(&res, "GET", "foo")) + assert.Equal(t, "bar", res) + }) + + t.Run("INCRBY ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res uint32 + hits := uint32(1) + + assert.Nil(t, client.DoCmd(&res, "INCRBY", "a", hits)) + assert.Equal(t, hits, res) + assert.Nil(t, client.DoCmd(&res, "INCRBY", "a", hits)) + assert.Equal(t, uint32(2), res) + }) + + t.Run("connection broken", func(t *testing.T) { + redisSrv := mustNewRedisServer() + client := mkRedisClient(redisSrv.Addr()) + + assert.Nil(t, client.DoCmd(nil, "SET", "foo", "bar")) + + redisSrv.Close() + assert.EqualError(t, client.DoCmd(nil, "GET", "foo"), "EOF") + }) +} + +func testPipeDo(t *testing.T, pipelineWindow time.Duration, pipelineLimit int) func(t *testing.T) { + return func(t *testing.T) { + statsStore := stats.NewStore(stats.NewNullSink(), false) + + mkRedisClient := func(addr string) redis.Client { + return redis.NewClientImpl(statsStore, false, "", "single", addr, 1, pipelineWindow, pipelineLimit) + } + + t.Run("SETGET ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res string + + pipeline := redis.Pipeline{} + pipeline = client.PipeAppend(pipeline, nil, "SET", "foo", "bar") + pipeline = client.PipeAppend(pipeline, &res, "GET", "foo") + + assert.Nil(t, client.PipeDo(pipeline)) + assert.Equal(t, "bar", res) + }) + + t.Run("INCRBY ok", func(t *testing.T) { + redisSrv := mustNewRedisServer() + defer redisSrv.Close() + + client := mkRedisClient(redisSrv.Addr()) + var res 
uint32
+			hits := uint32(1)
+
+			assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, &res, "INCRBY", "a", hits)))
+			assert.Equal(t, hits, res)
+
+			assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, &res, "INCRBY", "a", hits)))
+			assert.Equal(t, uint32(2), res)
+		})
+
+		t.Run("connection broken", func(t *testing.T) {
+			redisSrv := mustNewRedisServer()
+			client := mkRedisClient(redisSrv.Addr())
+
+			assert.Nil(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "SET", "foo", "bar")))
+
+			redisSrv.Close()
+
+			expectErrContainEOF := func(t *testing.T, err error) {
+				assert.NotNil(t, err)
+				assert.Contains(t, err.Error(), "EOF")
+			}
+
+			expectErrContainEOF(t, client.PipeDo(client.PipeAppend(redis.Pipeline{}, nil, "GET", "foo")))
+		})
+	}
+}
+
+func TestPipeDo(t *testing.T) {
+	t.Run("ImplicitPipeliningEnabled", testPipeDo(t, 10*time.Millisecond, 2))
+	t.Run("ImplicitPipeliningDisabled", testPipeDo(t, 0, 0))
+}
diff --git a/test/server/health_test.go b/test/server/health_test.go
index 21030ddde..a79e3642e 100644
--- a/test/server/health_test.go
+++ b/test/server/health_test.go
@@ -8,7 +8,7 @@ import (
 	"syscall"
 	"testing"
 
-	"github.com/lyft/ratelimit/src/server"
+	"github.com/envoyproxy/ratelimit/src/server"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/health"
 	healthpb "google.golang.org/grpc/health/grpc_health_v1"
diff --git a/test/server/server_impl_test.go b/test/server/server_impl_test.go
new file mode 100644
index 000000000..8ee221610
--- /dev/null
+++ b/test/server/server_impl_test.go
@@ -0,0 +1,83 @@
+package server_test
+
+import (
+	"fmt"
+	"github.com/golang/protobuf/proto"
+	"github.com/stretchr/testify/mock"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+
+	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+
+	"github.com/envoyproxy/ratelimit/src/server"
+	mock_v3 "github.com/envoyproxy/ratelimit/test/mocks/rls"
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/assert"
+)
+
+func assertHttpResponse(t *testing.T,
+	handler http.HandlerFunc,
+	requestBody string,
+	expectedStatusCode int,
+	expectedContentType string,
+	expectedResponseBody string) {
+
+	t.Helper()
+	assert := assert.New(t)
+
+	req := httptest.NewRequest("METHOD_NOT_CHECKED", "/path_not_checked", strings.NewReader(requestBody))
+	w := httptest.NewRecorder()
+	handler(w, req)
+
+	resp := w.Result()
+	actualBody, _ := ioutil.ReadAll(resp.Body)
+	assert.Equal(expectedContentType, resp.Header.Get("Content-Type"))
+	assert.Equal(expectedStatusCode, resp.StatusCode)
+	assert.Equal(expectedResponseBody, string(actualBody))
+}
+
+func TestJsonHandler(t *testing.T) {
+	controller := gomock.NewController(t)
+	defer controller.Finish()
+
+	rls := mock_v3.NewMockRateLimitServiceServer(controller)
+	handler := server.NewJsonHandler(rls)
+	requestMatcher := mock.MatchedBy(func(req *pb.RateLimitRequest) bool {
+		return proto.Equal(req, &pb.RateLimitRequest{
+			Domain: "foo",
+		})
+	})
+
+	// Missing request body
+	assertHttpResponse(t, handler, "", 400, "text/plain; charset=utf-8", "EOF\n")
+
+	// Request body is not valid json
+	assertHttpResponse(t, handler, "}", 400, "text/plain; charset=utf-8", "invalid character '}' looking for beginning of value\n")
+
+	// Unknown response code
+	rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{}, nil)
+	assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "application/json", "{}")
+
+	// ratelimit service error
+	rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, fmt.Errorf("some error"))
+	assertHttpResponse(t, handler, `{"domain": "foo"}`, 400, "text/plain; charset=utf-8", "some error\n")
+
+	// json marshaling error
+	rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(nil, nil)
+	assertHttpResponse(t, handler, `{"domain": "foo"}`, 500, "text/plain; charset=utf-8", "error marshaling proto3 to json: Marshal called with nil\n")
+
+	// successful request, not rate limited
+	rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{
+		OverallCode: pb.RateLimitResponse_OK,
+	}, nil)
+	assertHttpResponse(t, handler, `{"domain": "foo"}`, 200, "application/json", `{"overallCode":"OK"}`)
+
+	// successful request, rate limited
+	rls.EXPECT().ShouldRateLimit(nil, requestMatcher).Return(&pb.RateLimitResponse{
+		OverallCode: pb.RateLimitResponse_OVER_LIMIT,
+	}, nil)
+	assertHttpResponse(t, handler, `{"domain": "foo"}`, 429, "application/json", `{"overallCode":"OVER_LIMIT"}`)
+}
diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go
index f0510f3d2..a51ddbe90 100644
--- a/test/service/ratelimit_legacy_test.go
+++ b/test/service/ratelimit_legacy_test.go
@@ -3,46 +3,40 @@ package ratelimit_test
 import (
 	"testing"
 
-	pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
-	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
+	core_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
+	pb_struct_legacy "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
+	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	pb_struct "github.com/envoyproxy/go-control-plane/envoy/extensions/common/ratelimit/v3"
+	pb_legacy "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
+	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+	"github.com/envoyproxy/ratelimit/src/config"
+	"github.com/envoyproxy/ratelimit/src/redis"
+	"github.com/envoyproxy/ratelimit/src/service"
+	"github.com/envoyproxy/ratelimit/test/common"
 	"github.com/golang/mock/gomock"
-	"github.com/golang/protobuf/jsonpb"
 	"github.com/lyft/gostats"
-	pb_legacy "github.com/lyft/ratelimit/proto/ratelimit"
-	"github.com/lyft/ratelimit/src/config"
-	"github.com/lyft/ratelimit/src/redis"
-	"github.com/lyft/ratelimit/src/service"
-	"github.com/lyft/ratelimit/test/common"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
 
-func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimit, error) {
+func convertRatelimit(ratelimit *pb.RateLimitResponse_RateLimit) (*pb_legacy.RateLimitResponse_RateLimit, error) {
 	if ratelimit == nil {
 		return nil, nil
 	}
 
-	m := &jsonpb.Marshaler{}
-	s, err := m.MarshalToString(ratelimit)
-	if err != nil {
-		return nil, err
-	}
-
-	rl := &pb_legacy.RateLimit{}
-	err = jsonpb.UnmarshalString(s, rl)
-	if err != nil {
-		return nil, err
-	}
-
-	return rl, nil
+	return &pb_legacy.RateLimitResponse_RateLimit{
+		Name: ratelimit.GetName(),
+		RequestsPerUnit: ratelimit.GetRequestsPerUnit(),
+		Unit: pb_legacy.RateLimitResponse_RateLimit_Unit(ratelimit.GetUnit()),
+	}, nil
 }
 
-func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimit, error) {
+func convertRatelimits(ratelimits []*config.RateLimit) ([]*pb_legacy.RateLimitResponse_RateLimit, error) {
 	if ratelimits == nil {
 		return nil, nil
 	}
 
-	ret := make([]*pb_legacy.RateLimit, 0)
+	ret := make([]*pb_legacy.RateLimitResponse_RateLimit, 0)
 	for _, rl := range ratelimits {
 		if rl == nil {
 			ret = append(ret, nil)
@@ -74,7 +68,8 @@ func TestServiceLegacy(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
 
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, legacyRequest)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb_legacy.RateLimitResponse{
 			OverallCode: pb_legacy.RateLimitResponse_OK,
 			Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
@@ -111,7 +106,8 @@ func TestServiceLegacy(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
 	response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb_legacy.RateLimitResponse{
 			OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT,
 			Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
@@ -146,7 +142,8 @@ func TestServiceLegacy(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}})
 	response, err = service.GetLegacyService().ShouldRateLimit(nil, legacyRequest)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb_legacy.RateLimitResponse{
 			OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT,
 			Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
@@ -224,7 +221,7 @@ func TestInitialLoadErrorLegacy(test *testing.T) {
 		func([]config.RateLimitConfigToLoad, stats.Scope) {
 			panic(config.RateLimitConfigError("load error"))
 		})
-	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore)
+	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true)
 
 	request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1)
 	response, err := service.GetLegacyService().ShouldRateLimit(nil, request)
@@ -260,13 +257,13 @@ func TestConvertLegacyRequest(test *testing.T) {
 			assert.FailNow(test, err.Error())
 		}
 
-		assert.Equal(test, expectedRequest, req)
+		common.AssertProtoEqual(assert.New(test), expectedRequest, req)
 	}
 
 	{
 		request := &pb_legacy.RateLimitRequest{
 			Domain: "test",
-			Descriptors: []*pb_legacy.RateLimitDescriptor{},
+			Descriptors: []*pb_struct_legacy.RateLimitDescriptor{},
 			HitsAddend: 10,
 		}
 
@@ -281,13 +278,13 @@ func TestConvertLegacyRequest(test *testing.T) {
 			assert.FailNow(test, err.Error())
 		}
 
-		assert.Equal(test, expectedRequest, req)
+		common.AssertProtoEqual(assert.New(test), expectedRequest, req)
 	}
 
 	{
-		descriptors := []*pb_legacy.RateLimitDescriptor{
+		descriptors := []*pb_struct_legacy.RateLimitDescriptor{
 			{
-				Entries: []*pb_legacy.RateLimitDescriptor_Entry{
+				Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{
 					{
 						Key: "foo",
 						Value: "foo_value",
@@ -296,7 +293,7 @@ func TestConvertLegacyRequest(test *testing.T) {
 				},
 			},
 			{
-				Entries: []*pb_legacy.RateLimitDescriptor_Entry{},
+				Entries: []*pb_struct_legacy.RateLimitDescriptor_Entry{},
 			},
 			{
 				Entries: nil,
@@ -340,7 +337,7 @@ func TestConvertLegacyRequest(test *testing.T) {
 			assert.FailNow(test, err.Error())
 		}
 
-		assert.Equal(test, expectedRequest, req)
+		common.AssertProtoEqual(assert.New(test), expectedRequest, req)
 	}
 }
 
@@ -370,14 +367,26 @@ func TestConvertResponse(test *testing.T) {
 		},
 	}
 
+	requestHeadersToAdd := []*core.HeaderValue{{
+		Key: "test_request",
+		Value: "test_request_value",
+	}, nil}
+
+	responseHeadersToAdd := []*core.HeaderValue{{
+		Key: "test_response",
+		Value: "test_response",
+	}, nil}
+
 	response := &pb.RateLimitResponse{
-		OverallCode: pb.RateLimitResponse_OVER_LIMIT,
-		Statuses: statuses,
+		OverallCode: pb.RateLimitResponse_OVER_LIMIT,
+		Statuses: statuses,
+		RequestHeadersToAdd: requestHeadersToAdd,
+		ResponseHeadersToAdd: responseHeadersToAdd,
 	}
 
-	expectedRl := &pb_legacy.RateLimit{
+	expectedRl := &pb_legacy.RateLimitResponse_RateLimit{
 		RequestsPerUnit: 10,
-		Unit: pb_legacy.RateLimit_DAY,
+		Unit: pb_legacy.RateLimitResponse_RateLimit_DAY,
 	}
 
 	expectedStatuses := []*pb_legacy.RateLimitResponse_DescriptorStatus{
@@ -394,9 +403,21 @@ func TestConvertResponse(test *testing.T) {
 		},
 	}
 
+	expectedRequestHeadersToAdd := []*core_legacy.HeaderValue{{
+		Key: "test_request",
+		Value: "test_request_value",
+	}, nil}
+
+	expectedResponseHeadersToAdd := []*core_legacy.HeaderValue{{
+		Key: "test_response",
+		Value: "test_response",
+	}, nil}
+
 	expectedResponse := &pb_legacy.RateLimitResponse{
-		OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT,
-		Statuses: expectedStatuses,
+		OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT,
+		Statuses: expectedStatuses,
+		RequestHeadersToAdd: expectedRequestHeadersToAdd,
+		Headers: expectedResponseHeadersToAdd,
 	}
 
 	resp, err = ratelimit.ConvertResponse(response)
@@ -404,5 +425,5 @@ func TestConvertResponse(test *testing.T) {
 		assert.FailNow(test, err.Error())
 	}
 
-	assert.Equal(test, expectedResponse, resp)
+	common.AssertProtoEqual(assert.New(test), expectedResponse, resp)
 }
diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go
index df5de3eb4..12c77926a 100644
--- a/test/service/ratelimit_test.go
+++ b/test/service/ratelimit_test.go
@@ -4,17 +4,17 @@ import (
 	"sync"
 	"testing"
 
-	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
+	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v3"
+	"github.com/envoyproxy/ratelimit/src/config"
+	"github.com/envoyproxy/ratelimit/src/redis"
+	ratelimit "github.com/envoyproxy/ratelimit/src/service"
+	"github.com/envoyproxy/ratelimit/test/common"
+	mock_config "github.com/envoyproxy/ratelimit/test/mocks/config"
+	mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter"
+	mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader"
+	mock_snapshot "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot"
 	"github.com/golang/mock/gomock"
-	"github.com/lyft/gostats"
-	"github.com/lyft/ratelimit/src/config"
-	"github.com/lyft/ratelimit/src/redis"
-	"github.com/lyft/ratelimit/src/service"
-	"github.com/lyft/ratelimit/test/common"
-	"github.com/lyft/ratelimit/test/mocks/config"
-	"github.com/lyft/ratelimit/test/mocks/redis"
-	"github.com/lyft/ratelimit/test/mocks/runtime/loader"
-	"github.com/lyft/ratelimit/test/mocks/runtime/snapshot"
+	stats "github.com/lyft/gostats"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
@@ -51,7 +51,7 @@ type rateLimitServiceTestSuite struct {
 	controller *gomock.Controller
 	runtime *mock_loader.MockIFace
 	snapshot *mock_snapshot.MockIFace
-	cache *mock_redis.MockRateLimitCache
+	cache *mock_limiter.MockRateLimitCache
 	configLoader *mock_config.MockRateLimitConfigLoader
 	config *mock_config.MockRateLimitConfig
 	runtimeUpdateCallback chan<- int
@@ -64,7 +64,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite {
 	ret.controller = gomock.NewController(t)
 	ret.runtime = mock_loader.NewMockIFace(ret.controller)
 	ret.snapshot = mock_snapshot.NewMockIFace(ret.controller)
-	ret.cache = mock_redis.NewMockRateLimitCache(ret.controller)
+	ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller)
 	ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller)
 	ret.config = mock_config.NewMockRateLimitConfig(ret.controller)
 	ret.statStore = stats.NewStore(stats.NewNullSink(), false)
@@ -82,7 +82,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe
 	this.configLoader.EXPECT().Load(
 		[]config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config)
-	return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore)
+	return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, true)
 }
@@ -97,7 +97,8 @@ func TestService(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
 
 	response, err := service.ShouldRateLimit(nil, request)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb.RateLimitResponse{
 			OverallCode: pb.RateLimitResponse_OK,
 			Statuses: []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
@@ -124,7 +125,8 @@ func TestService(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[0].Limit, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}})
 	response, err = service.ShouldRateLimit(nil, request)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb.RateLimitResponse{
 			OverallCode: pb.RateLimitResponse_OVER_LIMIT,
 			Statuses: []*pb.RateLimitResponse_DescriptorStatus{
@@ -154,7 +156,8 @@ func TestService(test *testing.T) {
 		[]*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0},
 			{Code: pb.RateLimitResponse_OVER_LIMIT, CurrentLimit: limits[1].Limit, LimitRemaining: 0}})
 	response, err = service.ShouldRateLimit(nil, request)
-	t.assert.Equal(
+	common.AssertProtoEqual(
+		t.assert,
 		&pb.RateLimitResponse{
 			OverallCode: pb.RateLimitResponse_OVER_LIMIT,
 			Statuses: []*pb.RateLimitResponse_DescriptorStatus{
@@ -225,7 +228,7 @@ func TestInitialLoadError(test *testing.T) {
 		func([]config.RateLimitConfigToLoad, stats.Scope) {
 			panic(config.RateLimitConfigError("load error"))
 		})
-	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore)
+	service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, true)
 
 	request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1)
 	response, err := service.ShouldRateLimit(nil, request)