From 9deac5fee68a8d7fb5bc1bc0d56b6d24bf060dda Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Fri, 6 Dec 2024 21:58:12 +0000 Subject: [PATCH 01/27] ratelimit: option to execute action on stream done Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/v3/rate_limit.proto | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 3e33536b228a5..b735121767a58 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -304,6 +304,18 @@ message RateLimitConfig { // [#extension-category: envoy.rate_limit_descriptors] config.core.v3.TypedExtensionConfig extension = 9; } + + // If true, the action will be applied when the HTTP stream completes. The default value is false. + // This is useful when the rate limit action needs to be applied based on the response context. + // + // For example, let's say the upstream service calculates the usage statistics and returns them in the response, + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests with this option. + // Combined with another filter that can set the descriptor value based on the response (e.g. Lua filter), this can be used + // to subtract the usage statistics from the rate limit descriptor. + // + // This is "lazy" by nature, and the rate limit won't be applied to the current request, but to the + // subsequent requests that match the descriptor. 
+ bool apply_on_stream_done = 10; } message Override { From 5fe037085f6534a94dbb66c740f887dc00172b17 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Fri, 6 Dec 2024 22:27:24 +0000 Subject: [PATCH 02/27] format Signed-off-by: Takeshi Yoneda --- api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index b735121767a58..367425b00dc6c 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -140,7 +140,7 @@ message RateLimit { // Also applies to Local rate limiting :ref:`using descriptors `. // [#not-implemented-hide:] message RateLimitConfig { - // [#next-free-field: 10] + // [#next-free-field: 11] message Action { // The following descriptor entry is appended to the descriptor: // From 32ea3e9f977281e8c32fa30ee89b68a8e52fc287 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Fri, 6 Dec 2024 22:57:36 +0000 Subject: [PATCH 03/27] more clarify Signed-off-by: Takeshi Yoneda --- .../extensions/filters/http/ratelimit/v3/rate_limit.proto | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 367425b00dc6c..19cf9af7fa811 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -308,10 +308,10 @@ message RateLimitConfig { // If true, the action will be applied when the HTTP stream completes. The default value is false. // This is useful when the rate limit action needs to be applied based on the response context. 
// - // For example, let's say the upstream service calculates the usage statistics and returns them in the response, - // and we want to utilize these numbers to apply the rate limit action for the subsequent requests with this option. - // Combined with another filter that can set the descriptor value based on the response (e.g. Lua filter), this can be used - // to subtract the usage statistics from the rate limit descriptor. + // For example, let's say the upstream service calculates the usage statistics and returns them in the response + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. + // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), + // this can be used to subtract the usage statistics from the rate limit budget. // // This is "lazy" by nature, and the rate limit won't be applied to the current request, but to the // subsequent requests that match the descriptor. From e346217d5bfee16e08ef8b5d487d5d66fc8472e4 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Sat, 7 Dec 2024 18:25:04 +0000 Subject: [PATCH 04/27] clarify more Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/v3/rate_limit.proto | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 19cf9af7fa811..c325c9e8d9e6f 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -305,17 +305,18 @@ message RateLimitConfig { config.core.v3.TypedExtensionConfig extension = 9; } - // If true, the action will be applied when the HTTP stream completes. The default value is false. - // This is useful when the rate limit action needs to be applied based on the response context. 
+ // If true, the action will be also applied when the HTTP stream completes. The default value is false. + // This is useful when the rate limit budget needs to reflect the response context that is not available + // on the request path. // // For example, let's say the upstream service calculates the usage statistics and returns them in the response // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), // this can be used to subtract the usage statistics from the rate limit budget. // - // This is "lazy" by nature, and the rate limit won't be applied to the current request, but to the - // subsequent requests that match the descriptor. - bool apply_on_stream_done = 10; + // The action taken place on the stream completion is "lazy" by nature, and the rate limit won't be applied to + // the current request, but to the subsequent requests that match the descriptor. + bool extra_addend_on_stream_done = 10; } message Override { From c8e0b736e1feac7ae24396b36ea6e37240c8581b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 9 Dec 2024 17:25:59 +0000 Subject: [PATCH 05/27] Apply feedback in offline: action is either request or response, not both for clarity Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/v3/rate_limit.proto | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index c325c9e8d9e6f..513101d19fa99 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -305,7 +305,7 @@ message RateLimitConfig { config.core.v3.TypedExtensionConfig extension = 9; } - // If true, the action will be also applied when the HTTP stream completes. 
The default value is false. + // If true, the action will be applied when the HTTP stream completes. The default value is false. // This is useful when the rate limit budget needs to reflect the response context that is not available // on the request path. // @@ -314,9 +314,10 @@ message RateLimitConfig { // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), // this can be used to subtract the usage statistics from the rate limit budget. // - // The action taken place on the stream completion is "lazy" by nature, and the rate limit won't be applied to - // the current request, but to the subsequent requests that match the descriptor. - bool extra_addend_on_stream_done = 10; + // The action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. + // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones + // with this field set to false. + bool apply_on_stream_done = 10; } message Override { From 6166b901cf203490e3a949af66df4f4ac6be1690 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 9 Dec 2024 17:34:09 +0000 Subject: [PATCH 06/27] Apply offline review: remove the mention of envoy.ratelimit.hits_addend for future extension Signed-off-by: Takeshi Yoneda --- .../extensions/filters/http/ratelimit/v3/rate_limit.proto | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 513101d19fa99..35a7cfcf96844 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -309,12 +309,12 @@ message RateLimitConfig { // This is useful when the rate limit budget needs to reflect the response context that is not available // on the request path. 
// - // For example, let's say the upstream service calculates the usage statistics and returns them in the response + // For example, let's say the upstream service calculates the usage statistics and returns them in the response body // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), + // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), // this can be used to subtract the usage statistics from the rate limit budget. // - // The action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. + // An action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones // with this field set to false. bool apply_on_stream_done = 10; From cab0154c4fa6d1b4a3b9c8316bdbc8c3b1547d19 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 9 Dec 2024 17:39:43 +0000 Subject: [PATCH 07/27] more comments Signed-off-by: Takeshi Yoneda --- api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 35a7cfcf96844..2546660fe8c0a 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -315,6 +315,8 @@ message RateLimitConfig { // this can be used to subtract the usage statistics from the rate limit budget. // // An action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. 
+ // In other words, the current request won't be blocked by the rate limit action applied on the stream completion, but the + // budget will be updated for the subsequent requests based on the action with this field set to true. // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones // with this field set to false. bool apply_on_stream_done = 10; From 371e4c64b31ff98499cbba806decdf7cb2d7acd6 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 9 Dec 2024 19:52:57 +0000 Subject: [PATCH 08/27] Put it in the correct place Signed-off-by: Takeshi Yoneda --- .../config/route/v3/route_components.proto | 18 +++++++++++++++++- .../filters/http/ratelimit/v3/rate_limit.proto | 18 +----------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index ce781d100c9c4..290ed128afcd6 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1871,7 +1871,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 12] + // [#next-free-field: 13] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -2148,6 +2148,22 @@ message RateLimit { // Rate limit on the existence of query parameters. QueryParameterValueMatch query_parameter_value_match = 11; } + + // If true, the action will be applied when the stream completes. The default value is false. + // This is useful when the rate limit budget needs to reflect the response context that is not available + // on the request path. 
+ // + // For example, let's say the upstream service calculates the usage statistics and returns them in the response body + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. + // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), + // this can be used to subtract the usage statistics from the rate limit budget. + // + // An action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. + // In other words, the current request won't be blocked by the rate limit action applied on the stream completion, but the + // budget will be updated for the subsequent requests based on the action with this field set to true. + // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones + // with this field set to false. + bool apply_on_stream_done = 12; } message Override { diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 2546660fe8c0a..3e33536b228a5 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -140,7 +140,7 @@ message RateLimit { // Also applies to Local rate limiting :ref:`using descriptors `. // [#not-implemented-hide:] message RateLimitConfig { - // [#next-free-field: 11] + // [#next-free-field: 10] message Action { // The following descriptor entry is appended to the descriptor: // @@ -304,22 +304,6 @@ message RateLimitConfig { // [#extension-category: envoy.rate_limit_descriptors] config.core.v3.TypedExtensionConfig extension = 9; } - - // If true, the action will be applied when the HTTP stream completes. The default value is false. - // This is useful when the rate limit budget needs to reflect the response context that is not available - // on the request path. 
- // - // For example, let's say the upstream service calculates the usage statistics and returns them in the response body - // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), - // this can be used to subtract the usage statistics from the rate limit budget. - // - // An action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. - // In other words, the current request won't be blocked by the rate limit action applied on the stream completion, but the - // budget will be updated for the subsequent requests based on the action with this field set to true. - // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones - // with this field set to false. - bool apply_on_stream_done = 10; } message Override { From 7e77a7fc586f2f3e56abc43cfca746b5de8212ff Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Mon, 9 Dec 2024 22:15:10 +0000 Subject: [PATCH 09/27] Filter config level Signed-off-by: Takeshi Yoneda --- .../config/route/v3/route_components.proto | 18 +------- .../http/ratelimit/v3/rate_limit.proto | 17 ++++++- .../filters/http/ratelimit/ratelimit.cc | 46 ++++++++++++------- .../filters/http/ratelimit/ratelimit.h | 9 +++- 4 files changed, 53 insertions(+), 37 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 290ed128afcd6..ce781d100c9c4 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1871,7 +1871,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 13] + // [#next-free-field: 12] message Action { option 
(udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -2148,22 +2148,6 @@ message RateLimit { // Rate limit on the existence of query parameters. QueryParameterValueMatch query_parameter_value_match = 11; } - - // If true, the action will be applied when the stream completes. The default value is false. - // This is useful when the rate limit budget needs to reflect the response context that is not available - // on the request path. - // - // For example, let's say the upstream service calculates the usage statistics and returns them in the response body - // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), - // this can be used to subtract the usage statistics from the rate limit budget. - // - // An action applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this action. - // In other words, the current request won't be blocked by the rate limit action applied on the stream completion, but the - // budget will be updated for the subsequent requests based on the action with this field set to true. - // Users should ensure that the rate limit is enforced by the actions applied on the request path, i.e. the ones - // with this field set to false. - bool apply_on_stream_done = 12; } message Override { diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 3e33536b228a5..0a9127838e548 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -25,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. 
// [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 14] +// [#next-free-field: 15] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -134,6 +134,21 @@ message RateLimit { // Optional additional prefix to use when emitting statistics. This allows to distinguish // emitted statistics between configured ``ratelimit`` filters in an HTTP filter chain. string stat_prefix = 13; + + // If true, rate limit requests will also be sent to the rate limit service when the stream completes. + // This is useful when the rate limit budget needs to reflect the response context that is not available + // on the request path. + // + // For example, let's say the upstream service calculates the usage statistics, returns them in the response body + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. + // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), + // this can be used to subtract the usage statistics from the rate limit budget. + // + // The rate limit requests sent on the stream completion are "fire-and-forget" by nature, and rate limit is not enforced + // on the current HTTP stream being completed. The filter will only update the budget for the subsequent requests at + // that point. Hence the effect of the rate limit requests made during the stream completion is not visible in the current + // but only in the subsequent requests. + bool apply_on_stream_done = 14; } // Global rate limiting :ref:`architecture overview `. 
diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 7052f8f793edf..a84808cb4df62 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -65,11 +65,9 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { return; } - std::vector descriptors; - const Router::RouteEntry* route_entry = route->routeEntry(); // Get all applicable rate limit policy entries for the route. - populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors_, headers); VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route); @@ -77,27 +75,31 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { case VhRateLimitOptions::Ignore: break; case VhRateLimitOptions::Include: - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors_, headers); break; case VhRateLimitOptions::Override: if (route_entry->rateLimitPolicy().empty()) { - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors_, headers); } break; } + makeRateLimitRequest(); +} - const StreamInfo::UInt32Accessor* hits_addend_filter_state = - callbacks_->streamInfo().filterState()->getDataReadOnly( - HitsAddendFilterStateKey); - double hits_addend = 0; - if (hits_addend_filter_state != nullptr) { - hits_addend = hits_addend_filter_state->value(); - } +void Filter::makeRateLimitRequest() { + if (!descriptors_.empty()) { + // TODO: Make addend configirable via substituion command. 
+ const StreamInfo::UInt32Accessor* hits_addend_filter_state = + callbacks_->streamInfo().filterState()->getDataReadOnly( + HitsAddendFilterStateKey); + double hits_addend = 0; + if (hits_addend_filter_state != nullptr) { + hits_addend = hits_addend_filter_state->value(); + } - if (!descriptors.empty()) { state_ = State::Calling; initiating_call_ = true; - client_->limit(*this, getDomain(), descriptors, callbacks_->activeSpan(), + client_->limit(*this, getDomain(), descriptors_, callbacks_->activeSpan(), callbacks_->streamInfo(), hits_addend); initiating_call_ = false; } @@ -158,9 +160,14 @@ Http::FilterMetadataStatus Filter::encodeMetadata(Http::MetadataMap&) { void Filter::setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks&) {} void Filter::onDestroy() { - if (state_ == State::Calling) { - state_ = State::Complete; - client_->cancel(); + if (config_->applyOnStreamDone()) { + state_ = State::OnStreamDone; + makeRateLimitRequest(); + } else { + if (state_ == State::Calling) { + state_ = State::Complete; + client_->cancel(); + } } } @@ -170,6 +177,11 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, Http::RequestHeaderMapPtr&& request_headers_to_add, const std::string& response_body, Filters::Common::RateLimit::DynamicMetadataPtr&& dynamic_metadata) { + if (state_ == State::OnStreamDone) { + // We have no more work to do as the rate limit request made during on completion is + // fire-and-forget. 
+ return; + } state_ = State::Complete; response_headers_to_add_ = std::move(response_headers_to_add); Http::HeaderMapPtr req_headers_to_add = std::move(request_headers_to_add); diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 889e79a95957a..f689711ae41d3 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -61,7 +61,8 @@ class FilterConfig { response_headers_parser_(THROW_OR_RETURN_VALUE( Envoy::Router::HeaderParser::configure(config.response_headers_to_add()), Router::HeaderParserPtr)), - status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())) {} + status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())), + apply_on_stream_done_(config.apply_on_stream_done()) {} const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint64_t stage() const { return stage_; } @@ -79,6 +80,7 @@ class FilterConfig { Http::Code rateLimitedStatus() { return rate_limited_status_; } const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; } Http::Code statusOnError() const { return status_on_error_; } + bool applyOnStreamDone() const { return apply_on_stream_done_; } private: static FilterRequestType stringToType(const std::string& request_type) { @@ -123,6 +125,7 @@ class FilterConfig { const Http::Code rate_limited_status_; Router::HeaderParserPtr response_headers_parser_; const Http::Code status_on_error_; + const bool apply_on_stream_done_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -185,6 +188,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req private: void initiateCall(const Http::RequestHeaderMap& headers); + void makeRateLimitRequest(); void populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, std::vector& 
descriptors, const Http::RequestHeaderMap& headers) const; @@ -195,7 +199,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req Http::Context& httpContext() { return config_->httpContext(); } - enum class State { NotStarted, Calling, Complete, Responded }; + enum class State { NotStarted, Calling, Complete, Responded, OnStreamDone }; FilterConfigSharedPtr config_; Filters::Common::RateLimit::ClientPtr client_; @@ -206,6 +210,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; Http::RequestHeaderMap* request_headers_{}; + std::vector descriptors_{}; }; } // namespace RateLimitFilter From 8f295631f14dba23deeb040721b0312b21c1f951 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 11 Dec 2024 17:37:39 +0000 Subject: [PATCH 10/27] mention envoy.ratelimit.hits_addend Signed-off-by: Takeshi Yoneda --- api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 0a9127838e548..33a1e9b1c5805 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -141,7 +141,7 @@ message RateLimit { // // For example, let's say the upstream service calculates the usage statistics, returns them in the response body // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), + // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), // this can be used to subtract the usage statistics from the rate limit budget. 
// // The rate limit requests sent on the stream completion are "fire-and-forget" by nature, and rate limit is not enforced From 6598d4493d002e386169eef5b74d13b90365d252 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 11 Dec 2024 17:59:58 +0000 Subject: [PATCH 11/27] Add more comments Signed-off-by: Takeshi Yoneda --- .../extensions/filters/http/ratelimit/v3/rate_limit.proto | 5 +++++ source/extensions/filters/http/ratelimit/ratelimit.cc | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 33a1e9b1c5805..0a71d7730dd96 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -139,6 +139,11 @@ message RateLimit { // This is useful when the rate limit budget needs to reflect the response context that is not available // on the request path. // + // On the stream completion, the filter will reuse the exact same descriptors matched during the request path. + // In other words, the descriptors are not recalculated on the stream completion, but the rate limit requests + // are sent with the same descriptors as the original request sent during the request path. + // For example, request header matching descriptors are available on the stream completion. + // // For example, let's say the upstream service calculates the usage statistics, returns them in the response body // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. 
Lua filter), diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index a84808cb4df62..89d3f6e000450 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -88,7 +88,6 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { void Filter::makeRateLimitRequest() { if (!descriptors_.empty()) { - // TODO: Make addend configirable via substituion command. const StreamInfo::UInt32Accessor* hits_addend_filter_state = callbacks_->streamInfo().filterState()->getDataReadOnly( HitsAddendFilterStateKey); From 59c2d1db3200015216be1299b87a21524140902b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 11 Dec 2024 21:20:18 +0000 Subject: [PATCH 12/27] impl Signed-off-by: Takeshi Yoneda --- .../filters/common/ratelimit/ratelimit.h | 2 +- .../common/ratelimit/ratelimit_impl.cc | 13 ++-- .../filters/common/ratelimit/ratelimit_impl.h | 2 +- .../filters/http/ratelimit/config.cc | 6 +- .../filters/http/ratelimit/ratelimit.cc | 59 +++++++++++------ .../filters/http/ratelimit/ratelimit.h | 66 +++++++++++++++++-- .../filters/common/ratelimit/mocks.h | 2 +- test/extensions/filters/http/ratelimit/BUILD | 1 + .../filters/http/ratelimit/ratelimit_test.cc | 4 +- 9 files changed, 116 insertions(+), 39 deletions(-) diff --git a/source/extensions/filters/common/ratelimit/ratelimit.h b/source/extensions/filters/common/ratelimit/ratelimit.h index 11267cc7db03b..44a29d13da6ac 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit.h +++ b/source/extensions/filters/common/ratelimit/ratelimit.h @@ -90,7 +90,7 @@ class Client { */ virtual void limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info, + Tracing::Span& parent_span, OptRef stream_info, uint32_t hits_addend) PURE; }; diff --git 
a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 3350e132562a5..4b8b1866d005d 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -59,18 +59,19 @@ void GrpcClientImpl::createRequest(envoy::service::ratelimit::v3::RateLimitReque void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info, - uint32_t hits_addend) { + Tracing::Span& parent_span, + OptRef stream_info, uint32_t hits_addend) { ASSERT(callbacks_ == nullptr); callbacks_ = &callbacks; envoy::service::ratelimit::v3::RateLimitRequest request; createRequest(request, domain, descriptors, hits_addend); - request_ = - async_client_->send(service_method_, request, *this, parent_span, - Http::AsyncClient::RequestOptions().setTimeout(timeout_).setParentContext( - Http::AsyncClient::ParentContext{&stream_info})); + auto options = Http::AsyncClient::RequestOptions().setTimeout(timeout_); + if (stream_info) { + options.setParentContext(Http::AsyncClient::ParentContext{&*stream_info}); + } + request_ = async_client_->send(service_method_, request, *this, parent_span, options); } void GrpcClientImpl::onSuccess( diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.h b/source/extensions/filters/common/ratelimit/ratelimit_impl.h index 79502ec2ef787..61a6c1c5ec880 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.h +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.h @@ -57,7 +57,7 @@ class GrpcClientImpl : public Client, void cancel() override; void limit(RequestCallbacks& callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info, + Tracing::Span& parent_span, OptRef stream_info, uint32_t 
hits_addend = 0) override; // Grpc::AsyncRequestCallbacks diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index 59711a014b82a..a9dfb12117ef1 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -23,9 +23,9 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( auto& server_context = context.serverFactoryContext(); ASSERT(!proto_config.domain().empty()); - FilterConfigSharedPtr filter_config(new FilterConfig(proto_config, server_context.localInfo(), - context.scope(), server_context.runtime(), - server_context.httpContext())); + FilterConfigSharedPtr filter_config(new FilterConfig( + proto_config, server_context.localInfo(), context.scope(), server_context.runtime(), + server_context.httpContext(), server_context.threadLocal())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 89d3f6e000450..eb5e919bc6366 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -1,5 +1,6 @@ #include "source/extensions/filters/http/ratelimit/ratelimit.h" +#include #include #include @@ -83,27 +84,27 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { } break; } - makeRateLimitRequest(); -} -void Filter::makeRateLimitRequest() { if (!descriptors_.empty()) { - const StreamInfo::UInt32Accessor* hits_addend_filter_state = - callbacks_->streamInfo().filterState()->getDataReadOnly( - HitsAddendFilterStateKey); - double hits_addend = 0; - if (hits_addend_filter_state != nullptr) { - hits_addend = hits_addend_filter_state->value(); - } - state_ = State::Calling; initiating_call_ = true; client_->limit(*this, getDomain(), descriptors_, 
callbacks_->activeSpan(), - callbacks_->streamInfo(), hits_addend); + callbacks_->streamInfo(), getHitAddend()); initiating_call_ = false; } } +double Filter::getHitAddend() { + const StreamInfo::UInt32Accessor* hits_addend_filter_state = + callbacks_->streamInfo().filterState()->getDataReadOnly( + HitsAddendFilterStateKey); + double hits_addend = 0; + if (hits_addend_filter_state != nullptr) { + hits_addend = hits_addend_filter_state->value(); + } + return hits_addend; +} + Http::FilterHeadersStatus Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { if (!config_->runtime().snapshot().featureEnabled("ratelimit.http_filter_enabled", 100)) { return Http::FilterHeadersStatus::Continue; @@ -159,13 +160,20 @@ Http::FilterMetadataStatus Filter::encodeMetadata(Http::MetadataMap&) { void Filter::setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks&) {} void Filter::onDestroy() { - if (config_->applyOnStreamDone()) { - state_ = State::OnStreamDone; - makeRateLimitRequest(); + if (state_ == State::Calling) { + state_ = State::Complete; + client_->cancel(); } else { - if (state_ == State::Calling) { - state_ = State::Complete; - client_->cancel(); + // If the filter doesn't have a outstanding limit request (made during decodeHeaders) and has descriptors, + // then we can apply the rate limit on stream done if the config allows it. + if (config_->applyOnStreamDone() && !descriptors_.empty()) { + client_->cancel(); // Clears the internal state of the client, so that we can reuse it. + client_->limit(*this, getDomain(), descriptors_, Tracing::NullSpan::instance(), absl::nullopt, + getHitAddend()); + state_ = State::PendingReuqestOnStreamDone; + // Since this filter is being destroyed, we need to keep the client alive until the request + // is complete. So we add this filter to the destroy pending list at the filter config level. 
+ config_->addDestroyPendingFilter(shared_from_this()); } } } @@ -176,9 +184,11 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, Http::RequestHeaderMapPtr&& request_headers_to_add, const std::string& response_body, Filters::Common::RateLimit::DynamicMetadataPtr&& dynamic_metadata) { - if (state_ == State::OnStreamDone) { - // We have no more work to do as the rate limit request made during on completion is - // fire-and-forget. + if (state_ == State::PendingReuqestOnStreamDone) { + // Since this filter is already destroyed from HCM perspective, there's nothing to do here. + // Simply remove it from the destroy pending list which in turn will release the filter shared + // pointer. + config_->removeDestroyPendingFilter(*this); return; } state_ = State::Complete; @@ -341,6 +351,13 @@ std::string Filter::getDomain() { return config_->domain(); } +DestroyPendingFilterThreadLocal::~DestroyPendingFilterThreadLocal() { + for (const auto& filter : map_) { + filter.second->client()->cancel(); + } + map_.clear(); +} + } // namespace RateLimitFilter } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index f689711ae41d3..ede30e08b690e 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -35,6 +35,37 @@ enum class FilterRequestType { Internal, External, Both }; */ enum class VhRateLimitOptions { Override, Include, Ignore }; +class Filter; +using FilterSharedPtr = std::shared_ptr; + +/** + * Thread local storage for destroy pending filters. + */ +class DestroyPendingFilterThreadLocal : public ThreadLocal::ThreadLocalObject { +public: + DestroyPendingFilterThreadLocal() = default; + // When this class is destructed, it will cancel all the pending filters and release the shared + // pointers. 
+ ~DestroyPendingFilterThreadLocal() override; + /** + * Add the filter to the destroy pending list. + * @param filter the filter to add. + */ + void addDestroyPendingFilter(FilterSharedPtr filter) { + map_.emplace(filter.get(), std::move(filter)); + } + /** + * Remove the filter from the destroy pending list. + * @param filter the const reference to the filter to remove. + */ + void removeDestroyPendingFilter(const Filter& filter) { map_.erase(&filter); } + +private: + // We use a raw pointer as the key to be able to remove filters when the callback is called where + // shared_from_this() is not available. + absl::flat_hash_map map_ = {}; +}; + /** * Global configuration for the HTTP rate limit filter. */ @@ -42,7 +73,8 @@ class FilterConfig { public: FilterConfig(const envoy::extensions::filters::http::ratelimit::v3::RateLimit& config, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - Runtime::Loader& runtime, Http::Context& http_context) + Runtime::Loader& runtime, Http::Context& http_context, + ThreadLocal::SlotAllocator& tls_allocator) : domain_(config.domain()), stage_(static_cast(config.stage())), request_type_(config.request_type().empty() ? 
stringToType("both") : stringToType(config.request_type())), @@ -62,7 +94,16 @@ class FilterConfig { Envoy::Router::HeaderParser::configure(config.response_headers_to_add()), Router::HeaderParserPtr)), status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())), - apply_on_stream_done_(config.apply_on_stream_done()) {} + apply_on_stream_done_(config.apply_on_stream_done()) { + if (config.apply_on_stream_done()) { + pending_filter_slot_ = + ThreadLocal::TypedSlot::makeUnique(tls_allocator); + pending_filter_slot_->set( + [](Event::Dispatcher&) -> std::shared_ptr { + return std::make_shared(); + }); + } + } const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint64_t stage() const { return stage_; } @@ -81,6 +122,14 @@ class FilterConfig { const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; } Http::Code statusOnError() const { return status_on_error_; } bool applyOnStreamDone() const { return apply_on_stream_done_; } + void addDestroyPendingFilter(FilterSharedPtr filter) { + ASSERT(pending_filter_slot_); + (*pending_filter_slot_)->addDestroyPendingFilter(std::move(filter)); + } + void removeDestroyPendingFilter(const Filter& filter) { + ASSERT(pending_filter_slot_); + (*pending_filter_slot_)->removeDestroyPendingFilter(filter); + } private: static FilterRequestType stringToType(const std::string& request_type) { @@ -110,6 +159,9 @@ class FilterConfig { return Http::Code::InternalServerError; } + // Thread local storage for destory pending Filter by a hash map of shared pointers. + ThreadLocal::TypedSlotPtr pending_filter_slot_ = nullptr; + const std::string domain_; const uint64_t stage_; const FilterRequestType request_type_; @@ -154,7 +206,9 @@ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { * HTTP rate limit filter. 
Depending on the route configuration, this filter calls the global * rate limiting service before allowing further filter iteration. */ -class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::RequestCallbacks { +class Filter : public Http::StreamFilter, + public Filters::Common::RateLimit::RequestCallbacks, + std::enable_shared_from_this { public: Filter(FilterConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client) : config_(config), client_(std::move(client)) {} @@ -186,20 +240,22 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req const std::string& response_body, Filters::Common::RateLimit::DynamicMetadataPtr&& dynamic_metadata) override; + Filters::Common::RateLimit::ClientPtr& client() { return client_; } + private: void initiateCall(const Http::RequestHeaderMap& headers); - void makeRateLimitRequest(); void populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, std::vector& descriptors, const Http::RequestHeaderMap& headers) const; void populateResponseHeaders(Http::HeaderMap& response_headers, bool from_local_reply); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); + double getHitAddend(); VhRateLimitOptions getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route); std::string getDomain(); Http::Context& httpContext() { return config_->httpContext(); } - enum class State { NotStarted, Calling, Complete, Responded, OnStreamDone }; + enum class State { NotStarted, Calling, Complete, Responded, PendingReuqestOnStreamDone }; FilterConfigSharedPtr config_; Filters::Common::RateLimit::ClientPtr client_; diff --git a/test/extensions/filters/common/ratelimit/mocks.h b/test/extensions/filters/common/ratelimit/mocks.h index 5155d335f057d..259fa3f08881a 100644 --- a/test/extensions/filters/common/ratelimit/mocks.h +++ b/test/extensions/filters/common/ratelimit/mocks.h @@ -26,7 +26,7 @@ class MockClient : public Client { MOCK_METHOD(void, 
limit, (RequestCallbacks & callbacks, const std::string& domain, const std::vector& descriptors, - Tracing::Span& parent_span, const StreamInfo::StreamInfo& stream_info, + Tracing::Span& parent_span, OptRef stream_info, uint32_t hits_addend)); }; diff --git a/test/extensions/filters/http/ratelimit/BUILD b/test/extensions/filters/http/ratelimit/BUILD index db8ba366d5c5e..be0479341c9b1 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -29,6 +29,7 @@ envoy_extension_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/runtime:runtime_mocks", + "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 6caa81e8eb5f1..047e190f914cb 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -18,6 +18,7 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" +#include "test/mocks/thread_local/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -56,7 +57,7 @@ class HttpRateLimitFilterTest : public testing::Test { TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared(proto_config, local_info_, *stats_store_.rootScope(), - runtime_, http_context_); + runtime_, http_context_, thread_local_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); @@ -134,6 +135,7 @@ class HttpRateLimitFilterTest : public testing::Test { FilterConfigSharedPtr config_; std::unique_ptr filter_; 
NiceMock runtime_; + NiceMock thread_local_; NiceMock route_rate_limit_; NiceMock vh_rate_limit_; std::vector descriptor_{{{{"descriptor_key", "descriptor_value"}}}}; From 3047603238b0d6c67f81c7771d5481d861495d42 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 11 Dec 2024 21:54:31 +0000 Subject: [PATCH 13/27] format Signed-off-by: Takeshi Yoneda --- source/extensions/filters/http/ratelimit/ratelimit.cc | 4 ++-- source/extensions/filters/http/ratelimit/ratelimit.h | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index eb5e919bc6366..9360919feb75b 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -164,8 +164,8 @@ void Filter::onDestroy() { state_ = State::Complete; client_->cancel(); } else { - // If the filter doesn't have a outstanding limit request (made during decodeHeaders) and has descriptors, - // then we can apply the rate limit on stream done if the config allows it. + // If the filter doesn't have a outstanding limit request (made during decodeHeaders) and has + // descriptors, then we can apply the rate limit on stream done if the config allows it. if (config_->applyOnStreamDone() && !descriptors_.empty()) { client_->cancel(); // Clears the internal state of the client, so that we can reuse it. client_->limit(*this, getDomain(), descriptors_, Tracing::NullSpan::instance(), absl::nullopt, diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index ede30e08b690e..236c358a0c280 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -159,7 +159,8 @@ class FilterConfig { return Http::Code::InternalServerError; } - // Thread local storage for destory pending Filter by a hash map of shared pointers. 
+ // Only used when apply_on_stream_done_ is true to hold the pending filters to outlive the + // OnDestroy callback. ThreadLocal::TypedSlotPtr pending_filter_slot_ = nullptr; const std::string domain_; From c812eecab130eef44680c13e2ad0c2a2a8444cbc Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 11 Dec 2024 23:59:35 +0000 Subject: [PATCH 14/27] adds tests Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/ratelimit.cc | 6 +-- .../filters/http/ratelimit/ratelimit.h | 36 +++++++------- .../filters/http/ratelimit/ratelimit_test.cc | 49 ++++++++++++++++--- 3 files changed, 62 insertions(+), 29 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 9360919feb75b..ecf4ac1358db2 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -173,7 +173,7 @@ void Filter::onDestroy() { state_ = State::PendingReuqestOnStreamDone; // Since this filter is being destroyed, we need to keep the client alive until the request // is complete. So we add this filter to the destroy pending list at the filter config level. - config_->addDestroyPendingFilter(shared_from_this()); + config_->destroyPendingFilters().add(shared_from_this()); } } } @@ -188,7 +188,7 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, // Since this filter is already destroyed from HCM perspective, there's nothing to do here. // Simply remove it from the destroy pending list which in turn will release the filter shared // pointer. 
- config_->removeDestroyPendingFilter(*this); + config_->destroyPendingFilters().remove(*this); return; } state_ = State::Complete; @@ -351,7 +351,7 @@ std::string Filter::getDomain() { return config_->domain(); } -DestroyPendingFilterThreadLocal::~DestroyPendingFilterThreadLocal() { +DestroyPendingFiltersThreadLocal::~DestroyPendingFiltersThreadLocal() { for (const auto& filter : map_) { filter.second->client()->cancel(); } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 236c358a0c280..3cec828e9b858 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -41,24 +41,27 @@ using FilterSharedPtr = std::shared_ptr; /** * Thread local storage for destroy pending filters. */ -class DestroyPendingFilterThreadLocal : public ThreadLocal::ThreadLocalObject { +class DestroyPendingFiltersThreadLocal : public ThreadLocal::ThreadLocalObject { public: - DestroyPendingFilterThreadLocal() = default; + DestroyPendingFiltersThreadLocal() = default; // When this class is destructed, it will cancel all the pending filters and release the shared // pointers. - ~DestroyPendingFilterThreadLocal() override; + ~DestroyPendingFiltersThreadLocal() override; /** * Add the filter to the destroy pending list. * @param filter the filter to add. */ - void addDestroyPendingFilter(FilterSharedPtr filter) { - map_.emplace(filter.get(), std::move(filter)); - } + void add(FilterSharedPtr filter) { map_.emplace(filter.get(), std::move(filter)); } /** * Remove the filter from the destroy pending list. * @param filter the const reference to the filter to remove. */ - void removeDestroyPendingFilter(const Filter& filter) { map_.erase(&filter); } + void remove(const Filter& filter) { map_.erase(&filter); } + + /** + * @return the size of the destroy pending list. 
+ */ + size_t sizeForTesting() const { return map_.size(); } private: // We use a raw pointer as the key to be able to remove filters when the callback is called where @@ -97,10 +100,10 @@ class FilterConfig { apply_on_stream_done_(config.apply_on_stream_done()) { if (config.apply_on_stream_done()) { pending_filter_slot_ = - ThreadLocal::TypedSlot::makeUnique(tls_allocator); + ThreadLocal::TypedSlot::makeUnique(tls_allocator); pending_filter_slot_->set( - [](Event::Dispatcher&) -> std::shared_ptr { - return std::make_shared(); + [](Event::Dispatcher&) -> std::shared_ptr { + return std::make_shared(); }); } } @@ -122,13 +125,10 @@ class FilterConfig { const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; } Http::Code statusOnError() const { return status_on_error_; } bool applyOnStreamDone() const { return apply_on_stream_done_; } - void addDestroyPendingFilter(FilterSharedPtr filter) { - ASSERT(pending_filter_slot_); - (*pending_filter_slot_)->addDestroyPendingFilter(std::move(filter)); - } - void removeDestroyPendingFilter(const Filter& filter) { + + DestroyPendingFiltersThreadLocal& destroyPendingFilters() { ASSERT(pending_filter_slot_); - (*pending_filter_slot_)->removeDestroyPendingFilter(filter); + return *(*pending_filter_slot_); } private: @@ -161,7 +161,7 @@ class FilterConfig { // Only used when apply_on_stream_done_ is true to hold the pending filters to outlive the // OnDestroy callback. 
- ThreadLocal::TypedSlotPtr pending_filter_slot_ = nullptr; + ThreadLocal::TypedSlotPtr pending_filter_slot_ = nullptr; const std::string domain_; const uint64_t stage_; @@ -209,7 +209,7 @@ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { */ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::RequestCallbacks, - std::enable_shared_from_this { + public std::enable_shared_from_this { public: Filter(FilterConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client) : config_(config), client_(std::move(client)) {} diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 047e190f914cb..92608c9fc76ad 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -60,7 +60,7 @@ class HttpRateLimitFilterTest : public testing::Test { runtime_, http_context_, thread_local_); client_ = new Filters::Common::RateLimit::MockClient(); - filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); + filter_ = std::make_shared(config_, Filters::Common::RateLimit::ClientPtr{client_}); filter_->setDecoderFilterCallbacks(filter_callbacks_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.emplace_back( @@ -96,6 +96,11 @@ class HttpRateLimitFilterTest : public testing::Test { domain: foo )EOF"; + const std::string filter_config_apply_on_stream_done_ = R"EOF( + domain: foo + apply_on_stream_done: true + )EOF"; + const std::string rate_limited_status_config_ = R"EOF( domain: foo rate_limited_status: @@ -133,7 +138,7 @@ class HttpRateLimitFilterTest : public testing::Test { Buffer::OwnedImpl response_data_; NiceMock stats_store_; FilterConfigSharedPtr config_; - std::unique_ptr filter_; + std::shared_ptr filter_; NiceMock runtime_; 
NiceMock thread_local_; NiceMock route_rate_limit_; @@ -262,12 +267,12 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { } TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { - setUpTest(filter_config_); + setUpTest(filter_config_apply_on_stream_done_); InSequence s; filter_callbacks_.stream_info_.filter_state_->setData( "envoy.ratelimit.hits_addend", std::make_unique(5), - StreamInfo::FilterState::StateType::ReadOnly); + StreamInfo::FilterState::StateType::Mutable); EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _)) @@ -276,10 +281,9 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_CALL(filter_callbacks_.route_->virtual_host_.rate_limit_policy_, getApplicableRateLimit(0)); - EXPECT_CALL(*client_, limit(_, "foo", - testing::ContainerEq(std::vector{ - {{{"descriptor_key", "descriptor_value"}}}}), - _, _, 5)) + const auto descriptors = + std::vector{{{{"descriptor_key", "descriptor_value"}}}}; + EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptors), _, _, 5)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -306,6 +310,23 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); + + // Test the behavior when apply_on_stream_done is true. + testing::Mock::VerifyAndClearExpectations(client_); + filter_callbacks_.stream_info_.filter_state_->setData( + // Ensures that addend can be set differently than the request path. + "envoy.ratelimit.hits_addend", std::make_unique(100), + StreamInfo::FilterState::StateType::Mutable); + // Before making the call, the client should be canceled. 
+ EXPECT_CALL(*client_, cancel()); + EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptors), _, _, 100)); + filter_->onDestroy(); + // At this point, the filter should be added to the pending list. + EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 1); + // Calling complete callbacke should make the filter removed from the destroy pending map. + filter_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", + nullptr); + EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 0); } TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { @@ -1730,6 +1751,18 @@ TEST(ObjectFactory, HitsAddend) { EXPECT_EQ(hits_addend, object->serializeAsString()); } +TEST_F(HttpRateLimitFilterTest, DestroyPendingFiltersThreadLocal_Destructor) { + setUpTest(filter_config_apply_on_stream_done_); + config_->destroyPendingFilters().add(filter_); + EXPECT_EQ(1, config_->destroyPendingFilters().sizeForTesting()); + filter_.reset(); + + // The client should be reset when the thread local is destroyed, which is done by the + // destructor of the config. During the destruction, client_ cancel() should be called. 
+ EXPECT_CALL(*client_, cancel()); + config_.reset(); +} + } // namespace } // namespace RateLimitFilter } // namespace HttpFilters From d4448c401c4e173fc56ba252ef3e1ebb262c38d9 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Thu, 12 Dec 2024 00:03:58 +0000 Subject: [PATCH 15/27] ok Signed-off-by: Takeshi Yoneda --- test/extensions/filters/http/ratelimit/ratelimit_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 92608c9fc76ad..8620df7fc53bf 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -323,7 +323,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { filter_->onDestroy(); // At this point, the filter should be added to the pending list. EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 1); - // Calling complete callbacke should make the filter removed from the destroy pending map. + // Calling complete callback should make the filter removed from the destroy pending map. 
filter_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 0); From 7b9bd5754f156f36516d2059eed13e55e59a263b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Thu, 12 Dec 2024 01:09:50 +0000 Subject: [PATCH 16/27] simplifies Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/config.cc | 6 +- .../filters/http/ratelimit/ratelimit.cc | 28 +++---- .../filters/http/ratelimit/ratelimit.h | 84 ++++++------------- .../filters/http/ratelimit/ratelimit_test.cc | 34 +++----- 4 files changed, 50 insertions(+), 102 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/config.cc b/source/extensions/filters/http/ratelimit/config.cc index a9dfb12117ef1..59711a014b82a 100644 --- a/source/extensions/filters/http/ratelimit/config.cc +++ b/source/extensions/filters/http/ratelimit/config.cc @@ -23,9 +23,9 @@ Http::FilterFactoryCb RateLimitFilterConfig::createFilterFactoryFromProtoTyped( auto& server_context = context.serverFactoryContext(); ASSERT(!proto_config.domain().empty()); - FilterConfigSharedPtr filter_config(new FilterConfig( - proto_config, server_context.localInfo(), context.scope(), server_context.runtime(), - server_context.httpContext(), server_context.threadLocal())); + FilterConfigSharedPtr filter_config(new FilterConfig(proto_config, server_context.localInfo(), + context.scope(), server_context.runtime(), + server_context.httpContext())); const std::chrono::milliseconds timeout = std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(proto_config, timeout, 20)); diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index ecf4ac1358db2..f6767102d8013 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -168,12 +168,12 @@ void Filter::onDestroy() { // descriptors, then we can apply the rate limit on 
stream done if the config allows it. if (config_->applyOnStreamDone() && !descriptors_.empty()) { client_->cancel(); // Clears the internal state of the client, so that we can reuse it. - client_->limit(*this, getDomain(), descriptors_, Tracing::NullSpan::instance(), absl::nullopt, - getHitAddend()); - state_ = State::PendingReuqestOnStreamDone; // Since this filter is being destroyed, we need to keep the client alive until the request - // is complete. So we add this filter to the destroy pending list at the filter config level. - config_->destroyPendingFilters().add(shared_from_this()); + // is complete. + auto callback = new OnStreamDoneCallBack(std::move(client_)); + auto& ref = *callback; + callback->client()->limit(ref, getDomain(), descriptors_, Tracing::NullSpan::instance(), + absl::nullopt, getHitAddend()); } } } @@ -184,13 +184,6 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, Http::RequestHeaderMapPtr&& request_headers_to_add, const std::string& response_body, Filters::Common::RateLimit::DynamicMetadataPtr&& dynamic_metadata) { - if (state_ == State::PendingReuqestOnStreamDone) { - // Since this filter is already destroyed from HCM perspective, there's nothing to do here. - // Simply remove it from the destroy pending list which in turn will release the filter shared - // pointer. 
- config_->destroyPendingFilters().remove(*this); - return; - } state_ = State::Complete; response_headers_to_add_ = std::move(response_headers_to_add); Http::HeaderMapPtr req_headers_to_add = std::move(request_headers_to_add); @@ -351,11 +344,12 @@ std::string Filter::getDomain() { return config_->domain(); } -DestroyPendingFiltersThreadLocal::~DestroyPendingFiltersThreadLocal() { - for (const auto& filter : map_) { - filter.second->client()->cancel(); - } - map_.clear(); +void OnStreamDoneCallBack::complete(Filters::Common::RateLimit::LimitStatus, + Filters::Common::RateLimit::DescriptorStatusListPtr&&, + Http::ResponseHeaderMapPtr&&, Http::RequestHeaderMapPtr&&, + const std::string&, + Filters::Common::RateLimit::DynamicMetadataPtr&&) { + delete this; } } // namespace RateLimitFilter diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 3cec828e9b858..1931820364eb7 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -35,40 +35,6 @@ enum class FilterRequestType { Internal, External, Both }; */ enum class VhRateLimitOptions { Override, Include, Ignore }; -class Filter; -using FilterSharedPtr = std::shared_ptr; - -/** - * Thread local storage for destroy pending filters. - */ -class DestroyPendingFiltersThreadLocal : public ThreadLocal::ThreadLocalObject { -public: - DestroyPendingFiltersThreadLocal() = default; - // When this class is destructed, it will cancel all the pending filters and release the shared - // pointers. - ~DestroyPendingFiltersThreadLocal() override; - /** - * Add the filter to the destroy pending list. - * @param filter the filter to add. - */ - void add(FilterSharedPtr filter) { map_.emplace(filter.get(), std::move(filter)); } - /** - * Remove the filter from the destroy pending list. - * @param filter the const reference to the filter to remove. 
- */ - void remove(const Filter& filter) { map_.erase(&filter); } - - /** - * @return the size of the destroy pending list. - */ - size_t sizeForTesting() const { return map_.size(); } - -private: - // We use a raw pointer as the key to be able to remove filters when the callback is called where - // shared_from_this() is not available. - absl::flat_hash_map map_ = {}; -}; - /** * Global configuration for the HTTP rate limit filter. */ @@ -76,8 +42,7 @@ class FilterConfig { public: FilterConfig(const envoy::extensions::filters::http::ratelimit::v3::RateLimit& config, const LocalInfo::LocalInfo& local_info, Stats::Scope& scope, - Runtime::Loader& runtime, Http::Context& http_context, - ThreadLocal::SlotAllocator& tls_allocator) + Runtime::Loader& runtime, Http::Context& http_context) : domain_(config.domain()), stage_(static_cast(config.stage())), request_type_(config.request_type().empty() ? stringToType("both") : stringToType(config.request_type())), @@ -97,16 +62,7 @@ class FilterConfig { Envoy::Router::HeaderParser::configure(config.response_headers_to_add()), Router::HeaderParserPtr)), status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())), - apply_on_stream_done_(config.apply_on_stream_done()) { - if (config.apply_on_stream_done()) { - pending_filter_slot_ = - ThreadLocal::TypedSlot::makeUnique(tls_allocator); - pending_filter_slot_->set( - [](Event::Dispatcher&) -> std::shared_ptr { - return std::make_shared(); - }); - } - } + apply_on_stream_done_(config.apply_on_stream_done()) {} const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint64_t stage() const { return stage_; } @@ -126,11 +82,6 @@ class FilterConfig { Http::Code statusOnError() const { return status_on_error_; } bool applyOnStreamDone() const { return apply_on_stream_done_; } - DestroyPendingFiltersThreadLocal& destroyPendingFilters() { - ASSERT(pending_filter_slot_); - return *(*pending_filter_slot_); 
- } - private: static FilterRequestType stringToType(const std::string& request_type) { if (request_type == "internal") { @@ -159,10 +110,6 @@ class FilterConfig { return Http::Code::InternalServerError; } - // Only used when apply_on_stream_done_ is true to hold the pending filters to outlive the - // OnDestroy callback. - ThreadLocal::TypedSlotPtr pending_filter_slot_ = nullptr; - const std::string domain_; const uint64_t stage_; const FilterRequestType request_type_; @@ -207,9 +154,7 @@ class FilterConfigPerRoute : public Router::RouteSpecificFilterConfig { * HTTP rate limit filter. Depending on the route configuration, this filter calls the global * rate limiting service before allowing further filter iteration. */ -class Filter : public Http::StreamFilter, - public Filters::Common::RateLimit::RequestCallbacks, - public std::enable_shared_from_this { +class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::RequestCallbacks { public: Filter(FilterConfigSharedPtr config, Filters::Common::RateLimit::ClientPtr&& client) : config_(config), client_(std::move(client)) {} @@ -256,7 +201,7 @@ class Filter : public Http::StreamFilter, Http::Context& httpContext() { return config_->httpContext(); } - enum class State { NotStarted, Calling, Complete, Responded, PendingReuqestOnStreamDone }; + enum class State { NotStarted, Calling, Complete, Responded }; FilterConfigSharedPtr config_; Filters::Common::RateLimit::ClientPtr client_; @@ -270,6 +215,27 @@ class Filter : public Http::StreamFilter, std::vector descriptors_{}; }; +/** + * This implements the rate limit callback that outlives the filter holding the client. + * On completion, it deletes itself. 
+ */ +class OnStreamDoneCallBack : public Filters::Common::RateLimit::RequestCallbacks { +public: + OnStreamDoneCallBack(Filters::Common::RateLimit::ClientPtr client) : client_(std::move(client)) {} + ~OnStreamDoneCallBack() override = default; + + // RateLimit::RequestCallbacks + void complete(Filters::Common::RateLimit::LimitStatus, + Filters::Common::RateLimit::DescriptorStatusListPtr&&, Http::ResponseHeaderMapPtr&&, + Http::RequestHeaderMapPtr&&, const std::string&, + Filters::Common::RateLimit::DynamicMetadataPtr&&) override; + + Filters::Common::RateLimit::ClientPtr& client() { return client_; } + +private: + Filters::Common::RateLimit::ClientPtr client_; +}; + } // namespace RateLimitFilter } // namespace HttpFilters } // namespace Extensions diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 8620df7fc53bf..8fb46fe720a36 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -1,3 +1,4 @@ +#include #include #include #include @@ -57,10 +58,10 @@ class HttpRateLimitFilterTest : public testing::Test { TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared(proto_config, local_info_, *stats_store_.rootScope(), - runtime_, http_context_, thread_local_); + runtime_, http_context_); client_ = new Filters::Common::RateLimit::MockClient(); - filter_ = std::make_shared(config_, Filters::Common::RateLimit::ClientPtr{client_}); + filter_ = std::make_unique(config_, Filters::Common::RateLimit::ClientPtr{client_}); filter_->setDecoderFilterCallbacks(filter_callbacks_); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.clear(); filter_callbacks_.route_->route_entry_.rate_limit_policy_.rate_limit_policy_entry_.emplace_back( @@ -138,7 +139,7 @@ class HttpRateLimitFilterTest : public testing::Test { Buffer::OwnedImpl response_data_; NiceMock stats_store_; 
FilterConfigSharedPtr config_; - std::shared_ptr filter_; + std::unique_ptr filter_; NiceMock runtime_; NiceMock thread_local_; NiceMock route_rate_limit_; @@ -317,16 +318,15 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { // Ensures that addend can be set differently than the request path. "envoy.ratelimit.hits_addend", std::make_unique(100), StreamInfo::FilterState::StateType::Mutable); - // Before making the call, the client should be canceled. EXPECT_CALL(*client_, cancel()); - EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptors), _, _, 100)); + EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptors), _, _, 100)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); filter_->onDestroy(); - // At this point, the filter should be added to the pending list. - EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 1); - // Calling complete callback should make the filter removed from the destroy pending map. - filter_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", - nullptr); - EXPECT_EQ(config_->destroyPendingFilters().sizeForTesting(), 0); + request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, + nullptr, "", nullptr); } TEST_F(HttpRateLimitFilterTest, OkResponseWithHeaders) { @@ -1751,18 +1751,6 @@ TEST(ObjectFactory, HitsAddend) { EXPECT_EQ(hits_addend, object->serializeAsString()); } -TEST_F(HttpRateLimitFilterTest, DestroyPendingFiltersThreadLocal_Destructor) { - setUpTest(filter_config_apply_on_stream_done_); - config_->destroyPendingFilters().add(filter_); - EXPECT_EQ(1, config_->destroyPendingFilters().sizeForTesting()); - filter_.reset(); - - // The client should be reset when the thread local is destroyed, which is done by the - // destructor of the config. During the destruction, client_ cancel() should be called. 
- EXPECT_CALL(*client_, cancel()); - config_.reset(); -} - } // namespace } // namespace RateLimitFilter } // namespace HttpFilters From 011a816bbfef556b534795228ee9267df1aced2c Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Thu, 12 Dec 2024 01:14:24 +0000 Subject: [PATCH 17/27] more Signed-off-by: Takeshi Yoneda --- source/extensions/filters/http/ratelimit/ratelimit.h | 2 -- test/extensions/filters/http/ratelimit/ratelimit_test.cc | 1 - 2 files changed, 3 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 1931820364eb7..d6554b0dc50a1 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -186,8 +186,6 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req const std::string& response_body, Filters::Common::RateLimit::DynamicMetadataPtr&& dynamic_metadata) override; - Filters::Common::RateLimit::ClientPtr& client() { return client_; } - private: void initiateCall(const Http::RequestHeaderMap& headers); void populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 8fb46fe720a36..f622d16d58282 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -1,4 +1,3 @@ -#include #include #include #include From 57564f4525416df1ac7527ebc1585c5ce90d9c2a Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Thu, 12 Dec 2024 18:03:45 +0000 Subject: [PATCH 18/27] review: flag on descriptor/policy level Signed-off-by: Takeshi Yoneda --- .../config/route/v3/route_components.proto | 18 ++++++++++ .../http/ratelimit/v3/rate_limit.proto | 22 +----------- envoy/router/router_ratelimit.h | 5 +++ source/common/router/router_ratelimit.cc | 3 +- 
source/common/router/router_ratelimit.h | 2 ++ .../filters/http/ratelimit/ratelimit.cc | 34 ++++++++++++------- .../filters/http/ratelimit/ratelimit.h | 11 +++--- .../filters/http/ratelimit/ratelimit_test.cc | 21 ++++++------ test/mocks/router/mocks.h | 1 + 9 files changed, 67 insertions(+), 50 deletions(-) diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index ce781d100c9c4..a3efeafd1c202 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1868,6 +1868,7 @@ message VirtualCluster { // Global rate limiting :ref:`architecture overview `. // Also applies to Local rate limiting :ref:`using descriptors `. +// [#next-free-field: 6] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; @@ -2193,6 +2194,23 @@ message RateLimit { // from metadata, no override is provided. See :ref:`rate limit override // ` for more information. Override limit = 4; + + // If true, the rate limit request will be applied when the stream completes. The default value is false. + // This is useful when the rate limit budget needs to reflect the response context that is not available + // on the request path. + // + // For example, let's say the upstream service calculates the usage statistics and returns them in the response body + // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. + // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), + // this can be used to subtract the usage statistics from the rate limit budget. + // + // A rate limit applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this config. 
+ // In other words, the current request won't be blocked when this is true, but the budget will be updated for the subsequent + // requests based on the action with this field set to true. Users should ensure that the rate limit is enforced by the actions + // applied on the request path, i.e. the ones with this field set to false. + // + // Currently, this is only supported by the HTTP global rate limit filter. + bool apply_on_stream_done = 5; } // .. attention:: diff --git a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index 0a71d7730dd96..3e33536b228a5 100644 --- a/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -25,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 15] +// [#next-free-field: 14] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -134,26 +134,6 @@ message RateLimit { // Optional additional prefix to use when emitting statistics. This allows to distinguish // emitted statistics between configured ``ratelimit`` filters in an HTTP filter chain. string stat_prefix = 13; - - // If true, rate limit requests will also be sent to the rate limit service when the stream completes. - // This is useful when the rate limit budget needs to reflect the response context that is not available - // on the request path. - // - // On the stream completion, the filter will reuse the exact same descriptors matched during the request path. - // In other words, the descriptors are not recalculated on the stream completion, but the rate limit requests - // are sent with the same descriptors as the original request sent during the request path. 
- // For example, request header matching descriptors are available on the stream completion. - // - // For example, let's say the upstream service calculates the usage statistics, returns them in the response body - // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set ``envoy.ratelimit.hits_addend`` based on the response (e.g. Lua filter), - // this can be used to subtract the usage statistics from the rate limit budget. - // - // The rate limit requests sent on the stream completion are "fire-and-forget" by nature, and rate limit is not enforced - // on the current HTTP stream being completed. The filter will only update the budget for the subsequent requests at - // that point. Hence the effect of the rate limit requests made during the stream completion is not visible in the current - // but only in the subsequent requests. - bool apply_on_stream_done = 14; } // Global rate limiting :ref:`architecture overview `. diff --git a/envoy/router/router_ratelimit.h b/envoy/router/router_ratelimit.h index 199b3248130e6..7b3dcdbedc72f 100644 --- a/envoy/router/router_ratelimit.h +++ b/envoy/router/router_ratelimit.h @@ -48,6 +48,11 @@ class RateLimitPolicyEntry { */ virtual const std::string& disableKey() const PURE; + /** + * @return true if this rate limit policy should be applied on stream done. + */ + virtual bool applyOnStreamDone() const PURE; + /** * Potentially populate the descriptor array with new descriptors to query. * @param descriptors supplies the descriptor array to optionally fill. 
diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index 77c285a85a24d..386374d397254 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -267,7 +267,8 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( const envoy::config::route::v3::RateLimit& config, Server::Configuration::CommonFactoryContext& context, absl::Status& creation_status) : disable_key_(config.disable_key()), - stage_(static_cast(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stage, 0))) { + stage_(static_cast(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, stage, 0))), + apply_on_stream_done_(config.apply_on_stream_done()) { for (const auto& action : config.actions()) { switch (action.action_specifier_case()) { case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kSourceCluster: diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 3fb5149a4cc25..0d2774e443c4e 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -256,12 +256,14 @@ class RateLimitPolicyEntryImpl : public RateLimitPolicyEntry { const std::string& local_service_cluster, const Http::RequestHeaderMap&, const StreamInfo::StreamInfo& info) const override; + bool applyOnStreamDone() const override { return apply_on_stream_done_; } private: const std::string disable_key_; uint64_t stage_; std::vector actions_; absl::optional limit_override_ = absl::nullopt; + bool apply_on_stream_done_; }; /** diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index f6767102d8013..e374b5f967c14 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -66,9 +66,11 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { return; } + std::vector descriptors; + const Router::RouteEntry* route_entry = 
route->routeEntry(); // Get all applicable rate limit policy entries for the route. - populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors_, headers); + populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, headers); VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route); @@ -76,19 +78,19 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { case VhRateLimitOptions::Ignore: break; case VhRateLimitOptions::Include: - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors_, headers); + populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); break; case VhRateLimitOptions::Override: if (route_entry->rateLimitPolicy().empty()) { - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors_, headers); + populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); } break; } - if (!descriptors_.empty()) { + if (!descriptors.empty()) { state_ = State::Calling; initiating_call_ = true; - client_->limit(*this, getDomain(), descriptors_, callbacks_->activeSpan(), + client_->limit(*this, getDomain(), descriptors, callbacks_->activeSpan(), callbacks_->streamInfo(), getHitAddend()); initiating_call_ = false; } @@ -164,16 +166,16 @@ void Filter::onDestroy() { state_ = State::Complete; client_->cancel(); } else { - // If the filter doesn't have a outstanding limit request (made during decodeHeaders) and has + // If the filter doesn't have an outstanding limit request (made during decodeHeaders) and has // descriptors, then we can apply the rate limit on stream done if the config allows it. - if (config_->applyOnStreamDone() && !descriptors_.empty()) { + if (!descriptors_apply_on_stream_done_.empty()) { client_->cancel(); // Clears the internal state of the client, so that we can reuse it. 
// Since this filter is being destroyed, we need to keep the client alive until the request // is complete. auto callback = new OnStreamDoneCallBack(std::move(client_)); auto& ref = *callback; - callback->client()->limit(ref, getDomain(), descriptors_, Tracing::NullSpan::instance(), - absl::nullopt, getHitAddend()); + callback->client()->limit(ref, getDomain(), descriptors_apply_on_stream_done_, + Tracing::NullSpan::instance(), absl::nullopt, getHitAddend()); } } } @@ -272,7 +274,7 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, std::vector& descriptors, - const Http::RequestHeaderMap& headers) const { + const Http::RequestHeaderMap& headers) { for (const Router::RateLimitPolicyEntry& rate_limit : rate_limit_policy.getApplicableRateLimit(config_->stage())) { const std::string& disable_key = rate_limit.disableKey(); @@ -281,8 +283,16 @@ void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_li fmt::format("ratelimit.{}.http_filter_enabled", disable_key), 100)) { continue; } - rate_limit.populateDescriptors(descriptors, config_->localInfo().clusterName(), headers, - callbacks_->streamInfo()); + if (rate_limit.applyOnStreamDone()) { + // If the rate limit policy is to be applied on stream done, we populate the descriptors in a + // separate vector. This vector will be used to apply the rate limit on stream done. 
+ rate_limit.populateDescriptors(descriptors_apply_on_stream_done_, + config_->localInfo().clusterName(), headers, + callbacks_->streamInfo()); + } else { + rate_limit.populateDescriptors(descriptors, config_->localInfo().clusterName(), headers, + callbacks_->streamInfo()); + } } } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index d6554b0dc50a1..ef2b2f1b61dcd 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -61,8 +61,7 @@ class FilterConfig { response_headers_parser_(THROW_OR_RETURN_VALUE( Envoy::Router::HeaderParser::configure(config.response_headers_to_add()), Router::HeaderParserPtr)), - status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())), - apply_on_stream_done_(config.apply_on_stream_done()) {} + status_on_error_(toRatelimitServerErrorCode(config.status_on_error().code())) {} const std::string& domain() const { return domain_; } const LocalInfo::LocalInfo& localInfo() const { return local_info_; } uint64_t stage() const { return stage_; } @@ -80,7 +79,6 @@ class FilterConfig { Http::Code rateLimitedStatus() { return rate_limited_status_; } const Router::HeaderParser& responseHeadersParser() const { return *response_headers_parser_; } Http::Code statusOnError() const { return status_on_error_; } - bool applyOnStreamDone() const { return apply_on_stream_done_; } private: static FilterRequestType stringToType(const std::string& request_type) { @@ -125,7 +123,6 @@ class FilterConfig { const Http::Code rate_limited_status_; Router::HeaderParserPtr response_headers_parser_; const Http::Code status_on_error_; - const bool apply_on_stream_done_; }; using FilterConfigSharedPtr = std::shared_ptr; @@ -190,7 +187,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req void initiateCall(const Http::RequestHeaderMap& headers); void populateRateLimitDescriptors(const 
Router::RateLimitPolicy& rate_limit_policy, std::vector& descriptors, - const Http::RequestHeaderMap& headers) const; + const Http::RequestHeaderMap& headers); void populateResponseHeaders(Http::HeaderMap& response_headers, bool from_local_reply); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); double getHitAddend(); @@ -210,7 +207,9 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; Http::RequestHeaderMap* request_headers_{}; - std::vector descriptors_{}; + // Holds the descriptors that should be applied on stream done which will be populated during + // decodeHeaders. + std::vector descriptors_apply_on_stream_done_{}; }; /** diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index f622d16d58282..3009fc0f719ed 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -96,11 +96,6 @@ class HttpRateLimitFilterTest : public testing::Test { domain: foo )EOF"; - const std::string filter_config_apply_on_stream_done_ = R"EOF( - domain: foo - apply_on_stream_done: true - )EOF"; - const std::string rate_limited_status_config_ = R"EOF( domain: foo rate_limited_status: @@ -267,7 +262,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponse) { } TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { - setUpTest(filter_config_apply_on_stream_done_); + setUpTest(filter_config_); InSequence s; filter_callbacks_.stream_info_.filter_state_->setData( @@ -281,9 +276,15 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_CALL(filter_callbacks_.route_->virtual_host_.rate_limit_policy_, getApplicableRateLimit(0)); - const auto descriptors = - std::vector{{{{"descriptor_key", "descriptor_value"}}}}; - EXPECT_CALL(*client_, limit(_, "foo", 
testing::ContainerEq(descriptors), _, _, 5)) + // Make sure that descriptor_two_ will be applied on stream done. + EXPECT_CALL(vh_rate_limit_, applyOnStreamDone()).WillOnce(Return(true)); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _)) + .WillOnce(SetArgReferee<0>(descriptor_two_)); + + EXPECT_CALL(*client_, limit(_, "foo", + testing::ContainerEq(std::vector{ + {{{"descriptor_key", "descriptor_value"}}}}), + _, _, 5)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; @@ -318,7 +319,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { "envoy.ratelimit.hits_addend", std::make_unique(100), StreamInfo::FilterState::StateType::Mutable); EXPECT_CALL(*client_, cancel()); - EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptors), _, _, 100)) + EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptor_two_), _, _, 100)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 9c21895dcad9a..e4e3ac4093571 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -250,6 +250,7 @@ class MockRateLimitPolicyEntry : public RateLimitPolicyEntry { const std::string& local_service_cluster, const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& info), (const)); + MOCK_METHOD(bool, applyOnStreamDone, (), (const)); uint64_t stage_{}; std::string disable_key_; From 146af55bbc196afaa992769e1be97d30574f5f3b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Thu, 12 Dec 2024 18:06:01 +0000 Subject: [PATCH 19/27] more Signed-off-by: Takeshi Yoneda --- test/extensions/filters/http/ratelimit/BUILD | 1 - test/extensions/filters/http/ratelimit/ratelimit_test.cc | 2 -- 2 files changed, 3 deletions(-) diff --git a/test/extensions/filters/http/ratelimit/BUILD 
b/test/extensions/filters/http/ratelimit/BUILD index be0479341c9b1..db8ba366d5c5e 100644 --- a/test/extensions/filters/http/ratelimit/BUILD +++ b/test/extensions/filters/http/ratelimit/BUILD @@ -29,7 +29,6 @@ envoy_extension_cc_test( "//test/mocks/local_info:local_info_mocks", "//test/mocks/ratelimit:ratelimit_mocks", "//test/mocks/runtime:runtime_mocks", - "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/tracing:tracing_mocks", "//test/test_common:utility_lib", "@envoy_api//envoy/extensions/filters/http/ratelimit/v3:pkg_cc_proto", diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index 3009fc0f719ed..c9c3f0c091601 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -18,7 +18,6 @@ #include "test/mocks/local_info/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" -#include "test/mocks/thread_local/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -135,7 +134,6 @@ class HttpRateLimitFilterTest : public testing::Test { FilterConfigSharedPtr config_; std::unique_ptr filter_; NiceMock runtime_; - NiceMock thread_local_; NiceMock route_rate_limit_; NiceMock vh_rate_limit_; std::vector descriptor_{{{{"descriptor_key", "descriptor_value"}}}}; From 702e427b258e73405533549db3661a3765a5a5e5 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Tue, 17 Dec 2024 23:18:29 +0000 Subject: [PATCH 20/27] reviews: apply comments Signed-off-by: Takeshi Yoneda --- source/extensions/filters/common/ratelimit/ratelimit_impl.cc | 4 ++-- source/extensions/filters/http/ratelimit/ratelimit.cc | 5 ++--- source/extensions/filters/http/ratelimit/ratelimit.h | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc 
b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 4b8b1866d005d..23ef3321a9d37 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -68,8 +68,8 @@ void GrpcClientImpl::limit(RequestCallbacks& callbacks, const std::string& domai createRequest(request, domain, descriptors, hits_addend); auto options = Http::AsyncClient::RequestOptions().setTimeout(timeout_); - if (stream_info) { - options.setParentContext(Http::AsyncClient::ParentContext{&*stream_info}); + if (stream_info.has_value()) { + options.setParentContext(Http::AsyncClient::ParentContext{stream_info.ptr()}); } request_ = async_client_->send(service_method_, request, *this, parent_span, options); } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index e374b5f967c14..a9655f091cc84 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -173,9 +173,8 @@ void Filter::onDestroy() { // Since this filter is being destroyed, we need to keep the client alive until the request // is complete. 
auto callback = new OnStreamDoneCallBack(std::move(client_)); - auto& ref = *callback; - callback->client()->limit(ref, getDomain(), descriptors_apply_on_stream_done_, - Tracing::NullSpan::instance(), absl::nullopt, getHitAddend()); + callback->client().limit(*callback, getDomain(), descriptors_apply_on_stream_done_, + Tracing::NullSpan::instance(), absl::nullopt, getHitAddend()); } } } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index ef2b2f1b61dcd..20fcd316245a8 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -227,7 +227,7 @@ class OnStreamDoneCallBack : public Filters::Common::RateLimit::RequestCallbacks Http::RequestHeaderMapPtr&&, const std::string&, Filters::Common::RateLimit::DynamicMetadataPtr&&) override; - Filters::Common::RateLimit::ClientPtr& client() { return client_; } + Filters::Common::RateLimit::Client& client() { return *client_; } private: Filters::Common::RateLimit::ClientPtr client_; From f3c7b32c3a76ad841b52ae1b5807090c420a3d8b Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 00:43:51 +0000 Subject: [PATCH 21/27] review: get descriptors in done Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/ratelimit.cc | 71 ++++++++++--------- .../filters/http/ratelimit/ratelimit.h | 14 ++-- .../filters/http/ratelimit/ratelimit_test.cc | 21 +++--- 3 files changed, 59 insertions(+), 47 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index a9655f091cc84..09f10599523e7 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -1,6 +1,5 @@ #include "source/extensions/filters/http/ratelimit/ratelimit.h" -#include #include #include @@ -56,6 +55,20 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { 
return; } + std::vector descriptors; + populateRateLimitDescriptors(descriptors, headers, false); + if (!descriptors.empty()) { + state_ = State::Calling; + initiating_call_ = true; + client_->limit(*this, getDomain(), descriptors, callbacks_->activeSpan(), + callbacks_->streamInfo(), getHitAddend()); + initiating_call_ = false; + } +} + +void Filter::populateRateLimitDescriptors(std::vector& descriptors, + const Http::RequestHeaderMap& headers, + bool on_stream_done) { Router::RouteConstSharedPtr route = callbacks_->route(); if (!route || !route->routeEntry()) { return; @@ -66,11 +79,10 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { return; } - std::vector descriptors; - const Router::RouteEntry* route_entry = route->routeEntry(); // Get all applicable rate limit policy entries for the route. - populateRateLimitDescriptors(route_entry->rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptorsForPolicy(route_entry->rateLimitPolicy(), descriptors, headers, + on_stream_done); VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route); @@ -78,22 +90,16 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { case VhRateLimitOptions::Ignore: break; case VhRateLimitOptions::Include: - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptorsForPolicy(route->virtualHost().rateLimitPolicy(), descriptors, + headers, on_stream_done); break; case VhRateLimitOptions::Override: if (route_entry->rateLimitPolicy().empty()) { - populateRateLimitDescriptors(route->virtualHost().rateLimitPolicy(), descriptors, headers); + populateRateLimitDescriptorsForPolicy(route->virtualHost().rateLimitPolicy(), descriptors, + headers, on_stream_done); } break; } - - if (!descriptors.empty()) { - state_ = State::Calling; - initiating_call_ = true; - client_->limit(*this, getDomain(), descriptors, callbacks_->activeSpan(), - callbacks_->streamInfo(), 
getHitAddend()); - initiating_call_ = false; - } } double Filter::getHitAddend() { @@ -167,14 +173,18 @@ void Filter::onDestroy() { client_->cancel(); } else { // If the filter doesn't have an outstanding limit request (made during decodeHeaders) and has - // descriptors, then we can apply the rate limit on stream done if the config allows it. - if (!descriptors_apply_on_stream_done_.empty()) { - client_->cancel(); // Clears the internal state of the client, so that we can reuse it. - // Since this filter is being destroyed, we need to keep the client alive until the request - // is complete. - auto callback = new OnStreamDoneCallBack(std::move(client_)); - callback->client().limit(*callback, getDomain(), descriptors_apply_on_stream_done_, - Tracing::NullSpan::instance(), absl::nullopt, getHitAddend()); + // at least one policiy with apply_on_stream_done=true, then we apply the rate limit here. + if (has_apply_on_stream_done_policy_) { + std::vector descriptors; + populateRateLimitDescriptors(descriptors, *request_headers_, true); + if (!descriptors.empty()) { + client_->cancel(); // Clears the internal state of the client, so that we can reuse it. + // Since this filter is being destroyed, we need to keep the client alive until the request + // is complete. 
+ auto callback = new OnStreamDoneCallBack(std::move(client_)); + callback->client().limit(*callback, getDomain(), descriptors, Tracing::NullSpan::instance(), + absl::nullopt, getHitAddend()); + } } } } @@ -271,9 +281,10 @@ void Filter::complete(Filters::Common::RateLimit::LimitStatus status, } } -void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, - std::vector& descriptors, - const Http::RequestHeaderMap& headers) { +void Filter::populateRateLimitDescriptorsForPolicy(const Router::RateLimitPolicy& rate_limit_policy, + std::vector& descriptors, + const Http::RequestHeaderMap& headers, + bool on_stream_done) { for (const Router::RateLimitPolicyEntry& rate_limit : rate_limit_policy.getApplicableRateLimit(config_->stage())) { const std::string& disable_key = rate_limit.disableKey(); @@ -282,13 +293,9 @@ void Filter::populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_li fmt::format("ratelimit.{}.http_filter_enabled", disable_key), 100)) { continue; } - if (rate_limit.applyOnStreamDone()) { - // If the rate limit policy is to be applied on stream done, we populate the descriptors in a - // separate vector. This vector will be used to apply the rate limit on stream done. 
- rate_limit.populateDescriptors(descriptors_apply_on_stream_done_, - config_->localInfo().clusterName(), headers, - callbacks_->streamInfo()); - } else { + const bool apply_on_stream_done = rate_limit.applyOnStreamDone(); + has_apply_on_stream_done_policy_ |= apply_on_stream_done; + if ((on_stream_done && apply_on_stream_done) || (!on_stream_done && !apply_on_stream_done)) { rate_limit.populateDescriptors(descriptors, config_->localInfo().clusterName(), headers, callbacks_->streamInfo()); } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 20fcd316245a8..6239e840ebb7b 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -173,6 +173,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override; + void encodeComplete() override {} void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override; // RateLimit::RequestCallbacks @@ -185,9 +186,12 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req private: void initiateCall(const Http::RequestHeaderMap& headers); - void populateRateLimitDescriptors(const Router::RateLimitPolicy& rate_limit_policy, - std::vector& descriptors, - const Http::RequestHeaderMap& headers); + void populateRateLimitDescriptors(std::vector& descriptors, + const Http::RequestHeaderMap& headers, bool on_stream_done); + void populateRateLimitDescriptorsForPolicy(const Router::RateLimitPolicy& rate_limit_policy, + std::vector& descriptors, + const Http::RequestHeaderMap& headers, + bool on_stream_done); void populateResponseHeaders(Http::HeaderMap& response_headers, bool 
from_local_reply); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); double getHitAddend(); @@ -207,9 +211,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; Http::RequestHeaderMap* request_headers_{}; - // Holds the descriptors that should be applied on stream done which will be populated during - // decodeHeaders. - std::vector descriptors_apply_on_stream_done_{}; + bool has_apply_on_stream_done_policy_ = false; }; /** diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index c9c3f0c091601..c634533f1dd77 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -271,14 +271,7 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_CALL(route_rate_limit_, populateDescriptors(_, _, _, _)) .WillOnce(SetArgReferee<0>(descriptor_)); - EXPECT_CALL(filter_callbacks_.route_->virtual_host_.rate_limit_policy_, - getApplicableRateLimit(0)); - - // Make sure that descriptor_two_ will be applied on stream done. - EXPECT_CALL(vh_rate_limit_, applyOnStreamDone()).WillOnce(Return(true)); - EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _)) - .WillOnce(SetArgReferee<0>(descriptor_two_)); - + EXPECT_CALL(vh_rate_limit_, applyOnStreamDone()).WillRepeatedly(Return(true)); EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector{ {{{"descriptor_key", "descriptor_value"}}}}), @@ -310,18 +303,28 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_EQ( 1U, filter_callbacks_.clusterInfo()->statsScope().counterFromStatName(ratelimit_ok_).value()); - // Test the behavior when apply_on_stream_done is true. + // Test the behavior for the apply_on_stream_done flag. 
testing::Mock::VerifyAndClearExpectations(client_); + testing::Mock::VerifyAndClearExpectations(&filter_callbacks_); + testing::Mock::VerifyAndClearExpectations( + &filter_callbacks_.route_->route_entry_.rate_limit_policy_); + testing::Mock::VerifyAndClearExpectations(&route_rate_limit_); + testing::Mock::VerifyAndClearExpectations(&vh_rate_limit_); filter_callbacks_.stream_info_.filter_state_->setData( // Ensures that addend can be set differently than the request path. "envoy.ratelimit.hits_addend", std::make_unique(100), StreamInfo::FilterState::StateType::Mutable); + EXPECT_CALL(filter_callbacks_.route_->route_entry_.rate_limit_policy_, getApplicableRateLimit(0)); + EXPECT_CALL(vh_rate_limit_, applyOnStreamDone()).WillRepeatedly(Return(true)); + EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _)) + .WillOnce(SetArgReferee<0>(descriptor_two_)); EXPECT_CALL(*client_, cancel()); EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptor_two_), _, _, 100)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); + filter_->onDestroy(); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); From 74a6cc9799b30ce5ed1f9f89fe3473cce1c931d2 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 04:33:29 +0000 Subject: [PATCH 22/27] typo Signed-off-by: Takeshi Yoneda --- source/extensions/filters/http/ratelimit/ratelimit.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 09f10599523e7..7c05e63e247d7 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -173,7 +173,7 @@ void Filter::onDestroy() { client_->cancel(); } else { // If the filter doesn't have an outstanding limit request (made 
during decodeHeaders) and has - // at least one policiy with apply_on_stream_done=true, then we apply the rate limit here. + // at least one policy with apply_on_stream_done=true, then we apply the rate limit here. if (has_apply_on_stream_done_policy_) { std::vector descriptors; populateRateLimitDescriptors(descriptors, *request_headers_, true); From 0ba3b255a10e9eb44a87dd972a7646ce6c937afc Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 04:56:41 +0000 Subject: [PATCH 23/27] remove unnecessary change Signed-off-by: Takeshi Yoneda --- source/extensions/filters/http/ratelimit/ratelimit.h | 1 - test/extensions/filters/http/ratelimit/ratelimit_test.cc | 1 - 2 files changed, 2 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 6239e840ebb7b..ad9d32b5858e9 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -173,7 +173,6 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req Http::FilterDataStatus encodeData(Buffer::Instance& data, bool end_stream) override; Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap& trailers) override; Http::FilterMetadataStatus encodeMetadata(Http::MetadataMap&) override; - void encodeComplete() override {} void setEncoderFilterCallbacks(Http::StreamEncoderFilterCallbacks& callbacks) override; // RateLimit::RequestCallbacks diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index c634533f1dd77..e5b52d23c779b 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -324,7 +324,6 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = 
&callbacks; }))); - filter_->onDestroy(); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); From 8823652503cb75cbc454fa5ba00e9d8c22503aa0 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 08:21:50 +0000 Subject: [PATCH 24/27] Apply review comments Signed-off-by: Takeshi Yoneda --- source/common/router/router_ratelimit.h | 2 +- .../common/ratelimit/ratelimit_impl.cc | 5 +++- .../filters/http/ratelimit/ratelimit.cc | 26 +++++++------------ .../filters/http/ratelimit/ratelimit.h | 1 - 4 files changed, 15 insertions(+), 19 deletions(-) diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h index 0d2774e443c4e..686f15a6938d9 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -263,7 +263,7 @@ class RateLimitPolicyEntryImpl : public RateLimitPolicyEntry { uint64_t stage_; std::vector actions_; absl::optional limit_override_ = absl::nullopt; - bool apply_on_stream_done_; + const bool apply_on_stream_done_ = false; }; /** diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index 23ef3321a9d37..a774734f432e5 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++ b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -119,8 +119,11 @@ void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::strin ASSERT(status != Grpc::Status::WellKnownGrpcStatus::Ok); ENVOY_LOG_TO_LOGGER(Logger::Registry::getLog(Logger::Id::filter), debug, "rate limit fail, status={} msg={}", status, msg); - callbacks_->complete(LimitStatus::Error, nullptr, nullptr, nullptr, EMPTY_STRING, nullptr); + // The rate limit requests applied on stream-done will destroy the client inside the complete + // callback, so we release the callback here to make the destructor happy. 
+ auto call_backs = callbacks_; callbacks_ = nullptr; + call_backs->complete(LimitStatus::Error, nullptr, nullptr, nullptr, EMPTY_STRING, nullptr); } ClientPtr rateLimitClient(Server::Configuration::FactoryContext& context, diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 7c05e63e247d7..6412de403c694 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -171,20 +171,15 @@ void Filter::onDestroy() { if (state_ == State::Calling) { state_ = State::Complete; client_->cancel(); - } else { - // If the filter doesn't have an outstanding limit request (made during decodeHeaders) and has - // at least one policy with apply_on_stream_done=true, then we apply the rate limit here. - if (has_apply_on_stream_done_policy_) { - std::vector descriptors; - populateRateLimitDescriptors(descriptors, *request_headers_, true); - if (!descriptors.empty()) { - client_->cancel(); // Clears the internal state of the client, so that we can reuse it. - // Since this filter is being destroyed, we need to keep the client alive until the request - // is complete. - auto callback = new OnStreamDoneCallBack(std::move(client_)); - callback->client().limit(*callback, getDomain(), descriptors, Tracing::NullSpan::instance(), - absl::nullopt, getHitAddend()); - } + } else if (client_ != nullptr) { + std::vector descriptors; + populateRateLimitDescriptors(descriptors, *request_headers_, true); + if (!descriptors.empty()) { + // Since this filter is being destroyed, we need to keep the client alive until the request + // is complete by leaking the client with OnStreamDoneCallBack. 
+ auto callback = new OnStreamDoneCallBack(std::move(client_)); + callback->client().limit(*callback, getDomain(), descriptors, Tracing::NullSpan::instance(), + absl::nullopt, getHitAddend()); } } } @@ -294,8 +289,7 @@ void Filter::populateRateLimitDescriptorsForPolicy(const Router::RateLimitPolicy continue; } const bool apply_on_stream_done = rate_limit.applyOnStreamDone(); - has_apply_on_stream_done_policy_ |= apply_on_stream_done; - if ((on_stream_done && apply_on_stream_done) || (!on_stream_done && !apply_on_stream_done)) { + if (on_stream_done == apply_on_stream_done) { rate_limit.populateDescriptors(descriptors, config_->localInfo().clusterName(), headers, callbacks_->streamInfo()); } diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index ad9d32b5858e9..68547fb487c1f 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -210,7 +210,6 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; Http::RequestHeaderMap* request_headers_{}; - bool has_apply_on_stream_done_policy_ = false; }; /** From 946ce6fe4814a0d5fb4824db030e9a2687667bb4 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 09:25:08 +0000 Subject: [PATCH 25/27] review: save route_ and per-route vh_rate_limits_ for consistency Signed-off-by: Takeshi Yoneda --- .../common/ratelimit/ratelimit_impl.cc | 7 ++-- .../filters/http/ratelimit/ratelimit.cc | 34 ++++++++++++------- .../filters/http/ratelimit/ratelimit.h | 3 +- 3 files changed, 28 insertions(+), 16 deletions(-) diff --git a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc index a774734f432e5..7f1b07e8f6f39 100644 --- a/source/extensions/filters/common/ratelimit/ratelimit_impl.cc +++
b/source/extensions/filters/common/ratelimit/ratelimit_impl.cc @@ -108,10 +108,13 @@ void GrpcClientImpl::onSuccess( response->has_dynamic_metadata() ? std::make_unique(response->dynamic_metadata()) : nullptr; - callbacks_->complete(status, std::move(descriptor_statuses), std::move(response_headers_to_add), + // The rate limit requests applied on stream-done will destroy the client inside the complete + // callback, so we release the callback here to make the destructor happy. + auto call_backs = callbacks_; + callbacks_ = nullptr; + call_backs->complete(status, std::move(descriptor_statuses), std::move(response_headers_to_add), std::move(request_headers_to_add), response->raw_body(), std::move(dynamic_metadata)); - callbacks_ = nullptr; } void GrpcClientImpl::onFailure(Grpc::Status::GrpcStatus status, const std::string& msg, diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index 6412de403c694..f6b8060138b56 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -69,33 +69,42 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { void Filter::populateRateLimitDescriptors(std::vector& descriptors, const Http::RequestHeaderMap& headers, bool on_stream_done) { - Router::RouteConstSharedPtr route = callbacks_->route(); - if (!route || !route->routeEntry()) { - return; + if (!on_stream_done) { + route_ = callbacks_->route(); + if (!route_) { + return; + } } - cluster_ = callbacks_->clusterInfo(); - if (!cluster_) { - return; + if (!on_stream_done) { + cluster_ = callbacks_->clusterInfo(); + if (!cluster_) { + return; + } } - const Router::RouteEntry* route_entry = route->routeEntry(); + const Router::RouteEntry* route_entry = route_->routeEntry(); + if (!route_entry) { + return; + } // Get all applicable rate limit policy entries for the route. 
populateRateLimitDescriptorsForPolicy(route_entry->rateLimitPolicy(), descriptors, headers, on_stream_done); - VhRateLimitOptions vh_rate_limit_option = getVirtualHostRateLimitOption(route); + if (!on_stream_done) { + initializeVirtualHostRateLimitOption(route_); + } - switch (vh_rate_limit_option) { + switch (vh_rate_limits_) { case VhRateLimitOptions::Ignore: break; case VhRateLimitOptions::Include: - populateRateLimitDescriptorsForPolicy(route->virtualHost().rateLimitPolicy(), descriptors, + populateRateLimitDescriptorsForPolicy(route_->virtualHost().rateLimitPolicy(), descriptors, headers, on_stream_done); break; case VhRateLimitOptions::Override: if (route_entry->rateLimitPolicy().empty()) { - populateRateLimitDescriptorsForPolicy(route->virtualHost().rateLimitPolicy(), descriptors, + populateRateLimitDescriptorsForPolicy(route_->virtualHost().rateLimitPolicy(), descriptors, headers, on_stream_done); } break; @@ -320,7 +329,7 @@ void Filter::appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add) { } } -VhRateLimitOptions Filter::getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route) { +void Filter::initializeVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route) { if (route->routeEntry()->includeVirtualHostRateLimits()) { vh_rate_limits_ = VhRateLimitOptions::Include; } else { @@ -342,7 +351,6 @@ VhRateLimitOptions Filter::getVirtualHostRateLimitOption(const Router::RouteCons vh_rate_limits_ = VhRateLimitOptions::Override; } } - return vh_rate_limits_; } std::string Filter::getDomain() { diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index 68547fb487c1f..a531293357bdb 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -194,7 +194,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req void populateResponseHeaders(Http::HeaderMap& 
response_headers, bool from_local_reply); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); double getHitAddend(); - VhRateLimitOptions getVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route); + void initializeVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route); std::string getDomain(); Http::Context& httpContext() { return config_->httpContext(); } @@ -207,6 +207,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req State state_{State::NotStarted}; VhRateLimitOptions vh_rate_limits_; Upstream::ClusterInfoConstSharedPtr cluster_; + Router::RouteConstSharedPtr route_ = nullptr; bool initiating_call_{}; Http::ResponseHeaderMapPtr response_headers_to_add_; Http::RequestHeaderMap* request_headers_{}; From 5dec975063512a3fb971fe69c6508bc2839de6ef Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 09:34:46 +0000 Subject: [PATCH 26/27] simplify Signed-off-by: Takeshi Yoneda --- source/extensions/filters/http/ratelimit/ratelimit.cc | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index f6b8060138b56..fc96f68538e28 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -69,18 +69,18 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { void Filter::populateRateLimitDescriptors(std::vector& descriptors, const Http::RequestHeaderMap& headers, bool on_stream_done) { + // To use the exact same context for both request and on_stream_done rate limiting descriptors, + // we save the route and per-route configuration here and use it later. 
if (!on_stream_done) { route_ = callbacks_->route(); if (!route_) { return; } - } - - if (!on_stream_done) { cluster_ = callbacks_->clusterInfo(); if (!cluster_) { return; } + initializeVirtualHostRateLimitOption(route_); } const Router::RouteEntry* route_entry = route_->routeEntry(); @@ -91,10 +91,6 @@ void Filter::populateRateLimitDescriptors(std::vectorrateLimitPolicy(), descriptors, headers, on_stream_done); - if (!on_stream_done) { - initializeVirtualHostRateLimitOption(route_); - } - switch (vh_rate_limits_) { case VhRateLimitOptions::Ignore: break; From b76e3d7ad622838705dc42f596ab1e9a584ecc0c Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Wed, 18 Dec 2024 10:59:36 +0000 Subject: [PATCH 27/27] apply review comments Signed-off-by: Takeshi Yoneda --- .../filters/http/ratelimit/ratelimit.cc | 21 +++++++++---------- .../filters/http/ratelimit/ratelimit.h | 2 +- .../filters/http/ratelimit/ratelimit_test.cc | 1 - 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/source/extensions/filters/http/ratelimit/ratelimit.cc b/source/extensions/filters/http/ratelimit/ratelimit.cc index fc96f68538e28..858b1548810a6 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.cc +++ b/source/extensions/filters/http/ratelimit/ratelimit.cc @@ -69,24 +69,23 @@ void Filter::populateRateLimitDescriptors(std::vector& descriptors, const Http::RequestHeaderMap& headers, bool on_stream_done) { - // To use the exact same context for both request and on_stream_done rate limiting descriptors, - // we save the route and per-route configuration here and use it later. if (!on_stream_done) { + // To use the exact same context for both request and on_stream_done rate limiting descriptors, + // we save the route and per-route configuration here and use them later.
route_ = callbacks_->route(); - if (!route_) { - return; - } cluster_ = callbacks_->clusterInfo(); - if (!cluster_) { - return; - } - initializeVirtualHostRateLimitOption(route_); + } + if (!route_ || !cluster_) { + return; } const Router::RouteEntry* route_entry = route_->routeEntry(); if (!route_entry) { return; } + if (!on_stream_done) { + initializeVirtualHostRateLimitOption(route_entry); + } // Get all applicable rate limit policy entries for the route. populateRateLimitDescriptorsForPolicy(route_entry->rateLimitPolicy(), descriptors, headers, on_stream_done); @@ -325,8 +324,8 @@ void Filter::appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add) { } } -void Filter::initializeVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route) { - if (route->routeEntry()->includeVirtualHostRateLimits()) { +void Filter::initializeVirtualHostRateLimitOption(const Router::RouteEntry* route_entry) { + if (route_entry->includeVirtualHostRateLimits()) { vh_rate_limits_ = VhRateLimitOptions::Include; } else { const auto* specific_per_route_config = diff --git a/source/extensions/filters/http/ratelimit/ratelimit.h b/source/extensions/filters/http/ratelimit/ratelimit.h index a531293357bdb..60b110a505849 100644 --- a/source/extensions/filters/http/ratelimit/ratelimit.h +++ b/source/extensions/filters/http/ratelimit/ratelimit.h @@ -194,7 +194,7 @@ class Filter : public Http::StreamFilter, public Filters::Common::RateLimit::Req void populateResponseHeaders(Http::HeaderMap& response_headers, bool from_local_reply); void appendRequestHeaders(Http::HeaderMapPtr& request_headers_to_add); double getHitAddend(); - void initializeVirtualHostRateLimitOption(const Router::RouteConstSharedPtr& route); + void initializeVirtualHostRateLimitOption(const Router::RouteEntry* route_entry); std::string getDomain(); Http::Context& httpContext() { return config_->httpContext(); } diff --git a/test/extensions/filters/http/ratelimit/ratelimit_test.cc 
b/test/extensions/filters/http/ratelimit/ratelimit_test.cc index e5b52d23c779b..63f14004ef3d3 100644 --- a/test/extensions/filters/http/ratelimit/ratelimit_test.cc +++ b/test/extensions/filters/http/ratelimit/ratelimit_test.cc @@ -318,7 +318,6 @@ TEST_F(HttpRateLimitFilterTest, OkResponseWithAdditionalHitsAddend) { EXPECT_CALL(vh_rate_limit_, applyOnStreamDone()).WillRepeatedly(Return(true)); EXPECT_CALL(vh_rate_limit_, populateDescriptors(_, _, _, _)) .WillOnce(SetArgReferee<0>(descriptor_two_)); - EXPECT_CALL(*client_, cancel()); EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(descriptor_two_), _, _, 100)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void {