diff --git a/.gitignore b/.gitignore index 5578c1c91acf0..6cb9287cfad5b 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ BROWSE *.bzlc .cache .classpath +.clwb/ /ci/bazel-* /ci/prebuilt/thirdparty /ci/prebuilt/thirdparty_build diff --git a/WORKSPACE b/WORKSPACE index ecd0a358f1639..ec06147ab36f5 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -15,7 +15,7 @@ load("@envoy_api//bazel:repositories.bzl", "api_dependencies") api_dependencies() -load("@io_bazel_rules_go//go:def.bzl", "go_register_toolchains", "go_rules_dependencies") +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md index fe0362ef16f18..c7587b2f283ce 100644 --- a/api/XDS_PROTOCOL.md +++ b/api/XDS_PROTOCOL.md @@ -288,35 +288,33 @@ admin: ### Incremental xDS -Incremental xDS is a separate xDS endpoint available for ADS, CDS and RDS that -allows: - - * Incremental updates of the list of tracked resources by the xDS client. - This supports Envoy on-demand / lazily requesting additional resources. For - example, this may occur when a request corresponding to an unknown cluster - arrives. - * The xDS server can incrementally update the resources on the client. - This supports the goal of scalability of xDS resources. Rather than deliver - all 100k clusters when a single cluster is modified, the management server - only needs to deliver the single cluster that changed. - -An xDS incremental session is always in the context of a gRPC bidirectional +Incremental xDS is a separate xDS endpoint that: + + * Allows the protocol to communicate on the wire in terms of resource/resource + name deltas ("Delta xDS"). This supports the goal of scalability of xDS + resources. Rather than deliver all 100k clusters when a single cluster is + modified, the management server only needs to deliver the single cluster + that changed. + * Allows the Envoy to on-demand / lazily request additional resources. 
For + example, requesting a cluster only when a request for that cluster arrives. + +An Incremental xDS session is always in the context of a gRPC bidirectional stream. This allows the xDS server to keep track of the state of xDS clients -connected to it. There is no REST version of Incremental xDS. +connected to it. There is no REST version of Incremental xDS yet. -In incremental xDS the nonce field is required and used to pair a -[`IncrementalDiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) -to a [`IncrementalDiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest) +In the delta xDS wire protocol, the nonce field is required and used to pair a +[`DeltaDiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#deltadiscoveryresponse) +to a [`DeltaDiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#deltadiscoveryrequest) ACK or NACK. Optionally, a response message level system_version_info is present for debugging purposes only. -`IncrementalDiscoveryRequest` can be sent in 3 situations: +`DeltaDiscoveryRequest` can be sent in 3 situations: 1. Initial message in a xDS bidirectional gRPC stream. - 2. As an ACK or NACK response to a previous `IncrementalDiscoveryResponse`. + 2. As an ACK or NACK response to a previous `DeltaDiscoveryResponse`. In this case the `response_nonce` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of `error_detail`. - 3. Spontaneous `IncrementalDiscoveryRequest` from the client. + 3. Spontaneous `DeltaDiscoveryRequest` from the client. This can be done to dynamically add or remove elements from the tracked `resource_names` set. In this case `response_nonce` must be omitted. @@ -326,8 +324,8 @@ client spontaneously requests the "wc" resource. 
![Incremental session example](diagrams/incremental.svg) -On reconnect the xDS Incremental client may tell the server of its known resources -to avoid resending them over the network. +On reconnect the Incremental xDS client may tell the server of its known +resources to avoid resending them over the network. ![Incremental reconnect example](diagrams/incremental-reconnect.svg) diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 29ca98ac3b731..6d68524399fad 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -1,8 +1,8 @@ -BAZEL_SKYLIB_RELEASE = "0.6.0" -BAZEL_SKYLIB_SHA256 = "eb5c57e4c12e68c0c20bc774bfbc60a568e800d025557bc4ea022c6479acc867" +BAZEL_SKYLIB_RELEASE = "0.7.0" +BAZEL_SKYLIB_SHA256 = "2c62d8cd4ab1e65c08647eb4afe38f51591f43f7f0885e7769832fa137633dcb" -GOGOPROTO_RELEASE = "1.2.0" -GOGOPROTO_SHA256 = "957c8f03cf595525d2a667035d9865a0930b3d446be0ab6eb76972934f925b00" +GOGOPROTO_RELEASE = "1.2.1" +GOGOPROTO_SHA256 = "99e423905ba8921e86817607a5294ffeedb66fdd4a85efce5eb2848f715fdb3a" OPENCENSUS_RELEASE = "0.1.0" OPENCENSUS_SHA256 = "4fd21cc6de63d7cb979fd749d8101ff425905aa0826fed26019d1c311fcf19a7" diff --git a/api/envoy/admin/v2alpha/server_info.proto b/api/envoy/admin/v2alpha/server_info.proto index 18dcc70b805cc..13dea7ae8eb76 100644 --- a/api/envoy/admin/v2alpha/server_info.proto +++ b/api/envoy/admin/v2alpha/server_info.proto @@ -128,4 +128,7 @@ message CommandLineOptions { // See :option:`--restart-epoch` for details. uint32 restart_epoch = 24; + + // See :option:`--cpuset-threads` for details. 
+ bool cpuset_threads = 25; } diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto index 28fa6aaba9f2b..206704ebe574d 100644 --- a/api/envoy/api/v2/cds.proto +++ b/api/envoy/api/v2/cds.proto @@ -36,8 +36,7 @@ service ClusterDiscoveryService { rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } - rpc IncrementalClusters(stream IncrementalDiscoveryRequest) - returns (stream IncrementalDiscoveryResponse) { + rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { @@ -51,7 +50,7 @@ service ClusterDiscoveryService { // [#protodoc-title: Clusters] // Configuration for a single upstream cluster. -// [#comment:next free field: 38] +// [#comment:next free field: 39] message Cluster { // Supplies the name of the cluster which must be unique across all clusters. // The cluster name is used when emitting @@ -95,9 +94,25 @@ message Cluster { // for an explanation. ORIGINAL_DST = 4; } - // The :ref:`service discovery type ` - // to use for resolving the cluster. - DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // Extended cluster type. + message CustomClusterType { + // The type of the cluster to instantiate. The name must match a supported cluster type. + string name = 1 [(validate.rules).string.min_bytes = 1]; + + // Cluster specific configuration which depends on the cluster being instantiated. + // See the supported cluster for further documentation. + google.protobuf.Any typed_config = 2; + } + + oneof cluster_discovery_type { + // The :ref:`service discovery type ` + // to use for resolving the cluster. + DiscoveryType type = 2 [(validate.rules).enum.defined_only = true]; + + // The custom cluster type. + CustomClusterType cluster_type = 38; + } // Only valid when discovery type is EDS. 
message EdsClusterConfig { diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto index 3be59c1886b5f..0b5a1ec4bf9e5 100644 --- a/api/envoy/api/v2/core/config_source.proto +++ b/api/envoy/api/v2/core/config_source.proto @@ -32,6 +32,13 @@ message ApiConfigSource { REST = 1; // gRPC v2 API. GRPC = 2; + // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} + // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state + // with every update, the xDS server only sends what has changed since the last update. + // + // DELTA_GRPC is not yet entirely implemented! Initially, only CDS is available. + // Do not use for other xDSes. TODO(fredlas) update/remove this warning when appropriate. + DELTA_GRPC = 3; } ApiType api_type = 1 [(validate.rules).enum.defined_only = true]; // Cluster names should be used only with REST. If > 1 diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto index ff04dc20f8b41..5bff0ddae2ee3 100644 --- a/api/envoy/api/v2/discovery.proto +++ b/api/envoy/api/v2/discovery.proto @@ -102,33 +102,32 @@ message DiscoveryResponse { core.ControlPlane control_plane = 6; } -// IncrementalDiscoveryRequest and IncrementalDiscoveryResponse are used in a -// new gRPC endpoint for Incremental xDS. The feature is not supported for REST -// management servers. +// DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC +// endpoint for Delta xDS. // -// With Incremental xDS, the IncrementalDiscoveryResponses do not need to -// include a full snapshot of the tracked resources. Instead -// IncrementalDiscoveryResponses are a diff to the state of a xDS client. -// In Incremental XDS there are per resource versions which allows to track -// state at the resource granularity. 
-// An xDS Incremental session is always in the context of a gRPC bidirectional +// With Delta xDS, the DeltaDiscoveryResponses do not need to include a full +// snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a +// diff to the state of a xDS client. +// In Delta XDS there are per resource versions, which allow tracking state at +// the resource granularity. +// An xDS Delta session is always in the context of a gRPC bidirectional // stream. This allows the xDS server to keep track of the state of xDS clients // connected to it. // -// In Incremental xDS the nonce field is required and used to pair -// IncrementalDiscoveryResponse to a IncrementalDiscoveryRequest ACK or NACK. +// In Delta xDS the nonce field is required and used to pair +// DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. // Optionally, a response message level system_version_info is present for // debugging purposes only. // -// IncrementalDiscoveryRequest can be sent in 3 situations: +// DeltaDiscoveryRequest can be sent in 3 situations: // 1. Initial message in a xDS bidirectional gRPC stream. -// 2. As a ACK or NACK response to a previous IncrementalDiscoveryResponse. +// 2. As a ACK or NACK response to a previous DeltaDiscoveryResponse. // In this case the response_nonce is set to the nonce value in the Response. // ACK or NACK is determined by the absence or presence of error_detail. -// 3. Spontaneous IncrementalDiscoveryRequest from the client. +// 3. Spontaneous DeltaDiscoveryRequest from the client. // This can be done to dynamically add or remove elements from the tracked // resource_names set. In this case response_nonce must be omitted. -message IncrementalDiscoveryRequest { +message DeltaDiscoveryRequest { // The node making the request. core.Node node = 1; @@ -138,18 +137,18 @@ message IncrementalDiscoveryRequest { // required for ADS. 
string type_url = 2; - // IncrementalDiscoveryRequests allow the client to add or remove individual + // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. // All resource names in the resource_names_subscribe list are added to the // set of tracked resources and all resource names in the resource_names_unsubscribe // list are removed from the set of tracked resources. - // Unlike in non incremental xDS, an empty resource_names_subscribe or + // Unlike in state-of-the-world xDS, an empty resource_names_subscribe or // resource_names_unsubscribe list simply means that no resources are to be // added or removed to the resource list. // The xDS server must send updates for all tracked resources but can also // send updates for resources the client has not subscribed to. This behavior - // is similar to non incremental xDS. - // These two fields can be set for all types of IncrementalDiscoveryRequests + // is similar to state-of-the-world xDS. + // These two fields can be set for all types of DeltaDiscoveryRequests // (initial, ACK/NACK or spontaneous). // // A list of Resource names to add to the list of tracked resources. @@ -158,15 +157,17 @@ message IncrementalDiscoveryRequest { // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; - // This map must be populated when the IncrementalDiscoveryRequest is the - // first in a stream. The keys are the resources names of the xDS resources + // This map must be populated when the DeltaDiscoveryRequest is the + // first in a stream (assuming there are any resources - this field's purpose is to enable + // a session to continue in a reconnected gRPC stream, and so will not be used in the very + // first stream of a session). The keys are the resources names of the xDS resources // known to the xDS client. 
The values in the map are the associated resource // level version info. map initial_resource_versions = 5; - // When the IncrementalDiscoveryRequest is a ACK or NACK message in response - // to a previous IncrementalDiscoveryResponse, the response_nonce must be the - // nonce in the IncrementalDiscoveryResponse. + // When the DeltaDiscoveryRequest is a ACK or NACK message in response + // to a previous DeltaDiscoveryResponse, the response_nonce must be the + // nonce in the DeltaDiscoveryResponse. // Otherwise response_nonce must be omitted. string response_nonce = 6; @@ -176,24 +177,27 @@ message IncrementalDiscoveryRequest { google.rpc.Status error_detail = 7; } -message IncrementalDiscoveryResponse { +message DeltaDiscoveryResponse { // The version of the response data (used for debugging). string system_version_info = 1; // The response resources. These are typed resources that match the type url - // in the IncrementalDiscoveryRequest. + // in the DeltaDiscoveryRequest. repeated Resource resources = 2 [(gogoproto.nullable) = false]; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; - // The nonce provides a way for IncrementalDiscoveryRequests to uniquely - // reference a IncrementalDiscoveryResponse. The nonce is required. + // The nonce provides a way for DeltaDiscoveryRequests to uniquely + // reference a DeltaDiscoveryResponse. The nonce is required. string nonce = 5; } message Resource { + // The resource's name, to distinguish it from others of the same type of resource. + string name = 3; + // The resource level version. It allows xDS to track the state of individual // resources. 
string version = 1; diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto index 8d41b384ba9bd..d75b68af6791f 100644 --- a/api/envoy/api/v2/rds.proto +++ b/api/envoy/api/v2/rds.proto @@ -33,8 +33,7 @@ service RouteDiscoveryService { rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } - rpc IncrementalRoutes(stream IncrementalDiscoveryRequest) - returns (stream IncrementalDiscoveryResponse) { + rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) { diff --git a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto index d0300f52a9aca..7a5ca47f77237 100644 --- a/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto +++ b/api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto @@ -3,8 +3,8 @@ syntax = "proto3"; package envoy.config.filter.http.ext_authz.v2; option java_outer_classname = "ExtAuthzProto"; -option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; option java_multiple_files = true; +option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; option go_package = "v2"; import "envoy/api/v2/core/base.proto"; @@ -16,7 +16,8 @@ import "envoy/type/matcher/string.proto"; import "validate/validate.proto"; // [#protodoc-title: External Authorization] -// ExtAuthz :ref:`configuration overview `. +// External Authorization :ref:`configuration overview `. + message ExtAuthz { // External authorization service configuration. 
oneof services { diff --git a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto index eb1dfd68fe2d9..0c33b6d077a16 100644 --- a/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto +++ b/api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto @@ -1,15 +1,18 @@ syntax = "proto3"; -package envoy.extensions.filter.http.grpc_http1_reverse_bridge.v2alpha1; +package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; -option java_package = "io.envoyproxy.envoy.extensions.filter.http.grpc_http1_reverse_bridge.v2alpha1"; +option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; option go_package = "v2"; import "validate/validate.proto"; -// [#protodoc-title: Extensions gRPC Http1 Reverse Bridge] +// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] +// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview +// `. + // gRPC reverse bridge filter configuration message FilterConfig { // The content-type to pass to the upstream when the gRPC bridge filter is applied. diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index b60d74a652b87..555960521f547 100644 --- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -15,6 +15,9 @@ import "google/protobuf/empty.proto"; import "google/protobuf/wrappers.proto"; import "validate/validate.proto"; +// [#protodoc-title: JWT Authentication] +// JWT Authentication :ref:`configuration overview `. 
+ // Please see following for JWT authentication flow: // // * `JSON Web Token (JWT) `_ diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 1364e20bf3518..9b01505e89794 100644 --- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -137,14 +137,13 @@ message HttpConnectionManager { // header in responses. If not set, the default is *envoy*. string server_name = 10; - // The maximum request headers size for incoming connections. The default max - // is 60K, based on default settings for http codecs. For HTTP1, the current - // limit set by http_parser is 80K. for HTTP2, the default allowed header - // block in nghttp2 is 64K. The max configurable setting is 64K in order to - // stay under both codec limits. - // Requests that exceed this size will receive a 431 response. + // The maximum request headers size for incoming connections. + // If unconfigured, the default max request headers allowed is 60 KiB. + // Requests that exceed this limit will receive a 431 response. + // The max configurable limit is 96 KiB, based on current implementation + // constraints. google.protobuf.UInt32Value max_request_headers_kb = 29 - [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 64]; + [(validate.rules).uint32.gt = 0, (validate.rules).uint32.lte = 96]; // The idle timeout for connections managed by the connection manager. 
The // idle timeout is defined as the period in which there are no active @@ -400,14 +399,20 @@ message HttpFilter { // [#comment:TODO(mattklein123): Auto generate the following list] // * :ref:`envoy.buffer ` // * :ref:`envoy.cors ` + // * :ref:`envoy.ext_authz ` // * :ref:`envoy.fault ` + // * :ref:`envoy.filters.http.header_to_metadata ` + // * :ref:`envoy.filters.http.grpc_http1_reverse_bridge \ + // ` + // * :ref:`envoy.filters.http.jwt_authn ` + // * :ref:`envoy.filters.http.rbac ` + // * :ref:`envoy.filters.http.tap ` // * :ref:`envoy.gzip ` // * :ref:`envoy.http_dynamo_filter ` // * :ref:`envoy.grpc_http1_bridge ` // * :ref:`envoy.grpc_json_transcoder ` // * :ref:`envoy.grpc_web ` // * :ref:`envoy.health_check ` - // * :ref:`envoy.header_to_metadata ` // * :ref:`envoy.ip_tagging ` // * :ref:`envoy.lua ` // * :ref:`envoy.rate_limit ` diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto index 73f272191bd01..6a9d044ab4bdd 100644 --- a/api/envoy/service/discovery/v2/ads.proto +++ b/api/envoy/service/discovery/v2/ads.proto @@ -32,7 +32,7 @@ service AggregatedDiscoveryService { returns (stream envoy.api.v2.DiscoveryResponse) { } - rpc IncrementalAggregatedResources(stream envoy.api.v2.IncrementalDiscoveryRequest) - returns (stream envoy.api.v2.IncrementalDiscoveryResponse) { + rpc DeltaAggregatedResources(stream envoy.api.v2.DeltaDiscoveryRequest) + returns (stream envoy.api.v2.DeltaDiscoveryResponse) { } } diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index e6d2c67545e6c..a1b3a9fd69993 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -54,28 +54,11 @@ Dependencies between external libraries can use the standard Bazel dependency resolution logic, using the `$(location)` shell extension to resolve paths to binaries, libraries, headers, etc. -# Adding external dependencies to Envoy (build recipe) - -This is the older style of adding dependencies. 
It uses shell scripts to build and install -dependencies into a shared directory prefix. This should no longer be used unless there are -extenuating circumstances. - -1. Add a build recipe X in [`ci/build_container/build_recipes`](../ci/build_container/build_recipes) - for developer-local and CI external dependency build flows. -2. Add a build target Y in [`ci/prebuilt/BUILD`](../ci/prebuilt/BUILD) to consume the headers and - libraries produced by the build recipe X. -3. Add a map from target Y to build recipe X in [`target_recipes.bzl`](target_recipes.bzl). -4. Reference your new external dependency in some `envoy_cc_library` via Y in the `external_deps` - attribute. -5. `bazel test //test/...` - # Updating an external dependency version -1. If the dependency is a build recipe, update the build recipe in -[`ci/build_container/build_recipes`](../ci/build_container/build_recipes). -2. If not, update the corresponding entry in +1. Update the corresponding entry in [the repository locations file.](https://github.com/envoyproxy/envoy/blob/master/bazel/repository_locations.bzl) -3. `bazel test //test/...` +2. `bazel test //test/...` # Overriding an external dependency temporarily diff --git a/bazel/README.md b/bazel/README.md index d670ae2b483c9..014c40e05dfb9 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -559,28 +559,6 @@ Once this is set up, you can run clang-tidy without docker: Setting up an HTTP cache for Bazel output helps optimize Bazel performance and resource usage when using multiple compilation modes or multiple trees. -## Setup common `envoy_deps` - -This step sets up the common `envoy_deps` allowing HTTP or disk cache (described below) to work -across working trees in different paths. Also it allows new working trees to skip dependency -compilation. The drawback is that the cached dependencies won't be updated automatically, so make -sure all your working trees have same (or compatible) dependencies, and run this step periodically -to update them. 
- -Make sure you don't have `--override_repository` in your `.bazelrc` when you run this step. - -``` -bazel fetch //test/... -cp -LR $(bazel info output_base)/external/envoy_deps ${HOME}/envoy_deps_cache -``` - -Adding the following parameter to Bazel everytime or persist them in `.bazelrc`, note you will need to expand -the environment variables for `.bazelrc`. - -``` ---override_repository=envoy_deps=${HOME}/envoy_deps_cache -``` - ## Setup local cache You may use any [Remote Caching](https://docs.bazel.build/versions/master/remote-caching.html) backend diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl index b1075f011b6e8..da34d13ef65b2 100644 --- a/bazel/envoy_build_system.bzl +++ b/bazel/envoy_build_system.bzl @@ -679,6 +679,7 @@ def envoy_select_boringssl(if_fips, default = None): "//conditions:default": default or [], }) +# Selects the part of QUICHE that does not yet work with the current CI. def envoy_select_quiche(xs, repository = ""): return select({ repository + "//bazel:enable_quiche": xs, diff --git a/bazel/external/http-parser.BUILD b/bazel/external/http-parser.BUILD index 523d94fbf4316..303950d7c00b6 100644 --- a/bazel/external/http-parser.BUILD +++ b/bazel/external/http-parser.BUILD @@ -7,6 +7,10 @@ cc_library( "http_parser.h", ], hdrs = ["http_parser.h"], + # This compiler flag is set to an arbtitrarily high number so + # as to effectively disables the http_parser header limit, as + # we do our own checks in the conn manager and codec. 
+ copts = ["-DHTTP_MAX_HEADER_SIZE=0x2000000"], includes = ["."], visibility = ["//visibility:public"], ) diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 05802b993b81f..f13503e458701 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -29,6 +29,7 @@ load(":genrule_cmd.bzl", "genrule_cmd") load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_test", + "envoy_select_quiche", ) src_files = glob([ @@ -98,12 +99,24 @@ cc_library( cc_library( name = "quic_platform", - srcs = ["quiche/quic/platform/api/quic_mutex.cc"], + srcs = [ + "quiche/quic/platform/api/quic_mutex.cc", + ] + envoy_select_quiche( + [ + "quiche/quic/platform/api/quic_hostname_utils.cc", + ], + "@envoy", + ), hdrs = [ "quiche/quic/platform/api/quic_cert_utils.h", "quiche/quic/platform/api/quic_mutex.h", "quiche/quic/platform/api/quic_str_cat.h", - ], + ] + envoy_select_quiche( + [ + "quiche/quic/platform/api/quic_hostname_utils.h", + ], + "@envoy", + ), visibility = ["//visibility:public"], deps = [ ":quic_platform_base", @@ -148,8 +161,10 @@ cc_library( "quiche/quic/platform/api/quic_string_piece.h", "quiche/quic/platform/api/quic_string_utils.h", "quiche/quic/platform/api/quic_test.h", + "quiche/quic/platform/api/quic_test_output.h", "quiche/quic/platform/api/quic_text_utils.h", "quiche/quic/platform/api/quic_uint128.h", + "quiche/quic/platform/api/quic_thread.h", # TODO: uncomment the following files as implementations are added. 
# "quiche/quic/platform/api/quic_bug_tracker.h", # "quiche/quic/platform/api/quic_clock.h", @@ -158,7 +173,6 @@ cc_library( # "quiche/quic/platform/api/quic_flags.h", # "quiche/quic/platform/api/quic_fuzzed_data_provider.h", # "quiche/quic/platform/api/quic_goog_cc_sender.h", - # "quiche/quic/platform/api/quic_hostname_utils.h", # "quiche/quic/platform/api/quic_interval.h", # "quiche/quic/platform/api/quic_ip_address_family.h", # "quiche/quic/platform/api/quic_ip_address.h", @@ -172,8 +186,6 @@ cc_library( # "quiche/quic/platform/api/quic_test.h", # "quiche/quic/platform/api/quic_test_loopback.h", # "quiche/quic/platform/api/quic_test_mem_slice_vector.h", - # "quiche/quic/platform/api/quic_test_output.h", - # "quiche/quic/platform/api/quic_thread.h", ], visibility = ["//visibility:public"], deps = [ diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 84961a04eb924..3791c8f3adbdf 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -29,6 +29,16 @@ cc_library( ], ) +configure_make( + name = "luajit", + configure_command = "build.py", + lib_source = "@com_github_luajit_luajit//:all", + make_commands = [], + static_libraries = [ + "libluajit-5.1.a", + ], +) + envoy_cmake_external( name = "ares", cache_entries = { diff --git a/ci/build_container/build_recipes/luajit.sh b/bazel/foreign_cc/luajit.patch similarity index 56% rename from ci/build_container/build_recipes/luajit.sh rename to bazel/foreign_cc/luajit.patch index 00546892f29b6..15b025595e7e3 100644 --- a/ci/build_container/build_recipes/luajit.sh +++ b/bazel/foreign_cc/luajit.patch @@ -1,25 +1,3 @@ -#!/bin/bash - -set -e - -if [[ "${OS}" == "Windows_NT" ]]; then - exit 0 -fi - -SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")" - -$($SCRIPT_DIR/versions.py luajit) - -FILE_NAME=$(basename "$FILE_URL") - -curl "$FILE_URL" -sLo "$FILE_NAME" \ - && echo "$FILE_SHA256" "$FILE_NAME" | sha256sum --check -tar xf "$FILE_NAME" - -cd "$FILE_PREFIX" - -# Fixup Makefile with things that cannot be set 
via env var. -cat > luajit_make.diff << 'EOF' diff --git a/src/Makefile b/src/Makefile index f56465d..3f4f2fa 100644 --- a/src/Makefile @@ -65,11 +43,37 @@ index f56465d..3f4f2fa 100644 ############################################################################## EOF - -patch -p1 < luajit_make.diff - -# Default MACOSX_DEPLOYMENT_TARGET is 10.4, which will fail the build at link time on macOS 10.14: -# ld: library not found for -lgcc_s.10.4 -# This doesn't affect other platforms -MACOSX_DEPLOYMENT_TARGET=10.6 DEFAULT_CC=${CC} TARGET_CFLAGS=${CFLAGS} TARGET_LDFLAGS=${CFLAGS} \ - CFLAGS="" make V=1 PREFIX="$THIRDPARTY_BUILD" install +diff --git a/build.py b/build.py +new file mode 100755 +index 0000000..9c71271 +--- /dev/null ++++ b/build.py +@@ -0,0 +1,28 @@ ++#!/usr/bin/env python ++ ++import argparse ++import os ++import shutil ++ ++def main(): ++ parser = argparse.ArgumentParser() ++ parser.add_argument("--prefix") ++ args = parser.parse_args() ++ src_dir = os.path.dirname(os.path.realpath(__file__)) ++ shutil.copytree(src_dir, os.path.basename(src_dir)) ++ os.chdir(os.path.basename(src_dir)) ++ ++ os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.6" ++ os.environ["DEFAULT_CC"] = os.environ.get("CC", "") ++ os.environ["TARGET_CFLAGS"] = os.environ.get("CFLAGS", "") ++ os.environ["TARGET_LDFLAGS"] = os.environ.get("CFLAGS", "") ++ os.environ["CFLAGS"] = "" ++ # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN ++ # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't ++ # fail on it. ++ os.environ["LSAN_OPTIONS"] = "exitcode=0" ++ ++ os.system('make V=1 PREFIX="{}" install'.format(args.prefix)) ++ ++main() ++ diff --git a/bazel/gen_compilation_database.sh b/bazel/gen_compilation_database.sh index 11492a1602f10..13b6e778f478c 100755 --- a/bazel/gen_compilation_database.sh +++ b/bazel/gen_compilation_database.sh @@ -1,6 +1,6 @@ #!/bin/bash -RELEASE_VERSION=0.3.1 +RELEASE_VERSION=0.3.2 if [[ ! 
-d bazel-compilation-database-${RELEASE_VERSION} ]]; then curl -L https://github.com/grailbio/bazel-compilation-database/archive/${RELEASE_VERSION}.tar.gz | tar -xz diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index bb6ba134eb2f2..f2cfc4cfe6c1a 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -2,7 +2,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load(":genrule_repository.bzl", "genrule_repository") load("//api/bazel:envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") -load(":target_recipes.bzl", "TARGET_RECIPES") load( "@bazel_tools//tools/cpp:windows_cc_configure.bzl", "find_vc_path", @@ -27,46 +26,6 @@ def _repository_impl(name, **kwargs): **kwargs ) -def _build_recipe_repository_impl(ctxt): - # on Windows, all deps use rules_foreign_cc - if ctxt.os.name.upper().startswith("WINDOWS"): - return - - # modify the recipes list based on the build context - recipes = _apply_dep_blacklist(ctxt, ctxt.attr.recipes) - - # Setup the build directory with links to the relevant files. - ctxt.symlink(Label("//bazel:repositories.sh"), "repositories.sh") - ctxt.symlink( - Label("//ci/build_container:build_and_install_deps.sh"), - "build_and_install_deps.sh", - ) - ctxt.symlink(Label("//ci/build_container:recipe_wrapper.sh"), "recipe_wrapper.sh") - ctxt.symlink(Label("//ci/build_container:Makefile"), "Makefile") - for r in recipes: - ctxt.symlink( - Label("//ci/build_container/build_recipes:" + r + ".sh"), - "build_recipes/" + r + ".sh", - ) - ctxt.symlink(Label("//ci/prebuilt:BUILD"), "BUILD") - - # Run the build script. 
- print("Fetching external dependencies...") - result = ctxt.execute( - ["./repositories.sh"] + recipes, - quiet = False, - ) - print(result.stdout) - print(result.stderr) - print("External dep build exited with return code: %d" % result.return_code) - if result.return_code != 0: - print("\033[31;1m\033[48;5;226m External dependency build failed, check above log " + - "for errors and ensure all prerequisites at " + - "https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers are met.") - - # This error message doesn't appear to the user :( https://github.com/bazelbuild/bazel/issues/3683 - fail("External dep build failed") - def _default_envoy_build_config_impl(ctx): ctx.file("WORKSPACE", "") ctx.file("BUILD.bazel", "") @@ -191,43 +150,7 @@ def _envoy_api_deps(): actual = "@six_archive//:six", ) -def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): - envoy_repository = repository_rule( - implementation = _build_recipe_repository_impl, - environ = [ - "CC", - "CXX", - "CFLAGS", - "CXXFLAGS", - "LD_LIBRARY_PATH", - ], - # Don't pretend we're in the sandbox, we do some evil stuff with envoy_dep_cache. - local = True, - attrs = { - "recipes": attr.string_list(), - }, - ) - - # Ideally, we wouldn't have a single repository target for all dependencies, but instead one per - # dependency, as suggested in #747. However, it's much faster to build all deps under a single - # recursive make job and single make jobserver. - recipes = depset() - for t in TARGET_RECIPES: - if t not in skip_targets: - recipes += depset([TARGET_RECIPES[t]]) - - envoy_repository( - name = "envoy_deps", - recipes = recipes.to_list(), - ) - - for t in TARGET_RECIPES: - if t not in skip_targets: - native.bind( - name = t, - actual = path + ":" + t, - ) - +def envoy_dependencies(skip_targets = []): # Treat Envoy's overall build config as an external repo, so projects that # build Envoy as a subcomponent can easily override the config. 
if "envoy_build_config" not in native.existing_rules().keys(): @@ -267,6 +190,7 @@ def envoy_dependencies(path = "@envoy_deps//", skip_targets = []): _com_github_gperftools_gperftools() _com_github_jbeder_yaml_cpp() _com_github_libevent_libevent() + _com_github_luajit_luajit() _com_github_madler_zlib() _com_github_nanopb_nanopb() _com_github_nghttp2_nghttp2() @@ -697,6 +621,22 @@ def _com_github_google_jwt_verify(): actual = "@com_github_google_jwt_verify//:jwt_verify_lib", ) +def _com_github_luajit_luajit(): + location = REPOSITORY_LOCATIONS["com_github_luajit_luajit"] + http_archive( + name = "com_github_luajit_luajit", + build_file_content = BUILD_ALL_CONTENT, + patches = ["@envoy//bazel/foreign_cc:luajit.patch"], + patch_args = ["-p1"], + patch_cmds = ["chmod u+x build.py"], + **location + ) + + native.bind( + name = "luajit", + actual = "@envoy//bazel/foreign_cc:luajit", + ) + def _com_github_gperftools_gperftools(): location = REPOSITORY_LOCATIONS["com_github_gperftools_gperftools"] http_archive( @@ -714,16 +654,6 @@ def _com_github_gperftools_gperftools(): def _foreign_cc_dependencies(): _repository_impl("rules_foreign_cc") -def _apply_dep_blacklist(ctxt, recipes): - newlist = [] - skip_list = [] - if _is_linux_ppc(ctxt): - skip_list += PPC_SKIP_TARGETS.keys() - for t in recipes: - if t not in skip_list: - newlist.append(t) - return newlist - def _is_linux(ctxt): return ctxt.os.name == "linux" diff --git a/bazel/repositories.sh b/bazel/repositories.sh deleted file mode 100755 index 6919a4b2c94d1..0000000000000 --- a/bazel/repositories.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -set -e - -if [[ `uname` == "Darwin" ]] -then - function md5sum { - gmd5sum $@ - } -fi - -# Tell build_and_install_deps.sh to build sequentially when performance debugging. -# export BUILD_CONCURRENCY=0 - -# Hash environment variables we care about to force rebuilds when they change. 
-ENV_HASH=$(echo "${CC} ${CXX} ${LD_LIBRARY_PATH}" | md5sum | cut -f 1 -d\ ) - -# Don't build inside the directory Bazel believes the repository_rule output goes. Instead, do so in -# a parallel directory. This allows the build artifacts to survive Bazel clobbering the repostory -# directory when a small change to repositories.bzl or a build recipe happens. We then rely on make -# dependency analysis to detect when stuff needs to be rebuilt. -BASEDIR="${PWD}_cache_${ENV_HASH}" - ->&2 echo "External dependency cache directory ${BASEDIR}" -mkdir -p "${BASEDIR}" - -export THIRDPARTY_DEPS="${BASEDIR}" -export THIRDPARTY_SRC="${BASEDIR}/thirdparty" -export THIRDPARTY_BUILD="${BASEDIR}/thirdparty_build" - -DEPS="" -for r in "$@" -do - DEPS="${DEPS} ${THIRDPARTY_DEPS}/$r.dep" -done - -set -o pipefail -BUILD_LOG="${BASEDIR}"/build.log -(time ./build_and_install_deps.sh ${DEPS}) 2>&1 | tee "${BUILD_LOG}" >&2 - -ln -sf "$(realpath "${THIRDPARTY_SRC}")" thirdparty -ln -sf "$(realpath "${THIRDPARTY_BUILD}")" thirdparty_build diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index f691833d54244..63165a9105582 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,7 +1,7 @@ REPOSITORY_LOCATIONS = dict( bazel_gazelle = dict( - sha256 = "7949fc6cc17b5b191103e97481cf8889217263acf52e00b560683413af204fcb", - urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.16.0/bazel-gazelle-0.16.0.tar.gz"], + sha256 = "3c681998538231a2d24d0c07ed5a7658cb72bfb5fd4bf9911157c0e9ac6a2687", + urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/0.17.0/bazel-gazelle-0.17.0.tar.gz"], ), boringssl = dict( # Use commits from branch "chromium-stable-with-bazel" @@ -34,7 +34,7 @@ REPOSITORY_LOCATIONS = dict( com_github_circonus_labs_libcircllhist = dict( sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", strip_prefix = "libcircllhist-63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", - # 
2018-02-11 + # 2019-02-11 urls = ["https://github.com/circonus-labs/libcircllhist/archive/63a16dd6f2fc7bc841bb17ff92be8318df60e2e1.tar.gz"], ), com_github_cyan4973_xxhash = dict( @@ -58,9 +58,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/fmtlib/fmt/releases/download/5.3.0/fmt-5.3.0.zip"], ), com_github_gabime_spdlog = dict( - sha256 = "78786c641ca278388107e30f1f0fa0307e7e98e1c5279c3d29f71a143f9176b6", - strip_prefix = "spdlog-1.3.0", - urls = ["https://github.com/gabime/spdlog/archive/v1.3.0.tar.gz"], + sha256 = "160845266e94db1d4922ef755637f6901266731c4cb3b30b45bf41efa0e6ab70", + strip_prefix = "spdlog-1.3.1", + urls = ["https://github.com/gabime/spdlog/archive/v1.3.1.tar.gz"], ), com_github_gcovr_gcovr = dict( sha256 = "8a60ba6242d67a58320e9e16630d80448ef6d5284fda5fb3eff927b63c8b04a2", @@ -86,15 +86,20 @@ REPOSITORY_LOCATIONS = dict( strip_prefix = "grpc-1.16.1", urls = ["https://github.com/grpc/grpc/archive/v1.16.1.tar.gz"], ), + com_github_luajit_luajit = dict( + sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", + strip_prefix = "LuaJIT-2.1.0-beta3", + urls = ["https://github.com/LuaJIT/LuaJIT/archive/v2.1.0-beta3.tar.gz"], + ), com_github_nanopb_nanopb = dict( sha256 = "b8dd5cb0d184d424ddfea13ddee3f7b0920354334cbb44df434d55e5f0086b12", strip_prefix = "nanopb-0.3.9.2", urls = ["https://github.com/nanopb/nanopb/archive/0.3.9.2.tar.gz"], ), com_github_nghttp2_nghttp2 = dict( - sha256 = "cb70261634c33dc5adbe780afcfc5dab17838ee303631a02b983c6a217bc16ba", - strip_prefix = "nghttp2-1.35.1", - urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.35.1/nghttp2-1.35.1.tar.gz"], + sha256 = "6b222a264aca23d497f7878a7751bd9da12676717493fe747db49afb51daae79", + strip_prefix = "nghttp2-1.36.0", + urls = ["https://github.com/nghttp2/nghttp2/releases/download/v1.36.0/nghttp2-1.36.0.tar.gz"], ), io_opentracing_cpp = dict( sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", @@ -124,9 +129,9 @@ 
REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/benchmark/archive/505be96ab23056580a3a2315abba048f4428b04e.tar.gz"], ), com_github_libevent_libevent = dict( - sha256 = "0ab250abac1def3d1e20e23e05ce827efa81db65c9004ccfff58d16404e3e369", - strip_prefix = "libevent-release-2.1.9-beta", - urls = ["https://github.com/libevent/libevent/archive/release-2.1.9-beta.tar.gz"], + sha256 = "53d4bb49b837944893b7caf9ae8eb43e94690ee5babea6469cc4a928722f99b1", + strip_prefix = "libevent-c4fbae3ae6166dddfa126734edd63213afa14dce", + urls = ["https://github.com/libevent/libevent/archive/c4fbae3ae6166dddfa126734edd63213afa14dce.tar.gz"], ), com_github_madler_zlib = dict( sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", @@ -160,9 +165,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/pallets/jinja/releases/download/2.10/Jinja2-2.10.tar.gz"], ), com_github_pallets_markupsafe = dict( - sha256 = "62f6154071d1ceac8d7dfb5ed7a21dc502cc12e2348c032e5a1cedd018548381", - strip_prefix = "markupsafe-1.1.0/src", - urls = ["https://github.com/pallets/markupsafe/archive/1.1.0.tar.gz"], + sha256 = "222a10e3237d92a9cd45ed5ea882626bc72bc5e0264d3ed0f2c9129fa69fc167", + strip_prefix = "markupsafe-1.1.1/src", + urls = ["https://github.com/pallets/markupsafe/archive/1.1.1.tar.gz"], ), com_github_tencent_rapidjson = dict( sha256 = "bf7ced29704a1e696fbccf2a2b4ea068e7774fa37f6d7dd4039d0787f8bed98e", @@ -192,15 +197,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/googletest/archive/eb9225ce361affe561592e0912320b9db84985d0.tar.gz"], ), com_google_protobuf = dict( - sha256 = "46f1da3a6a6db66dd240cf95a5553198f7c6e98e6ac942fceb8a1cf03291d96e", - strip_prefix = "protobuf-7492b5681231c79f0265793fa57dc780ae2481d6", - # TODO(htuch): Switch back to released versions for protobuf when a release > 3.6.0 happens - # that includes: - # - https://github.com/protocolbuffers/protobuf/commit/f35669b8d3f46f7f1236bd21f14d744bba251e60 - # - 
https://github.com/protocolbuffers/protobuf/commit/6a4fec616ec4b20f54d5fb530808b855cb664390 - # - https://github.com/protocolbuffers/protobuf/commit/fa252ec2a54acb24ddc87d48fed1ecfd458445fd - # - https://github.com/protocolbuffers/protobuf/commit/7492b5681231c79f0265793fa57dc780ae2481d6 - urls = ["https://github.com/protocolbuffers/protobuf/archive/7492b5681231c79f0265793fa57dc780ae2481d6.tar.gz"], + sha256 = "3e933375ecc58d01e52705479b82f155aea2d02cc55d833f8773213e74f88363", + strip_prefix = "protobuf-3.7.0", + urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v3.7.0/protobuf-all-3.7.0.tar.gz"], ), grpc_httpjson_transcoding = dict( sha256 = "dedd76b0169eb8c72e479529301a1d9b914a4ccb4d2b5ddb4ebe92d63a7b2152", @@ -209,17 +208,15 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/64d6ac985360b624d8e95105701b64a3814794cd.tar.gz"], ), com_github_golang_protobuf = dict( - # TODO(sesmith177): Remove this dependency when both: - # 1. There's a release of golang/protobuf that includes - # https://github.com/golang/protobuf/commit/31e0d063dd98c052257e5b69eeb006818133f45c - # 2. That release is included in rules_go - sha256 = "4cbd5303a5cf85791b3c310a50a479027c035d75091bb90c482ba67b0a2cf5b4", - strip_prefix = "protobuf-31e0d063dd98c052257e5b69eeb006818133f45c", - urls = ["https://github.com/golang/protobuf/archive/31e0d063dd98c052257e5b69eeb006818133f45c.tar.gz"], + # TODO(sesmith177): Remove this dependency when: + # 1. 
There's a release of rules_go that includes golang/protobuf v1.3.0 + sha256 = "f44cfe140cdaf0031dac7d7376eee4d5b07084cce400d7ecfac4c46d33f18a52", + strip_prefix = "protobuf-1.3.0", + urls = ["https://github.com/golang/protobuf/archive/v1.3.0.tar.gz"], ), io_bazel_rules_go = dict( - sha256 = "7be7dc01f1e0afdba6c8eb2b43d2fa01c743be1b9273ab1eaf6c233df078d705", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.16.5/rules_go-0.16.5.tar.gz"], + sha256 = "6776d68ebb897625dead17ae510eac3d5f6342367327875210df44dbe2aeeb19", + urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.17.1/rules_go-0.17.1.tar.gz"], ), rules_foreign_cc = dict( sha256 = "e1b67e1fda647c7713baac11752573bfd4c2d45ef09afb4d4de9eb9bd4e5ac76", diff --git a/bazel/target_recipes.bzl b/bazel/target_recipes.bzl deleted file mode 100644 index 802b940704933..0000000000000 --- a/bazel/target_recipes.bzl +++ /dev/null @@ -1,6 +0,0 @@ -# These should reflect //ci/prebuilt/BUILD declared targets. This a map from -# target in //ci/prebuilt/BUILD to the underlying build recipe in -# ci/build_container/build_recipes. 
-TARGET_RECIPES = { - "luajit": "luajit", -} diff --git a/ci/WORKSPACE b/ci/WORKSPACE index 2f2197641d11f..f33b9aa583168 100644 --- a/ci/WORKSPACE +++ b/ci/WORKSPACE @@ -9,9 +9,7 @@ local_repository( path = "/source", ) -envoy_dependencies( - path = "@envoy//ci/prebuilt", -) +envoy_dependencies() # TODO(htuch): Roll this into envoy_dependencies() load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") @@ -24,7 +22,7 @@ load("@envoy_api//bazel:repositories.bzl", "api_dependencies") api_dependencies() -load("@io_bazel_rules_go//go:def.bzl", "go_register_toolchains", "go_rules_dependencies") +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() diff --git a/ci/WORKSPACE.filter.example b/ci/WORKSPACE.filter.example index 388747003df8a..6262671453103 100644 --- a/ci/WORKSPACE.filter.example +++ b/ci/WORKSPACE.filter.example @@ -8,9 +8,8 @@ local_repository( load("@envoy//bazel:repositories.bzl", "envoy_dependencies", "GO_VERSION") load("@envoy//bazel:cc_configure.bzl", "cc_configure") -envoy_dependencies( - path = "@envoy//ci/prebuilt", -) +envoy_dependencies() + # TODO(htuch): Roll this into envoy_dependencies() load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") rules_foreign_cc_dependencies() @@ -20,6 +19,6 @@ cc_configure() load("@envoy_api//bazel:repositories.bzl", "api_dependencies") api_dependencies() -load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") go_rules_dependencies() go_register_toolchains(go_version = GO_VERSION) diff --git a/ci/build_container/BUILD b/ci/build_container/BUILD deleted file mode 100644 index 9c7fd8d73f9a9..0000000000000 --- a/ci/build_container/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_package", -) - 
-envoy_package() - -exports_files([ - "build_and_install_deps.sh", - "Makefile", -]) diff --git a/ci/build_container/Dockerfile-centos b/ci/build_container/Dockerfile-centos index c98d7a52fcdbc..b8fb1abaf527a 100644 --- a/ci/build_container/Dockerfile-centos +++ b/ci/build_container/Dockerfile-centos @@ -1,10 +1,9 @@ FROM centos:7 -COPY ./build_and_install_deps.sh ./recipe_wrapper.sh ./Makefile ./build_container_common.sh / +COPY ./build_container_common.sh / COPY WORKSPACE /bazel-prebuilt/ COPY ./api /bazel-prebuilt/api COPY ./bazel /bazel-prebuilt/bazel -COPY ./build_recipes/*.sh /build_recipes/ COPY ./build_container_centos.sh / diff --git a/ci/build_container/Dockerfile-ubuntu b/ci/build_container/Dockerfile-ubuntu index 2a0ed6355ebe8..8c70abd297d3d 100644 --- a/ci/build_container/Dockerfile-ubuntu +++ b/ci/build_container/Dockerfile-ubuntu @@ -1,10 +1,9 @@ FROM ubuntu:xenial -COPY ./build_and_install_deps.sh ./recipe_wrapper.sh ./Makefile ./build_container_common.sh / +COPY ./build_container_common.sh / COPY WORKSPACE /bazel-prebuilt/ COPY ./api /bazel-prebuilt/api COPY ./bazel /bazel-prebuilt/bazel -COPY ./build_recipes /build_recipes COPY ./build_container_ubuntu.sh / diff --git a/ci/build_container/Makefile b/ci/build_container/Makefile deleted file mode 100644 index 2aaca46d28e11..0000000000000 --- a/ci/build_container/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -# The individual build recipe scripts must contain sufficient information (e.g. SHA, URL, repo, -# version number, etc.) to uniquely identify the revision of the upstream dependency. This allows -# make to pick up changes with a simple direct dependency on the build recipe. - -RECIPES := build_recipes - -# Make sure we use a consistent compiler across all deps. 
-CC ?= gcc -CXX ?= g++ - -# Common compiler flags -CXXFLAGS += -ggdb3 -fno-omit-frame-pointer -O2 -CFLAGS += -ggdb3 -fno-omit-frame-pointer -O2 -CPPFLAGS ?= -DNDEBUG - -# Keep track of the env vars we depend upon for $(THIRDPARTY_DEPS)/%.dep.env. If the list (captured -# above) of flags changes, this should be updated. -ENV_STR := $(CC) $(CXX) $(CXXFLAGS) $(CFLAGS) $(CPPFLAGS) - -# If $(BUILD_DISTINCT) is set in the make environment, the artifacts are built and installed in -# distinct directories under $(THIRDPARTY_BUILD) and $(THIRDPARTY_SRC). They end up looking like -# $(THIRDPARTY_BUILD)/protobuf.dep/include, etc. instead of all being under -# $(THIRDPARTY_BUILD)/include. -DISTINCT_PATH = $(if $(BUILD_DISTINCT),$(@F),) - -build-setup = rm -rf "$@.build" && \ - $(if $(BUILD_DISTINCT),rm -rf "$(THIRDPARTY_BUILD)/$(DISTINCT_PATH)" &&,) \ - $(if $(BUILD_DISTINCT),rm -rf "$(THIRDPARTY_SRC)/$(DISTINCT_PATH)" &&,) \ - mkdir -p "$@.build" && \ - mkdir -p "$(THIRDPARTY_BUILD)/$(DISTINCT_PATH)/lib" && \ - mkdir -p "$(THIRDPARTY_BUILD)/$(DISTINCT_PATH)/include" && \ - cd "$@.build" && \ - echo "Building in $@.build, logs at $@.log" - -build-complete = rm -rf "$@.build" && \ - echo "Successful build of $@" && \ - touch $@ - -# This needs to be invoked with $(call build-recipe,DEFS) where DEFS are additional environment -# definitions that are to be injected into the build recipe execution environment. -build-recipe = cd "$(THIRDPARTY_SRC)" && \ - $(build-setup) && \ - (((THIRDPARTY_SRC="$(THIRDPARTY_SRC)/$(DISTINCT_PATH)" \ - THIRDPARTY_BUILD="$(THIRDPARTY_BUILD)/$(DISTINCT_PATH)" \ - CC="$(CC)" \ - CXX="$(CXX)" \ - CFLAGS="$(CFLAGS)" \ - CXXFLAGS="$(CXXFLAGS)" \ - CPPFLAGS="$(CPPFLAGS)" \ - $(1) \ - bash -c "time $(CURDIR)/recipe_wrapper.sh $(realpath $<)" 2>&1) > $@.log) || (cat $@.log; exit 1)) && \ - $(build-complete) - -# Simplify wildcard phony with FORCE target. -.PHONY: FORCE -FORCE: - -# Capture $(ENV_STR) deps to retrigger build when they change. 
-.PRECIOUS: $(THIRDPARTY_DEPS)/%.dep.env -$(THIRDPARTY_DEPS)/%.dep.env: FORCE - @[ "$$(cat $@)" != "$(ENV_STR)" ] && echo "$(ENV_STR)" > $@ || echo "No need to rebuild $@" - -$(THIRDPARTY_DEPS)/%.dep: $(RECIPES)/%.sh $(THIRDPARTY_DEPS)/%.dep.env - @+$(call build-recipe,) - -# Special support for targets that need protobuf, and hence take a dependency on protobuf.dep. -PROTOBUF_BUILD ?= $(THIRDPARTY_BUILD)/$(if $(BUILD_DISTINCT),protobuf.dep,) diff --git a/ci/build_container/build_and_install_deps.sh b/ci/build_container/build_and_install_deps.sh deleted file mode 100755 index 610776a49568a..0000000000000 --- a/ci/build_container/build_and_install_deps.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -set -e - -mkdir -p "${THIRDPARTY_DEPS}" -mkdir -p "${THIRDPARTY_BUILD}" -mkdir -p "${THIRDPARTY_SRC}" - -if [ -z "$NUM_CPUS" ]; then - case `uname` in - Darwin) - NUM_CPUS=`/usr/sbin/sysctl hw.physicalcpu | cut -f 2 -d' '`;; - *) - NUM_CPUS=`grep -c ^processor /proc/cpuinfo`;; - esac -fi - -# Invokers can set BUILD_CONCURRENCY=0 to ensure each build recipe is invoked sequentially, with all -# CPU resources available. This is useful when debugging build performance. 
-if [[ "${BUILD_CONCURRENCY}" == "0" ]] -then - for dep in "$@" - do - make -C "$(dirname "$0")" -j "${NUM_CPUS}" "$dep" - done -else - make -C "$(dirname "$0")" -j "${NUM_CPUS}" "$@" -fi diff --git a/ci/build_container/build_container_common.sh b/ci/build_container/build_container_common.sh index e914a2a0eb1cd..ae78400bb632f 100755 --- a/ci/build_container/build_container_common.sh +++ b/ci/build_container/build_container_common.sh @@ -1,8 +1,8 @@ #!/bin/bash -e # buildifier -VERSION=0.20.0 -SHA256=92c74a3c2331a12f578fcf9c5ace645b7537e1a18f02f91d0fdbb6f0655e8493 +VERSION=0.22.0 +SHA256=25159de982ec8896fc8213499df0a7003dfb4a03dd861f90fa5679d16faf0f99 curl --location --output /usr/local/bin/buildifier https://github.com/bazelbuild/buildtools/releases/download/"$VERSION"/buildifier \ && echo "$SHA256" '/usr/local/bin/buildifier' | sha256sum --check \ && chmod +x /usr/local/bin/buildifier @@ -11,18 +11,6 @@ curl --location --output /usr/local/bin/buildifier https://github.com/bazelbuild export CC=gcc export CXX=g++ -export THIRDPARTY_DEPS=/tmp -export THIRDPARTY_SRC=/thirdparty -DEPS=$(python <(cat /bazel-prebuilt/bazel/target_recipes.bzl; \ - echo "print ' '.join(\"${THIRDPARTY_DEPS}/%s.dep\" % r for r in set(TARGET_RECIPES.values()))")) - -# TODO(htuch): We build twice as a workaround for https://github.com/google/protobuf/issues/3322. -# Fix this. This will be gone real soon now. 
-export THIRDPARTY_BUILD=/thirdparty_build -export CPPFLAGS="-DNDEBUG" -echo "Building opt deps ${DEPS}" -"$(dirname "$0")"/build_and_install_deps.sh ${DEPS} - echo "Building Bazel-managed deps (//bazel/external:all_external)" mkdir /bazel-prebuilt-root /bazel-prebuilt-output BAZEL_OPTIONS="--output_user_root=/bazel-prebuilt-root --output_base=/bazel-prebuilt-output" diff --git a/ci/build_container/build_container_ubuntu.sh b/ci/build_container/build_container_ubuntu.sh index c21e17c266d6d..3f4b3f0b5f638 100755 --- a/ci/build_container/build_container_ubuntu.sh +++ b/ci/build_container/build_container_ubuntu.sh @@ -6,7 +6,7 @@ set -e apt-get update export DEBIAN_FRONTEND=noninteractive apt-get install -y wget software-properties-common make cmake git python python-pip python3 python3-pip \ - unzip bc libtool ninja-build automake zip time golang gdb strace wireshark tshark tcpdump + unzip bc libtool ninja-build automake zip time golang gdb strace wireshark tshark tcpdump lcov # clang 7. wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main" @@ -18,8 +18,10 @@ apt update apt install -y g++-7 update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-7 1000 update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 1000 +update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-7 1000 update-alternatives --config gcc update-alternatives --config g++ +update-alternatives --config gcov # Bazel and related dependencies. 
apt-get install -y openjdk-8-jdk curl echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list diff --git a/ci/build_container/build_recipes/BUILD b/ci/build_container/build_recipes/BUILD deleted file mode 100644 index 4678729957f0c..0000000000000 --- a/ci/build_container/build_recipes/BUILD +++ /dev/null @@ -1,10 +0,0 @@ -licenses(["notice"]) # Apache 2 - -load( - "//bazel:envoy_build_system.bzl", - "envoy_package", -) - -envoy_package() - -exports_files(glob(["*.sh"])) diff --git a/ci/build_container/build_recipes/versions.py b/ci/build_container/build_recipes/versions.py deleted file mode 100755 index 1dc60d3adc853..0000000000000 --- a/ci/build_container/build_recipes/versions.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -import sys - -LUAJIT_VERSION = '2.1.0-beta3' -LUAJIT_FILE_URL = 'https://github.com/LuaJIT/LuaJIT/archive/v' + LUAJIT_VERSION + '.tar.gz' -LUAJIT_FILE_SHA256 = '409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8' -LUAJIT_FILE_PREFIX = 'LuaJIT-' + LUAJIT_VERSION - -# TODO(cmluciano): Bump to release 2.8 -# This sha is specifically chosen to fix ppc64le builds that require inclusion -# of asm/ptrace.h -GPERFTOOLS_VERSION = 'fc00474ddc21fff618fc3f009b46590e241e425e' -GPERFTOOLS_FILE_URL = 'https://github.com/gperftools/gperftools/archive/' + GPERFTOOLS_VERSION + '.tar.gz' -GPERFTOOLS_FILE_SHA256 = '18574813a062eee487bc1b761e8024a346075a7cb93da19607af362dc09565ef' -GPERFTOOLS_FILE_PREFIX = 'gperftools-' + GPERFTOOLS_VERSION - -RECIPES = dict( - luajit=dict( - version=LUAJIT_VERSION, - url=LUAJIT_FILE_URL, - sha256=LUAJIT_FILE_SHA256, - strip_prefix=LUAJIT_FILE_PREFIX, - ), - gperftools=dict( - version=GPERFTOOLS_VERSION, - url=GPERFTOOLS_FILE_URL, - sha256=GPERFTOOLS_FILE_SHA256, - strip_prefix=GPERFTOOLS_FILE_PREFIX, - )) - -if __name__ == '__main__': - if len(sys.argv) != 2: - print('Usage: %s ' % sys.argv[0]) - sys.exit(1) - name = sys.argv[1] - if name not 
in RECIPES: - print('Unknown recipie: %s' % recipe) - sys.exit(1) - recipe = RECIPES[name] - print(""" - export VERSION={} - export FILE_URL={} - export FILE_SHA256={} - export FILE_PREFIX={} - """.format( - recipe['version'], - recipe['url'], - recipe['sha256'], - recipe['strip_prefix'], - )) diff --git a/ci/build_container/recipe_wrapper.sh b/ci/build_container/recipe_wrapper.sh deleted file mode 100755 index cdea29f8d1c8a..0000000000000 --- a/ci/build_container/recipe_wrapper.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -PS4='+ $(date "+%s.%N") ' -set -x - -if [[ `uname` == "Darwin" ]]; then - function sha256sum { - gsha256sum $@ - } -fi - -. $1 - -echo DONE diff --git a/ci/build_setup.sh b/ci/build_setup.sh index 5bd8cfb601be8..6e969f272c652 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -77,8 +77,6 @@ export BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HOME --test_env=PYT --test_env=UBSAN_OPTIONS=print_stacktrace=1 \ --cache_test_results=no --test_output=all ${BAZEL_EXTRA_TEST_OPTIONS}" [[ "${BAZEL_EXPUNGE}" == "1" ]] && "${BAZEL}" clean --expunge -ln -sf /thirdparty "${ENVOY_SRCDIR}"/ci/prebuilt -ln -sf /thirdparty_build "${ENVOY_SRCDIR}"/ci/prebuilt # Replace the existing Bazel output cache with a copy of the image's prebuilt deps. if [[ -d /bazel-prebuilt-output && ! -d "${TEST_TMPDIR}/_bazel_${USER}" ]]; then diff --git a/ci/prebuilt/BUILD b/ci/prebuilt/BUILD deleted file mode 100644 index 97f8f37a9d3bc..0000000000000 --- a/ci/prebuilt/BUILD +++ /dev/null @@ -1,12 +0,0 @@ -licenses(["notice"]) # Apache 2 - -package(default_visibility = ["//visibility:public"]) - -cc_library( - name = "luajit", - srcs = ["thirdparty_build/lib/libluajit-5.1.a"], - hdrs = glob(["thirdparty_build/include/luajit-2.1/*"]), - includes = ["thirdparty_build/include"], - # TODO(mattklein123): We should strip luajit-2.1 here for consumers. However, if we do that - # the headers get included using -I vs. -isystem which then causes old-style-cast warnings. 
-) diff --git a/docs/root/configuration/cluster_manager/cds.rst b/docs/root/configuration/cluster_manager/cds.rst index 89f2dbcd4b186..ebfe5008ac92a 100644 --- a/docs/root/configuration/cluster_manager/cds.rst +++ b/docs/root/configuration/cluster_manager/cds.rst @@ -29,4 +29,4 @@ CDS has a statistics tree rooted at *cluster_manager.cds.* with the following st update_failure, Counter, Total API fetches that failed because of network errors update_rejected, Counter, Total API fetches that failed because of schema/validation errors version, Gauge, Hash of the contents from the last successful API fetch - control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server + control_plane.connected_state, BoolIndicator, Current connection state with management server diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst index 5135080e356e2..680a2ff067cb8 100644 --- a/docs/root/configuration/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/cluster_manager/cluster_stats.rst @@ -60,6 +60,7 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_cx_max_requests, Counter, Total connections closed due to maximum requests upstream_cx_none_healthy, Counter, Total times connection not established due to no healthy hosts upstream_rq_total, Counter, Total requests + upstream_rq_hedge_abandoned, Counter, Number of hedged requests that were abandoned due to accepting another response. 
upstream_rq_active, Gauge, Total active requests upstream_rq_pending_total, Counter, Total requests pending a connection pool connection upstream_rq_pending_overflow, Counter, Total requests that overflowed connection pool circuit breaking and were failed @@ -148,10 +149,10 @@ Circuit breakers statistics will be rooted at *cluster..circuit_breakers.< :header: Name, Type, Description :widths: 1, 1, 2 - cx_open, Gauge, Whether the connection circuit breaker is closed (0) or open (1) - rq_pending_open, Gauge, Whether the pending requests circuit breaker is closed (0) or open (1) - rq_open, Gauge, Whether the requests circuit breaker is closed (0) or open (1) - rq_retry_open, Gauge, Whether the retry circuit breaker is closed (0) or open (1) + cx_open, BoolIndicator, Whether the connection circuit breaker is closed (false) or open (true) + rq_pending_open, BoolIndicator, Whether the pending requests circuit breaker is closed (false) or open (true) + rq_open, BoolIndicator, Whether the requests circuit breaker is closed (false) or open (true) + rq_retry_open, BoolIndicator, Whether the retry circuit breaker is closed (false) or open (true) .. _config_cluster_manager_cluster_stats_dynamic_http: diff --git a/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst b/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst index 04334a483ae80..1ef27594b6e9d 100644 --- a/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst +++ b/docs/root/configuration/http_filters/grpc_http1_reverse_bridge_filter.rst @@ -1,11 +1,11 @@ -.. _config_http_filters_grpc_reverse_bridge: +.. _config_http_filters_grpc_http1_reverse_bridge: gRPC HTTP/1.1 reverse bridge ============================ * gRPC :ref:`architecture overview ` * :ref:`v2 API reference ` -* This filter should be configured with the name *envoy.grpc_http1_reverse_bridge*. +* This filter should be configured with the name *envoy.filters.http.grpc_http1_reverse_bridge*. 
This is a filter that enables converting an incoming gRPC request into a HTTP/1.1 request to allow a server that does not understand HTTP/2 or gRPC semantics to handle the request. diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index a90fe84f6f11e..a876087bb305a 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -48,4 +48,4 @@ LDS has a statistics tree rooted at *listener_manager.lds.* with the following s update_failure, Counter, Total API fetches that failed because of network errors update_rejected, Counter, Total API fetches that failed because of schema/validation errors version, Gauge, Hash of the contents from the last successful API fetch - control_plane.connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server + control_plane.connected_state, BoolIndicator, Current connection state with management server diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst index 560f6f130981d..d65bf287c0e0e 100644 --- a/docs/root/configuration/overview/v2_overview.rst +++ b/docs/root/configuration/overview/v2_overview.rst @@ -590,7 +590,7 @@ Management Server has a statistics tree rooted at *control_plane.* with the foll :header: Name, Type, Description :widths: 1, 1, 2 - connected_state, Gauge, A boolean (1 for connected and 0 for disconnected) that indicates the current connection state with management server + connected_state, BoolIndicator, Current connection state with management server rate_limit_enforced, Counter, Total number of times rate limit was enforced for management server requests pending_requests, Gauge, Total number of pending requests when the rate limit was enforced diff --git a/docs/root/install/building.rst b/docs/root/install/building.rst index f047b582fc169..3db6ffc7ea4d8 100644 --- 
a/docs/root/install/building.rst +++ b/docs/root/install/building.rst @@ -21,7 +21,6 @@ recent Linux including Ubuntu 16 LTS. Building Envoy has the following requirements: * GCC 7+ or Clang/LLVM 7+ (for C++14 support). -* These :repo:`pre-built ` third party dependencies. * These :repo:`Bazel native ` dependencies. Please see the linked :repo:`CI ` and :repo:`Bazel ` documentation diff --git a/docs/root/intro/arch_overview/grpc.rst b/docs/root/intro/arch_overview/grpc.rst index dace892196e7f..84226f444511d 100644 --- a/docs/root/intro/arch_overview/grpc.rst +++ b/docs/root/intro/arch_overview/grpc.rst @@ -31,8 +31,8 @@ Envoy supports two gRPC bridges: * :ref:`grpc_http1_bridge filter ` which allows gRPC requests to be sent to Envoy over HTTP/1.1. Envoy then translates the requests to HTTP/2 for transport to the target server. The response is translated back to HTTP/1.1. When installed, the bridge filter gathers per RPC statistics in addition to the standard array of global HTTP statistics. -* :ref:`grpc_http1_reverse_bridge filter ` which allows gRPC requests to be sent to Envoy and - then translated to HTTP/1.1 when sent to the upstream. The response is then converted back into gRPC when sent to the downstream. +* :ref:`grpc_http1_reverse_bridge filter ` which allows gRPC requests to be sent to Envoy + and then translated to HTTP/1.1 when sent to the upstream. The response is then converted back into gRPC when sent to the downstream. This filter can also optionally manage the gRPC frame header, allowing the upstream to not have to be gRPC aware at all. .. 
_arch_overview_grpc_services: diff --git a/docs/root/intro/arch_overview/service_discovery.rst b/docs/root/intro/arch_overview/service_discovery.rst index f950b82b667a2..4d00f638dbdc8 100644 --- a/docs/root/intro/arch_overview/service_discovery.rst +++ b/docs/root/intro/arch_overview/service_discovery.rst @@ -96,6 +96,14 @@ The Envoy project provides reference gRPC implementations of EDS and in both `Java `_ and `Go `_. +.. _arch_overview_service_discovery_types_custom: + +Custom cluster +^^^^^^^^^^^^^^ + +Envoy also supports a custom cluster discovery mechanism. Custom clusters are specified using +:ref:`cluster_type field ` on the cluster configuration. + Generally active health checking is used in conjunction with the eventually consistent service discovery service data to making load balancing and routing decisions. This is discussed further in the following section. diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index a1e3eedb15e72..64dbd13178feb 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -14,6 +14,8 @@ Version history * config: removed deprecated --v2-config-only from command line config. * config: removed deprecated_v1 sds_config from :ref:`Bootstrap config `. * config: removed REST_LEGACY as a valid :ref:`ApiType `. +* config: finish cluster warming only when a named response i.e. ClusterLoadAssignment associated to the cluster being warmed comes in the EDS response. This is a behavioural change from the current implementation where warming of cluster completes on missing load assignments also. +* config: use Envoy cpuset size to set the default number of worker threads if :option:`--cpuset-threads` is enabled. * cors: added :ref:`filter_enabled & shadow_enabled RuntimeFractionalPercent flags ` to filter. * ext_authz: added an configurable option to make the gRPC service cross-compatible with V2Alpha. Note that this feature is already deprecated.
It should be used for a short time, and only when transitioning from alpha to V2 release version. * ext_authz: migrated from V2alpha to V2 and improved the documentation. @@ -44,11 +46,13 @@ Version history * stats: added support for histograms in prometheus * stats: added usedonly flag to prometheus stats to only output metrics which have been updated at least once. +* stats: added BoolIndicator stat type, converted the following 1-or-0 Gauges: control_plane.connected_state, cx_open, rq_pending_open, rq_open, rq_retry_open, runtime.admin_overrides_active, open_gauge, config.active, server.live. * tap: added new alpha :ref:`HTTP tap filter `. * tls: enabled TLS 1.3 on the server-side (non-FIPS builds). * upstream: add hash_function to specify the hash function for :ref:`ring hash` as either xxHash or `murmurHash2 `_. MurmurHash2 is compatible with std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled on Linux and not macOS. * upstream: added :ref:`degraded health value` which allows routing to certain hosts only when there are insufficient healthy hosts available. +* upstream: add cluster factory to allow creating and registering :ref:`custom cluster type`. * tracing: added :ref:`verbose ` to support logging annotations on spans. * upstream: added support for host weighting and :ref:`locality weighting ` in the :ref:`ring hash load balancer `, and added a :ref:`maximum_ring_size` config parameter to strictly bound the ring size. * upstream: added configuration option to select any host when the fallback policy fails. 
diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst index 3b0796fcfd929..e88da1ac382ab 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -213,7 +213,8 @@ modify different aspects of the server: "restart_epoch": 0, "file_flush_interval": "10s", "drain_time": "600s", - "parent_shutdown_time": "900s" + "parent_shutdown_time": "900s", + "cpuset_threads": false }, "uptime_current_epoch": "6s", "uptime_all_epochs": "6s" diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst index 6e2b45d10ba22..c863a47e693c6 100644 --- a/docs/root/operations/cli.rst +++ b/docs/root/operations/cli.rst @@ -76,6 +76,14 @@ following are the command line options that Envoy supports. `connection` component to run at `trace` level, you should pass ``upstream:debug,connection:trace`` to this flag. See ``ALL_LOGGER_IDS`` in :repo:`/source/common/common/logger.h` for a list of components. +.. option:: --cpuset-threads + + *(optional)* This flag is used to control the number of worker threads if :option:`--concurrency` is + not set. If enabled, the assigned cpuset size is used to determine the number of worker threads on + Linux-based systems. Otherwise the number of worker threads is set to the number of hardware threads + on the machine. You can read more about cpusets in the + `kernel documentation `_. + .. option:: --log-path *(optional)* The output file path where logs should be written. 
This file will be re-opened diff --git a/examples/grpc-bridge/script/build b/examples/grpc-bridge/script/build.sh similarity index 100% rename from examples/grpc-bridge/script/build rename to examples/grpc-bridge/script/build.sh diff --git a/include/envoy/api/BUILD b/include/envoy/api/BUILD index 265e047ec9789..0b2b6df8cc2d4 100644 --- a/include/envoy/api/BUILD +++ b/include/envoy/api/BUILD @@ -25,5 +25,9 @@ envoy_cc_library( envoy_cc_library( name = "os_sys_calls_interface", - hdrs = ["os_sys_calls.h"], + hdrs = [ + "os_sys_calls.h", + "os_sys_calls_common.h", + "os_sys_calls_linux.h", + ], ) diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h index d3edee58fa6df..07b11a65c4286 100644 --- a/include/envoy/api/os_sys_calls.h +++ b/include/envoy/api/os_sys_calls.h @@ -9,33 +9,12 @@ #include #include +#include "envoy/api/os_sys_calls_common.h" #include "envoy/common/pure.h" namespace Envoy { namespace Api { -/** - * SysCallResult holds the rc and errno values resulting from a system call. - */ -template struct SysCallResult { - - /** - * The return code from the system call. - */ - T rc_; - - /** - * The errno value as captured after the system call. - */ - int errno_; -}; - -typedef SysCallResult SysCallIntResult; -typedef SysCallResult SysCallSizeResult; -typedef SysCallResult SysCallPtrResult; -typedef SysCallResult SysCallStringResult; -typedef SysCallResult SysCallBoolResult; - class OsSysCalls { public: virtual ~OsSysCalls() {} @@ -65,6 +44,11 @@ class OsSysCalls { */ virtual SysCallSizeResult recv(int socket, void* buffer, size_t length, int flags) PURE; + /** + * @see recv (man 2 recvfrom) + */ + virtual SysCallSizeResult recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) PURE; /** * Release all resources allocated for fd. * @return zero on success, -1 returned otherwise. 
diff --git a/include/envoy/api/os_sys_calls_common.h b/include/envoy/api/os_sys_calls_common.h new file mode 100644 index 0000000000000..3c283e064bbfd --- /dev/null +++ b/include/envoy/api/os_sys_calls_common.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include + +namespace Envoy { +namespace Api { +/** + * SysCallResult holds the rc and errno values resulting from a system call. + */ +template struct SysCallResult { + + /** + * The return code from the system call. + */ + T rc_; + + /** + * The errno value as captured after the system call. + */ + int errno_; +}; + +typedef SysCallResult SysCallIntResult; +typedef SysCallResult SysCallSizeResult; +typedef SysCallResult SysCallPtrResult; +typedef SysCallResult SysCallStringResult; +typedef SysCallResult SysCallBoolResult; + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/api/os_sys_calls_linux.h b/include/envoy/api/os_sys_calls_linux.h new file mode 100644 index 0000000000000..cd90daea538df --- /dev/null +++ b/include/envoy/api/os_sys_calls_linux.h @@ -0,0 +1,28 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include + +#include "envoy/api/os_sys_calls_common.h" +#include "envoy/common/pure.h" + +namespace Envoy { +namespace Api { + +class LinuxOsSysCalls { +public: + virtual ~LinuxOsSysCalls() {} + + /** + * @see sched_getaffinity (man 2 sched_getaffinity) + */ + virtual SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) PURE; +}; + +typedef std::unique_ptr LinuxOsSysCallsPtr; + +} // namespace Api +} // namespace Envoy diff --git a/include/envoy/config/BUILD b/include/envoy/config/BUILD index 5c41821d10c80..4696d029075fc 100644 --- a/include/envoy/config/BUILD +++ b/include/envoy/config/BUILD @@ -23,6 +23,7 @@ envoy_cc_library( deps = [ "//include/envoy/stats:stats_macros", "//source/common/protobuf", + "@envoy_api//envoy/api/v2:discovery_cc", ], ) diff --git a/include/envoy/config/grpc_mux.h b/include/envoy/config/grpc_mux.h index 6872a62d875b6..af568566e7edb 100644 --- a/include/envoy/config/grpc_mux.h +++ b/include/envoy/config/grpc_mux.h @@ -13,9 +13,9 @@ namespace Config { * All control plane related stats. @see stats_macros.h */ // clang-format off -#define ALL_CONTROL_PLANE_STATS(COUNTER, GAUGE) \ +#define ALL_CONTROL_PLANE_STATS(BOOL_INDICATOR, COUNTER, GAUGE) \ COUNTER(rate_limit_enforced) \ - GAUGE(connected_state) \ + BOOL_INDICATOR(connected_state) \ GAUGE(pending_requests) \ // clang-format on @@ -23,7 +23,7 @@ namespace Config { * Struct definition for all control plane stats. 
@see stats_macros.h */ struct ControlPlaneStats { - ALL_CONTROL_PLANE_STATS(GENERATE_COUNTER_STRUCT,GENERATE_GAUGE_STRUCT) + ALL_CONTROL_PLANE_STATS(GENERATE_BOOL_INDICATOR_STRUCT,GENERATE_COUNTER_STRUCT,GENERATE_GAUGE_STRUCT) }; class GrpcMuxCallbacks { diff --git a/include/envoy/config/subscription.h b/include/envoy/config/subscription.h index 6d103c9c01e1d..b81d769585fb7 100644 --- a/include/envoy/config/subscription.h +++ b/include/envoy/config/subscription.h @@ -3,6 +3,7 @@ #include #include +#include "envoy/api/v2/discovery.pb.h" #include "envoy/common/exception.h" #include "envoy/common/pure.h" #include "envoy/stats/stats_macros.h" @@ -29,6 +30,22 @@ template class SubscriptionCallbacks { virtual void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) PURE; + // TODO(fredlas) it is a HACK that there are two of these. After delta CDS is merged, + // I intend to reimplement all state-of-the-world xDSes' use of onConfigUpdate + // in terms of this delta-style one (and remove the original). + /** + * Called when a delta configuration update is received. + * @param added_resources resources newly added since the previous fetch. + * @param removed_resources names of resources that this fetch instructed to be removed. + * @param system_version_info aggregate response data "version", for debugging. + * @throw EnvoyException with reason if the config changes are rejected. Otherwise the changes + * are accepted. Accepted changes have their version_info reflected in subsequent requests. + */ + virtual void + onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) PURE; + /** * Called when either the Subscription is unable to fetch a config update or when onConfigUpdate * invokes an exception. 
diff --git a/include/envoy/http/BUILD b/include/envoy/http/BUILD index bdd112be4e1ed..4f0355015ce3c 100644 --- a/include/envoy/http/BUILD +++ b/include/envoy/http/BUILD @@ -73,7 +73,10 @@ envoy_cc_library( envoy_cc_library( name = "header_map_interface", hdrs = ["header_map.h"], - deps = ["//source/common/common:hash_lib"], + deps = [ + "//source/common/common:assert_lib", + "//source/common/common:hash_lib", + ], ) envoy_cc_library( diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 780fc61b0ae49..f67a55c607963 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -12,6 +12,7 @@ #include "envoy/common/pure.h" +#include "common/common/assert.h" #include "common/common/hash.h" #include "absl/strings/string_view.h" @@ -25,9 +26,12 @@ namespace Http { */ class LowerCaseString { public: - LowerCaseString(LowerCaseString&& rhs) : string_(std::move(rhs.string_)) {} - LowerCaseString(const LowerCaseString& rhs) : string_(rhs.string_) {} - explicit LowerCaseString(const std::string& new_string) : string_(new_string) { lower(); } + LowerCaseString(LowerCaseString&& rhs) : string_(std::move(rhs.string_)) { ASSERT(valid()); } + LowerCaseString(const LowerCaseString& rhs) : string_(rhs.string_) { ASSERT(valid()); } + explicit LowerCaseString(const std::string& new_string) : string_(new_string) { + ASSERT(valid()); + lower(); + } const std::string& get() const { return string_; } bool operator==(const LowerCaseString& rhs) const { return string_ == rhs.string_; } @@ -36,6 +40,9 @@ class LowerCaseString { private: void lower() { std::transform(string_.begin(), string_.end(), string_.begin(), tolower); } + // Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should + // never contain embedded NULLs. 
+ bool valid() const { return string_.find('\0') == std::string::npos; } std::string string_; }; @@ -176,6 +183,9 @@ class HeaderString { }; void freeDynamic(); + // Used by ASSERTs to validate internal consistency. E.g. valid HTTP header keys/values should + // never contain embedded NULLs. + bool valid() const; uint32_t string_length_; Type type_; diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h index e090c9caadc63..ad23563581906 100644 --- a/include/envoy/router/router.h +++ b/include/envoy/router/router.h @@ -253,6 +253,19 @@ class RetryState { virtual RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason, DoRetryCallback callback) PURE; + /** + * Determine whether a "hedged" retry should be sent after the per try + * timeout expires. This means the original request is not canceled, but a + * new one is sent to hedge against the original request taking even longer. + * @param callback supplies the callback that will be invoked when the retry should take place. + * This is used to add timed backoff, etc. The callback will never be called + * inline. + * @return RetryStatus if a retry should take place. @param callback will be called at some point + * in the future. Otherwise a retry should not take place and the callback will never be + * called. Calling code should proceed with error handling. + */ + virtual RetryStatus shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) PURE; + /** * Called when a host was attempted but the request failed and is eligible for another retry. * Should be used to update whatever internal state depends on previously attempted hosts. 
diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 89ee2d132690d..905a218f9cc3f 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -174,6 +174,11 @@ class Options { */ virtual bool mutexTracingEnabled() const PURE; + /** + * @return bool indicating whether cpuset size should determine the number of worker threads. + */ + virtual bool cpusetThreadsEnabled() const PURE; + /** * Converts the Options in to CommandLineOptions proto message defined in server_info.proto. * @return CommandLineOptionsPtr the protobuf representation of the options. diff --git a/include/envoy/stats/scope.h b/include/envoy/stats/scope.h index ef913edea6e02..ea0378bb752a9 100644 --- a/include/envoy/stats/scope.h +++ b/include/envoy/stats/scope.h @@ -51,6 +51,11 @@ class Scope { */ virtual Gauge& gauge(const std::string& name) PURE; + /** + * @return a bool within the scope's namespace. + */ + virtual BoolIndicator& boolIndicator(const std::string& name) PURE; + /** * @return a histogram within the scope's namespace with a particular value type. */ diff --git a/include/envoy/stats/source.h b/include/envoy/stats/source.h index 8dea65487a587..149b8e7fc3c70 100644 --- a/include/envoy/stats/source.h +++ b/include/envoy/stats/source.h @@ -37,6 +37,14 @@ class Source { */ virtual const std::vector& cachedGauges() PURE; + /** + * Returns all known bools. Will use cached values if already accessed and clearCache() hasn't + * been called since. + * @return std::vector& all known bools. Note: reference may not be + * valid after clearCache() is called. + */ + virtual const std::vector& cachedBoolIndicators() PURE; + /** * Returns all known parent histograms. Will use cached values if already accessed and * clearCache() hasn't been called since. 
diff --git a/include/envoy/stats/stat_data_allocator.h b/include/envoy/stats/stat_data_allocator.h index f0ea93e266d04..5696408e2f062 100644 --- a/include/envoy/stats/stat_data_allocator.h +++ b/include/envoy/stats/stat_data_allocator.h @@ -47,6 +47,17 @@ class StatDataAllocator { virtual GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) PURE; + /** + * @param name the full name of the stat. + * @param tag_extracted_name the name of the stat with tag-values stripped out. + * @param tags the extracted tag values. + * @return BoolIndicatorSharedPtr a bool, or nullptr if allocation failed, in which case + * tag_extracted_name and tags are not moved. + */ + virtual BoolIndicatorSharedPtr makeBoolIndicator(absl::string_view name, + std::string&& tag_extracted_name, + std::vector&& tags) PURE; + /** * Determines whether this stats allocator requires bounded stat-name size. */ diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index 6d191bf24f9fb..09e5842b4d041 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -94,5 +94,18 @@ class Gauge : public virtual Metric { typedef std::shared_ptr GaugeSharedPtr; +/** + * A Boolean. 
+ */ +class BoolIndicator : public virtual Metric { +public: + virtual ~BoolIndicator() {} + + virtual void set(bool value) PURE; + virtual bool value() const PURE; +}; + +typedef std::shared_ptr BoolIndicatorSharedPtr; + } // namespace Stats } // namespace Envoy diff --git a/include/envoy/stats/stats_macros.h b/include/envoy/stats/stats_macros.h index eb1c89557c664..99cab9aeac3ba 100644 --- a/include/envoy/stats/stats_macros.h +++ b/include/envoy/stats/stats_macros.h @@ -29,15 +29,18 @@ namespace Envoy { #define GENERATE_COUNTER_STRUCT(NAME) Stats::Counter& NAME##_; #define GENERATE_GAUGE_STRUCT(NAME) Stats::Gauge& NAME##_; +#define GENERATE_BOOL_INDICATOR_STRUCT(NAME) Stats::BoolIndicator& NAME##_; #define GENERATE_HISTOGRAM_STRUCT(NAME) Stats::Histogram& NAME##_; #define FINISH_STAT_DECL_(X) + std::string(#X)), #define POOL_COUNTER_PREFIX(POOL, PREFIX) (POOL).counter(PREFIX FINISH_STAT_DECL_ #define POOL_GAUGE_PREFIX(POOL, PREFIX) (POOL).gauge(PREFIX FINISH_STAT_DECL_ +#define POOL_BOOL_INDICATOR_PREFIX(POOL, PREFIX) (POOL).boolIndicator(PREFIX FINISH_STAT_DECL_ #define POOL_HISTOGRAM_PREFIX(POOL, PREFIX) (POOL).histogram(PREFIX FINISH_STAT_DECL_ #define POOL_COUNTER(POOL) POOL_COUNTER_PREFIX(POOL, "") #define POOL_GAUGE(POOL) POOL_GAUGE_PREFIX(POOL, "") +#define POOL_BOOL_INDICATOR(POOL) POOL_BOOL_INDICATOR_PREFIX(POOL, "") #define POOL_HISTOGRAM(POOL) POOL_HISTOGRAM_PREFIX(POOL, "") } // namespace Envoy diff --git a/include/envoy/stats/store.h b/include/envoy/stats/store.h index cd017f9ad8843..d22ff2b3eb67d 100644 --- a/include/envoy/stats/store.h +++ b/include/envoy/stats/store.h @@ -39,6 +39,11 @@ class Store : public Scope { */ virtual std::vector gauges() const PURE; + /** + * @return a list of all known bools. + */ + virtual std::vector boolIndicators() const PURE; + /** * @return a list of all known histograms. 
*/ diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD index bc4b714035d5c..31edcd0dff059 100644 --- a/include/envoy/upstream/BUILD +++ b/include/envoy/upstream/BUILD @@ -147,3 +147,25 @@ envoy_cc_library( "//include/envoy/ssl:context_manager_interface", ], ) + +envoy_cc_library( + name = "cluster_factory_interface", + hdrs = ["cluster_factory.h"], + deps = [ + ":cluster_manager_interface", + ":health_check_host_monitor_interface", + ":load_balancer_type_interface", + ":locality_lib", + ":resource_manager_interface", + ":upstream_interface", + "//include/envoy/common:callback", + "//include/envoy/config:typed_metadata_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/network:connection_interface", + "//include/envoy/network:transport_socket_interface", + "//include/envoy/runtime:runtime_interface", + "//include/envoy/ssl:context_interface", + "//include/envoy/ssl:context_manager_interface", + "@envoy_api//envoy/api/v2:cds_cc", + ], +) diff --git a/include/envoy/upstream/cluster_factory.h b/include/envoy/upstream/cluster_factory.h new file mode 100644 index 0000000000000..86ff93c2cb810 --- /dev/null +++ b/include/envoy/upstream/cluster_factory.h @@ -0,0 +1,143 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/access_log/access_log.h" +#include "envoy/api/api.h" +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/event/dispatcher.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/dns.h" +#include "envoy/runtime/runtime.h" +#include "envoy/server/admin.h" +#include "envoy/singleton/manager.h" +#include "envoy/ssl/context.h" +#include "envoy/ssl/context_manager.h" +#include "envoy/stats/stats.h" +#include "envoy/stats/store.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/outlier_detection.h" + +namespace Envoy { +namespace Upstream { + +/** + * Context passed to cluster 
factory to access envoy resources. Cluster factory should only access + * the rest of the server through this context object. + */ +class ClusterFactoryContext { +public: + virtual ~ClusterFactoryContext() = default; + + /** + * @return bool flag indicating whether the cluster is added via api. + */ + virtual bool addedViaApi() PURE; + + /** + * @return Server::Admin& the server's admin interface. + */ + virtual Server::Admin& admin() PURE; + + /** + * @return Api::Api& a reference to the api object. + */ + virtual Api::Api& api() PURE; + + /** + * @return Upstream::ClusterManager& singleton for use by the entire server. + */ + virtual ClusterManager& clusterManager() PURE; + + /** + * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used + * for all singleton processing. + */ + virtual Event::Dispatcher& dispatcher() PURE; + + /** + * @return Network::DnsResolverSharedPtr the dns resolver for the server. + */ + virtual Network::DnsResolverSharedPtr dnsResolver() PURE; + + /** + * @return information about the local environment the server is running in. + */ + virtual const LocalInfo::LocalInfo& localInfo() PURE; + + /** + * @return AccessLogManager for use by the entire server. + */ + virtual AccessLog::AccessLogManager& logManager() PURE; + + /** + * @return RandomGenerator& the random generator for the server. + */ + virtual Runtime::RandomGenerator& random() PURE; + + /** + * @return Runtime::Loader& the singleton runtime loader for the server. + */ + virtual Runtime::Loader& runtime() PURE; + + /** + * @return Singleton::Manager& the server-wide singleton manager. + */ + virtual Singleton::Manager& singletonManager() PURE; + + /** + * @return Ssl::ContextManager& the SSL context manager. + */ + virtual Ssl::ContextManager& sslContextManager() PURE; + + /** + * TODO(hyang): Remove this and only expose the scope, this would require refactoring + * TransportSocketFactoryContext + * @return the server-wide stats store. 
+ */ + virtual Stats::Store& stats() PURE; + + /** + * @return the server's TLS slot allocator. + */ + virtual ThreadLocal::SlotAllocator& tls() PURE; + + /** + * @return Outlier::EventLoggerSharedPtr sink for outlier detection event logs. + */ + virtual Outlier::EventLoggerSharedPtr outlierEventLogger() PURE; +}; + +/** + * Implemented by cluster and registered via Registry::registerFactory() or the convenience class + * RegisterFactory. + */ +class ClusterFactory { +public: + virtual ~ClusterFactory() = default; + + /** + * Create a new instance of cluster. If the implementation is unable to produce a cluster instance + * with the provided parameters, it should throw an EnvoyException in the case of general error. + * @param cluster supplies the general protobuf configuration for the cluster. + * @param context supplies the cluster's context. + * @return ClusterSharedPtr the cluster instance. + */ + virtual ClusterSharedPtr create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) PURE; + + /** + * @return std::string the identifying name for a particular implementation of a cluster factory. 
+ */ + virtual std::string name() PURE; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/include/envoy/upstream/host_description.h b/include/envoy/upstream/host_description.h index 1fabc6686946e..db3a8359e82d5 100644 --- a/include/envoy/upstream/host_description.h +++ b/include/envoy/upstream/host_description.h @@ -24,6 +24,7 @@ namespace Upstream { COUNTER(cx_total) \ GAUGE (cx_active) \ COUNTER(cx_connect_fail) \ + COUNTER(rq_hedge_abandoned) \ COUNTER(rq_total) \ COUNTER(rq_timeout) \ COUNTER(rq_success) \ diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 79a54472fdd7d..80f9204471ba1 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -474,6 +474,7 @@ class PrioritySet { COUNTER (upstream_cx_max_requests) \ COUNTER (upstream_cx_none_healthy) \ COUNTER (upstream_rq_total) \ + COUNTER (upstream_rq_hedge_abandoned) \ GAUGE (upstream_rq_active) \ COUNTER (upstream_rq_completed) \ COUNTER (upstream_rq_pending_total) \ @@ -524,11 +525,11 @@ class PrioritySet { * Cluster circuit breakers stats. */ // clang-format off -#define ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GAUGE) \ - GAUGE (cx_open) \ - GAUGE (rq_pending_open) \ - GAUGE (rq_open) \ - GAUGE (rq_retry_open) +#define ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(BOOL_INDICATOR) \ + BOOL_INDICATOR (cx_open) \ + BOOL_INDICATOR (rq_pending_open) \ + BOOL_INDICATOR (rq_open) \ + BOOL_INDICATOR (rq_retry_open) // clang-format on /** @@ -549,7 +550,7 @@ struct ClusterLoadReportStats { * Struct definition for cluster circuit breakers stats. 
@see stats_macros.h */ struct ClusterCircuitBreakersStats { - ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_GAUGE_STRUCT) + ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(GENERATE_BOOL_INDICATOR_STRUCT) }; /** diff --git a/source/common/api/BUILD b/source/common/api/BUILD index 92623ea51279f..d4dd9c3950223 100644 --- a/source/common/api/BUILD +++ b/source/common/api/BUILD @@ -22,8 +22,16 @@ envoy_cc_library( envoy_cc_library( name = "os_sys_calls_lib", - srcs = ["os_sys_calls_impl.cc"], - hdrs = ["os_sys_calls_impl.h"], + srcs = ["os_sys_calls_impl.cc"] + select({ + "@bazel_tools//src/conditions:linux_x86_64": ["os_sys_calls_impl_linux.cc"], + "@bazel_tools//src/conditions:linux_aarch64": ["os_sys_calls_impl_linux.cc"], + "//conditions:default": [], + }), + hdrs = ["os_sys_calls_impl.h"] + select({ + "@bazel_tools//src/conditions:linux_x86_64": ["os_sys_calls_impl_linux.h"], + "@bazel_tools//src/conditions:linux_aarch64": ["os_sys_calls_impl_linux.h"], + "//conditions:default": [], + }), deps = [ "//include/envoy/api:os_sys_calls_interface", "//source/common/singleton:threadsafe_singleton", diff --git a/source/common/api/os_sys_calls_impl.cc b/source/common/api/os_sys_calls_impl.cc index e0c42130a9076..fe8a17114b0c0 100644 --- a/source/common/api/os_sys_calls_impl.cc +++ b/source/common/api/os_sys_calls_impl.cc @@ -38,6 +38,12 @@ SysCallSizeResult OsSysCallsImpl::recv(int socket, void* buffer, size_t length, return {rc, errno}; } +SysCallSizeResult OsSysCallsImpl::recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) { + const ssize_t rc = ::recvfrom(sockfd, buffer, length, flags, addr, addrlen); + return {rc, errno}; +} + SysCallIntResult OsSysCallsImpl::shmOpen(const char* name, int oflag, mode_t mode) { const int rc = ::shm_open(name, oflag, mode); return {rc, errno}; diff --git a/source/common/api/os_sys_calls_impl.h b/source/common/api/os_sys_calls_impl.h index c5991190b7fc2..7d9d12a579a98 100644 --- 
a/source/common/api/os_sys_calls_impl.h +++ b/source/common/api/os_sys_calls_impl.h @@ -15,6 +15,8 @@ class OsSysCallsImpl : public OsSysCalls { SysCallSizeResult writev(int fd, const iovec* iovec, int num_iovec) override; SysCallSizeResult readv(int fd, const iovec* iovec, int num_iovec) override; SysCallSizeResult recv(int socket, void* buffer, size_t length, int flags) override; + SysCallSizeResult recvfrom(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen) override; SysCallIntResult close(int fd) override; SysCallIntResult shmOpen(const char* name, int oflag, mode_t mode) override; SysCallIntResult shmUnlink(const char* name) override; diff --git a/source/common/api/os_sys_calls_impl_linux.cc b/source/common/api/os_sys_calls_impl_linux.cc new file mode 100644 index 0000000000000..fcf2fafdc7d0d --- /dev/null +++ b/source/common/api/os_sys_calls_impl_linux.cc @@ -0,0 +1,20 @@ +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." +#endif + +#include "common/api/os_sys_calls_impl_linux.h" + +#include +#include + +namespace Envoy { +namespace Api { + +SysCallIntResult LinuxOsSysCallsImpl::sched_getaffinity(pid_t pid, size_t cpusetsize, + cpu_set_t* mask) { + const int rc = ::sched_getaffinity(pid, cpusetsize, mask); + return {rc, errno}; +} + +} // namespace Api +} // namespace Envoy diff --git a/source/common/api/os_sys_calls_impl_linux.h b/source/common/api/os_sys_calls_impl_linux.h new file mode 100644 index 0000000000000..d3b08fe427d9f --- /dev/null +++ b/source/common/api/os_sys_calls_impl_linux.h @@ -0,0 +1,23 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include "envoy/api/os_sys_calls_linux.h" + +#include "common/singleton/threadsafe_singleton.h" + +namespace Envoy { +namespace Api { + +class LinuxOsSysCallsImpl : public LinuxOsSysCalls { +public: + // Api::LinuxOsSysCalls + SysCallIntResult sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t* mask) override; +}; + +typedef ThreadSafeSingleton LinuxOsSysCallsSingleton; + +} // namespace Api +} // namespace Envoy diff --git a/source/common/config/BUILD b/source/common/config/BUILD index 4737db10e65eb..8071c1855276b 100644 --- a/source/common/config/BUILD +++ b/source/common/config/BUILD @@ -97,6 +97,22 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "delta_subscription_lib", + hdrs = ["delta_subscription_impl.h"], + deps = [ + ":grpc_stream_lib", + ":utility_lib", + "//include/envoy/config:subscription_interface", + "//include/envoy/grpc:async_client_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/common:backoff_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:token_bucket_impl_lib", + "//source/common/protobuf", + ], +) + envoy_cc_library( name = "grpc_stream_lib", hdrs = ["grpc_stream.h"], @@ -303,6 +319,7 @@ envoy_cc_library( name = "subscription_factory_lib", hdrs = ["subscription_factory.h"], deps = [ + ":delta_subscription_lib", ":filesystem_subscription_lib", ":grpc_mux_subscription_lib", ":grpc_subscription_lib", diff --git a/source/common/config/delta_subscription_impl.h b/source/common/config/delta_subscription_impl.h new file mode 100644 index 0000000000000..5164d284b6a4a --- /dev/null +++ b/source/common/config/delta_subscription_impl.h @@ -0,0 +1,213 @@ +#pragma once + +#include + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/common/token_bucket.h" +#include "envoy/config/subscription.h" + +#include "common/common/assert.h" +#include "common/common/backoff_strategy.h" +#include "common/common/logger.h" +#include "common/common/token_bucket_impl.h" 
+#include "common/config/grpc_stream.h" +#include "common/config/utility.h" +#include "common/grpc/common.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Config { + +struct ResourceNameDiff { + std::vector added_; + std::vector removed_; +}; + +const char EmptyVersion[] = ""; + +/** + * Manages the logic of a (non-aggregated) delta xDS subscription. + * TODO(fredlas) add aggregation support. + */ +template +class DeltaSubscriptionImpl + : public Subscription, + public GrpcStream { +public: + DeltaSubscriptionImpl(const LocalInfo::LocalInfo& local_info, Grpc::AsyncClientPtr async_client, + Event::Dispatcher& dispatcher, + const Protobuf::MethodDescriptor& service_method, + Runtime::RandomGenerator& random, Stats::Scope& scope, + const RateLimitSettings& rate_limit_settings, SubscriptionStats stats) + : GrpcStream(std::move(async_client), service_method, random, dispatcher, + scope, rate_limit_settings), + type_url_(Grpc::Common::typeUrl(ResourceType().GetDescriptor()->full_name())), + local_info_(local_info), stats_(stats) { + request_.set_type_url(type_url_); + request_.mutable_node()->MergeFrom(local_info_.node()); + } + + // Enqueues and attempts to send a discovery request, (un)subscribing to resources missing from / + // added to the passed 'resources' argument, relative to resources_. Updates resources_ to + // 'resources'. 
+ void buildAndQueueDiscoveryRequest(const std::vector& resources) { + ResourceNameDiff diff; + std::set_difference(resources.begin(), resources.end(), resource_names_.begin(), + resource_names_.end(), std::inserter(diff.added_, diff.added_.begin())); + std::set_difference(resource_names_.begin(), resource_names_.end(), resources.begin(), + resources.end(), std::inserter(diff.removed_, diff.removed_.begin())); + + for (const auto& added : diff.added_) { + resources_[added] = EmptyVersion; + resource_names_.insert(added); + } + for (const auto& removed : diff.removed_) { + resources_.erase(removed); + resource_names_.erase(removed); + } + queueDiscoveryRequest(diff); + } + + void sendDiscoveryRequest(const ResourceNameDiff& diff) override { + if (!grpcStreamAvailable()) { + ENVOY_LOG(debug, "No stream available to sendDiscoveryRequest for {}", type_url_); + return; // Drop this request; the reconnect will enqueue a new one. + } + if (paused_) { + ENVOY_LOG(trace, "API {} paused during sendDiscoveryRequest().", type_url_); + pending_ = diff; + return; // The unpause will send this request. 
+ } + + request_.clear_resource_names_subscribe(); + request_.clear_resource_names_unsubscribe(); + std::copy(diff.added_.begin(), diff.added_.end(), + Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_subscribe())); + std::copy(diff.removed_.begin(), diff.removed_.end(), + Protobuf::RepeatedFieldBackInserter(request_.mutable_resource_names_unsubscribe())); + + ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url_, request_.DebugString()); + sendMessage(request_); + request_.clear_error_detail(); + request_.clear_initial_resource_versions(); + } + + void subscribe(const std::vector& resources) { + ENVOY_LOG(debug, "delta subscribe for " + type_url_); + buildAndQueueDiscoveryRequest(resources); + } + + void pause() { + ENVOY_LOG(debug, "Pausing discovery requests for {}", type_url_); + ASSERT(!paused_); + paused_ = true; + } + + void resume() { + ENVOY_LOG(debug, "Resuming discovery requests for {}", type_url_); + ASSERT(paused_); + paused_ = false; + if (pending_.has_value()) { + queueDiscoveryRequest(pending_.value()); + pending_.reset(); + } + } + + // Config::SubscriptionCallbacks + void onConfigUpdate(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& version_info) { + callbacks_->onConfigUpdate(added_resources, removed_resources, version_info); + for (const auto& resource : added_resources) { + resources_[resource.name()] = resource.version(); + } + stats_.update_success_.inc(); + stats_.update_attempt_.inc(); + stats_.version_.set(HashUtil::xxHash64(version_info)); + ENVOY_LOG(debug, "Delta config for {} accepted with {} resources added, {} removed", type_url_, + added_resources.size(), removed_resources.size()); + } + + void handleResponse(std::unique_ptr&& message) override { + ENVOY_LOG(debug, "Received gRPC message for {} at version {}", type_url_, + message->system_version_info()); + + request_.set_response_nonce(message->nonce()); + + try { + 
onConfigUpdate(message->resources(), message->removed_resources(), + message->system_version_info()); + } catch (const EnvoyException& e) { + stats_.update_rejected_.inc(); + ENVOY_LOG(warn, "delta config for {} rejected: {}", type_url_, e.what()); + stats_.update_attempt_.inc(); + callbacks_->onConfigUpdateFailed(&e); + ::google::rpc::Status* error_detail = request_.mutable_error_detail(); + error_detail->set_code(Grpc::Status::GrpcStatus::Internal); + error_detail->set_message(e.what()); + } + queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources + } + + void handleStreamEstablished() override { + // initial_resource_versions "must be populated for first request in a stream", so guarantee + // that the initial version'd request we're about to enqueue is what gets sent. + clearRequestQueue(); + + request_.Clear(); + for (auto const& resource : resources_) { + (*request_.mutable_initial_resource_versions())[resource.first] = resource.second; + } + request_.set_type_url(type_url_); + request_.mutable_node()->MergeFrom(local_info_.node()); + queueDiscoveryRequest(ResourceNameDiff()); // no change to subscribed resources + } + + void handleEstablishmentFailure() override { + stats_.update_failure_.inc(); + ENVOY_LOG(debug, "delta update for {} failed", type_url_); + stats_.update_attempt_.inc(); + callbacks_->onConfigUpdateFailed(nullptr); + } + + // Config::DeltaSubscription + void start(const std::vector& resources, + SubscriptionCallbacks& callbacks) override { + callbacks_ = &callbacks; + establishNewStream(); + subscribe(resources); + // The attempt stat here is maintained for the purposes of having consistency between ADS and + // individual DeltaSubscriptions. Since ADS is push based and muxed, the notion of an + // "attempt" for a given xDS API combined by ADS is not really that meaningful. 
+ stats_.update_attempt_.inc(); + } + + void updateResources(const std::vector& resources) override { + subscribe(resources); + stats_.update_attempt_.inc(); + } + +private: + // A map from resource name to per-resource version. + std::unordered_map resources_; + // The keys of resources_. Only tracked separately because std::map does not provide an iterator + // into just its keys, e.g. for use in std::set_difference. + std::unordered_set resource_names_; + const std::string type_url_; + SubscriptionCallbacks* callbacks_{}; + // In-flight or previously sent request. + envoy::api::v2::DeltaDiscoveryRequest request_; + // Paused via pause()? + bool paused_{}; + absl::optional pending_; + + const LocalInfo::LocalInfo& local_info_; + + SubscriptionStats stats_; +}; + +} // namespace Config +} // namespace Envoy diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 1ea87a07c2643..7519a990e2661 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -157,6 +157,9 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrresources_.empty()) { watch->callbacks_.onConfigUpdate(message->resources(), message->version_info()); continue; @@ -168,7 +171,11 @@ void GrpcMuxImpl::handleResponse(std::unique_ptrMergeFrom(it->second); } } - watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); + // onConfigUpdate should be called only on watches(clusters/routes) that have updates in the + // message for EDS/RDS. + if (found_resources.size() > 0) { + watch->callbacks_.onConfigUpdate(found_resources, message->version_info()); + } } // TODO(mattklein123): In the future if we start tracking per-resource versions, we would do // that tracking here. 
diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index c1e5ef761255d..2d03e4e88c621 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -12,7 +12,7 @@ namespace Envoy { namespace Config { -// Oversees communication for gRPC xDS implementations (parent to both regular xDS and incremental +// Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta // xDS variants). Reestablishes the gRPC channel when necessary, and provides rate limiting of // requests. template @@ -49,6 +49,14 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, drainRequests(); } + void clearRequestQueue() { + control_plane_stats_.pending_requests_.sub(request_queue_.size()); + // TODO(fredlas) when we have C++17: request_queue_ = {}; + while (!request_queue_.empty()) { + request_queue_.pop(); + } + } + void establishNewStream() { ENVOY_LOG(debug, "Establishing new gRPC bidi stream for {}", service_method_.DebugString()); stream_ = async_client_->start(service_method_, *this); @@ -58,7 +66,7 @@ setRetryTimer(); return; } - control_plane_stats_.connected_state_.set(1); + control_plane_stats_.connected_state_.set(true); handleStreamEstablished(); } @@ -78,6 +86,10 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, void onReceiveMessage(std::unique_ptr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. backoff_strategy_->reset(); + // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to + // have 0 until it is reconnected. Setting it here ensures that it is consistent with the state of + // management server connection. 
+ control_plane_stats_.connected_state_.set(1); handleResponse(std::move(message)); } @@ -88,7 +100,7 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override { ENVOY_LOG(warn, "gRPC config stream closed: {}, {}", status, message); stream_ = nullptr; - control_plane_stats_.connected_state_.set(0); + control_plane_stats_.connected_state_.set(false); handleEstablishmentFailure(); setRetryTimer(); } @@ -131,7 +143,8 @@ class GrpcStream : public Grpc::TypedAsyncStreamCallbacks, ControlPlaneStats generateControlPlaneStats(Stats::Scope& scope) { const std::string control_plane_prefix = "control_plane."; - return {ALL_CONTROL_PLANE_STATS(POOL_COUNTER_PREFIX(scope, control_plane_prefix), + return {ALL_CONTROL_PLANE_STATS(POOL_BOOL_INDICATOR_PREFIX(scope, control_plane_prefix), + POOL_COUNTER_PREFIX(scope, control_plane_prefix), POOL_GAUGE_PREFIX(scope, control_plane_prefix))}; } diff --git a/source/common/config/subscription_factory.h b/source/common/config/subscription_factory.h index ce6a1e8d69a51..4362291ab7a60 100644 --- a/source/common/config/subscription_factory.h +++ b/source/common/config/subscription_factory.h @@ -8,6 +8,7 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/cluster_manager.h" +#include "common/config/delta_subscription_impl.h" #include "common/config/filesystem_subscription_impl.h" #include "common/config/grpc_mux_subscription_impl.h" #include "common/config/grpc_subscription_impl.h" @@ -67,16 +68,26 @@ class SubscriptionFactory { Utility::apiConfigSourceRequestTimeout(api_config_source), *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(rest_method), stats)); break; - case envoy::api::v2::core::ApiConfigSource::GRPC: { + case envoy::api::v2::core::ApiConfigSource::GRPC: result.reset(new GrpcSubscriptionImpl( local_info, Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), - config.api_config_source(), scope) + 
api_config_source, scope) ->create(), dispatcher, random, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), stats, scope, Utility::parseRateLimitSettings(api_config_source))); break; + case envoy::api::v2::core::ApiConfigSource::DELTA_GRPC: { + Utility::checkApiConfigSourceSubscriptionBackingCluster(cm.clusters(), api_config_source); + result.reset(new DeltaSubscriptionImpl( + local_info, + Config::Utility::factoryForGrpcApiConfigSource(cm.grpcAsyncClientManager(), + api_config_source, scope) + ->create(), + dispatcher, *Protobuf::DescriptorPool::generated_pool()->FindMethodByName(grpc_method), + random, scope, Utility::parseRateLimitSettings(api_config_source), stats)); + break; } default: NOT_REACHED_GCOVR_EXCL_LINE; diff --git a/source/common/config/utility.cc b/source/common/config/utility.cc index cfe2fdbc06af3..d8b11b71d97f6 100644 --- a/source/common/config/utility.cc +++ b/source/common/config/utility.cc @@ -89,7 +89,8 @@ void Utility::checkFilesystemSubscriptionBackingPath(const std::string& path, Ap void Utility::checkApiConfigSourceNames( const envoy::api::v2::core::ApiConfigSource& api_config_source) { const bool is_grpc = - (api_config_source.api_type() == envoy::api::v2::core::ApiConfigSource::GRPC); + (api_config_source.api_type() == envoy::api::v2::core::ApiConfigSource::GRPC || + api_config_source.api_type() == envoy::api::v2::core::ApiConfigSource::DELTA_GRPC); if (api_config_source.cluster_names().empty() && api_config_source.grpc_services().empty()) { throw EnvoyException( @@ -99,19 +100,19 @@ void Utility::checkApiConfigSourceNames( if (is_grpc) { if (!api_config_source.cluster_names().empty()) { - throw EnvoyException(fmt::format( - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified: {}", - api_config_source.DebugString())); + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource::(DELTA_)GRPC " + "must not have a cluster name specified: {}", + 
api_config_source.DebugString())); } if (api_config_source.grpc_services().size() > 1) { - throw EnvoyException(fmt::format( - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified: {}", - api_config_source.DebugString())); + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource::(DELTA_)GRPC " + "must have a single gRPC service specified: {}", + api_config_source.DebugString())); } } else { if (!api_config_source.grpc_services().empty()) { throw EnvoyException( - fmt::format("envoy::api::v2::core::ConfigSource, if not of type gRPC, must not have " + fmt::format("envoy::api::v2::core::ConfigSource, if not a gRPC type, must not have " "a gRPC service specified: {}", api_config_source.DebugString())); } @@ -126,6 +127,7 @@ void Utility::checkApiConfigSourceNames( void Utility::validateClusterName(const Upstream::ClusterManager::ClusterInfoMap& clusters, const std::string& cluster_name) { const auto& it = clusters.find(cluster_name); + if (it == clusters.end() || it->second.get().info()->addedViaApi() || it->second.get().info()->type() == envoy::api::v2::Cluster::EDS) { throw EnvoyException(fmt::format( @@ -248,8 +250,9 @@ Grpc::AsyncClientFactoryPtr Utility::factoryForGrpcApiConfigSource( const envoy::api::v2::core::ApiConfigSource& api_config_source, Stats::Scope& scope) { Utility::checkApiConfigSourceNames(api_config_source); - if (api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::GRPC) { - throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource type must be GRPC: {}", + if (api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::GRPC && + api_config_source.api_type() != envoy::api::v2::core::ApiConfigSource::DELTA_GRPC) { + throw EnvoyException(fmt::format("envoy::api::v2::core::ConfigSource type must be gRPC: {}", api_config_source.DebugString())); } diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index 
2ebfe881d4d79..004c8cfff3607 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -41,11 +41,11 @@ ServerConnectionPtr ConnectionManagerUtility::autoCreateCodec( ServerConnectionCallbacks& callbacks, Stats::Scope& scope, const Http1Settings& http1_settings, const Http2Settings& http2_settings, const uint32_t max_request_headers_kb) { if (determineNextProtocol(connection, data) == Http2::ALPN_STRING) { - return ServerConnectionPtr{new Http2::ServerConnectionImpl( - connection, callbacks, scope, http2_settings, max_request_headers_kb)}; + return std::make_unique(connection, callbacks, scope, + http2_settings, max_request_headers_kb); } else { - return ServerConnectionPtr{ - new Http1::ServerConnectionImpl(connection, callbacks, http1_settings)}; + return std::make_unique(connection, callbacks, http1_settings, + max_request_headers_kb); } } diff --git a/source/common/http/header_map_impl.cc b/source/common/http/header_map_impl.cc index e5c642349176a..68bfc4365d531 100644 --- a/source/common/http/header_map_impl.cc +++ b/source/common/http/header_map_impl.cc @@ -39,16 +39,19 @@ HeaderString::HeaderString() : type_(Type::Inline) { clear(); static_assert(sizeof(inline_buffer_) >= MaxIntegerLength, ""); static_assert(MinDynamicCapacity >= MaxIntegerLength, ""); + ASSERT(valid()); } HeaderString::HeaderString(const LowerCaseString& ref_value) : type_(Type::Reference) { buffer_.ref_ = ref_value.get().c_str(); string_length_ = ref_value.get().size(); + ASSERT(valid()); } HeaderString::HeaderString(const std::string& ref_value) : type_(Type::Reference) { buffer_.ref_ = ref_value.c_str(); string_length_ = ref_value.size(); + ASSERT(valid()); } HeaderString::HeaderString(HeaderString&& move_value) { @@ -76,6 +79,7 @@ HeaderString::HeaderString(HeaderString&& move_value) { break; } } + ASSERT(valid()); } HeaderString::~HeaderString() { freeDynamic(); } @@ -86,6 +90,10 @@ void HeaderString::freeDynamic() { } } +bool 
HeaderString::valid() const { + return std::string(c_str(), string_length_).find('\0') == std::string::npos; +} + void HeaderString::append(const char* data, uint32_t size) { switch (type_) { case Type::Reference: { @@ -143,6 +151,7 @@ void HeaderString::append(const char* data, uint32_t size) { memcpy(buffer_.dynamic_ + string_length_, data, size); string_length_ += size; buffer_.dynamic_[string_length_] = 0; + ASSERT(valid()); } void HeaderString::clear() { @@ -203,6 +212,7 @@ void HeaderString::setCopy(const char* data, uint32_t size) { memcpy(buffer_.dynamic_, data, size); buffer_.dynamic_[size] = 0; string_length_ = size; + ASSERT(valid()); } void HeaderString::setInteger(uint64_t value) { @@ -235,6 +245,7 @@ void HeaderString::setReference(const std::string& ref_value) { type_ = Type::Reference; buffer_.ref_ = ref_value.c_str(); string_length_ = ref_value.size(); + ASSERT(valid()); } // Specialization needed for HeaderMapImpl::HeaderList::insert() when key is LowerCaseString. diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index fcb9bf0b5ebc2..43a3c1ef4cd55 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -316,9 +316,11 @@ const ToLowerTable& ConnectionImpl::toLowerTable() { return *table; } -ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type type) +ConnectionImpl::ConnectionImpl(Network::Connection& connection, http_parser_type type, + uint32_t max_headers_kb) : connection_(connection), output_buffer_([&]() -> void { this->onBelowLowWatermark(); }, - [&]() -> void { this->onAboveHighWatermark(); }) { + [&]() -> void { this->onAboveHighWatermark(); }), + max_headers_kb_(max_headers_kb) { output_buffer_.setWatermarks(connection.bufferLimit()); http_parser_init(&parser_, type); parser_.data = this; @@ -419,6 +421,14 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { header_parsing_state_ = HeaderParsingState::Value; 
current_header_value_.append(data, length); + + const uint32_t total = + current_header_field_.size() + current_header_value_.size() + current_header_map_->byteSize(); + if (total > (max_headers_kb_ * 1024)) { + error_code_ = Http::Code::RequestHeaderFieldsTooLarge; + sendProtocolError(); + throw CodecProtocolException("headers size exceeds limit"); + } } int ConnectionImpl::onHeadersCompleteBase() { @@ -471,8 +481,9 @@ void ConnectionImpl::onResetStreamBase(StreamResetReason reason) { ServerConnectionImpl::ServerConnectionImpl(Network::Connection& connection, ServerConnectionCallbacks& callbacks, - Http1Settings settings) - : ConnectionImpl(connection, HTTP_REQUEST), callbacks_(callbacks), codec_settings_(settings) {} + Http1Settings settings, uint32_t max_request_headers_kb) + : ConnectionImpl(connection, HTTP_REQUEST, max_request_headers_kb), callbacks_(callbacks), + codec_settings_(settings) {} void ServerConnectionImpl::onEncodeComplete() { ASSERT(active_request_); @@ -643,7 +654,7 @@ void ServerConnectionImpl::onBelowLowWatermark() { } ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, ConnectionCallbacks&) - : ConnectionImpl(connection, HTTP_RESPONSE) {} + : ConnectionImpl(connection, HTTP_RESPONSE, MAX_RESPONSE_HEADERS_KB) {} bool ClientConnectionImpl::cannotHaveBody() { if ((!pending_responses_.empty() && pending_responses_.front().head_request_) || diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index 772807c9d46e6..c0e7bf43186b0 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -165,7 +165,8 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable pending_responses_; // Set true between receiving 100-Continue headers and receiving the spurious onMessageComplete. bool ignore_message_complete_for_100_continue_{}; + + // The default limit of 80 KiB is the vanilla http_parser behaviour. 
+ static constexpr uint32_t MAX_RESPONSE_HEADERS_KB = 80; }; } // namespace Http1 diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index ce0e216fe7b9a..c1b429752f2bc 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -868,6 +868,10 @@ ConnectionImpl::Http2Options::Http2Options(const Http2Settings& http2_settings) nghttp2_option_set_no_closed_streams(options_, 1); nghttp2_option_set_no_auto_window_update(options_, 1); + // The max send header block length is configured to an arbitrarily high number so as to never + // trigger the check within nghttp2, as we check request headers length in codec_impl::saveHeader. + nghttp2_option_set_max_send_header_block_length(options_, 0x2000000); + if (http2_settings.hpack_table_size_ != NGHTTP2_DEFAULT_HEADER_TABLE_SIZE) { nghttp2_option_set_max_deflate_dynamic_table_size(options_, http2_settings.hpack_table_size_); } diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index 0998896651486..5e7cc75c6e1d5 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -5,6 +5,7 @@ #include "envoy/buffer/buffer.h" #include "envoy/common/exception.h" +#include "common/api/os_sys_calls_impl.h" #include "common/common/assert.h" #include "common/common/empty_string.h" #include "common/common/fmt.h" @@ -56,17 +57,19 @@ UdpListenerImpl::ReceiveResult UdpListenerImpl::doRecvFrom(sockaddr_storage& pee const uint64_t num_slices = buffer->reserve(read_length, &slice, 1); ASSERT(num_slices == 1); - // TODO(conqerAtapple): Use os_syscalls - const ssize_t rc = ::recvfrom(socket_.ioHandle().fd(), slice.mem_, read_length, 0, - reinterpret_cast(&peer_addr), &addr_len); - if (rc < 0) { - return ReceiveResult{Api::SysCallIntResult{static_cast(rc), errno}, nullptr}; - } - slice.len_ = std::min(slice.len_, static_cast(rc)); + auto& os_sys_calls = 
Api::OsSysCallsSingleton::get(); + const Api::SysCallSizeResult result = + os_sys_calls.recvfrom(socket_.ioHandle().fd(), slice.mem_, read_length, 0, + reinterpret_cast(&peer_addr), &addr_len); + if (result.rc_ < 0) { + return ReceiveResult{Api::SysCallIntResult{static_cast(result.rc_), result.errno_}, + nullptr}; + } + slice.len_ = std::min(slice.len_, static_cast(result.rc_)); buffer->commit(&slice, 1); - return ReceiveResult{Api::SysCallIntResult{static_cast(rc), 0}, std::move(buffer)}; + return ReceiveResult{Api::SysCallIntResult{static_cast(result.rc_), 0}, std::move(buffer)}; } void UdpListenerImpl::onSocketEvent(short flags) { diff --git a/source/common/router/rds_impl.h b/source/common/router/rds_impl.h index 4498eb99bb104..67e086258e21b 100644 --- a/source/common/router/rds_impl.h +++ b/source/common/router/rds_impl.h @@ -107,7 +107,12 @@ class RdsRouteConfigSubscription } // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); diff --git a/source/common/router/retry_state_impl.cc b/source/common/router/retry_state_impl.cc index 37ca170673b56..3ce99640a561f 100644 --- a/source/common/router/retry_state_impl.cc +++ b/source/common/router/retry_state_impl.cc @@ -188,11 +188,18 @@ RetryStatus RetryStateImpl::shouldRetryHeaders(const Http::HeaderMap& response_h return shouldRetry(wouldRetryFromHeaders(response_headers), callback); } -RetryStatus RetryStateImpl::shouldRetryReset(const Http::StreamResetReason reset_reason, +RetryStatus RetryStateImpl::shouldRetryReset(Http::StreamResetReason reset_reason, DoRetryCallback 
callback) { return shouldRetry(wouldRetryFromReset(reset_reason), callback); } +RetryStatus RetryStateImpl::shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) { + // A hedged retry on per try timeout is always retried if there are retries + // left. NOTE: this is different than non-hedged per try timeouts which are only retried + // if RETRY_ON_5XX or RETRY_ON_GATEWAY_ERROR + return shouldRetry([]() -> bool { return true; }, callback); +} + bool RetryStateImpl::wouldRetryFromHeaders(const Http::HeaderMap& response_headers) { if (response_headers.EnvoyOverloaded() != nullptr) { return false; diff --git a/source/common/router/retry_state_impl.h b/source/common/router/retry_state_impl.h index 78d017cf8db9c..9d7ce7647322c 100644 --- a/source/common/router/retry_state_impl.h +++ b/source/common/router/retry_state_impl.h @@ -40,6 +40,7 @@ class RetryStateImpl : public RetryState { DoRetryCallback callback) override; RetryStatus shouldRetryReset(const Http::StreamResetReason reset_reason, DoRetryCallback callback) override; + RetryStatus shouldHedgeRetryPerTryTimeout(DoRetryCallback callback) override; void onHostAttempted(Upstream::HostDescriptionConstSharedPtr host) override { std::for_each(retry_host_predicates_.begin(), retry_host_predicates_.end(), diff --git a/source/common/router/router.cc b/source/common/router/router.cc index f794dcbbd400d..6d9afab3e64ce 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -179,9 +179,25 @@ FilterUtility::finalTimeout(const RouteEntry& route, Http::HeaderMap& request_he return timeout; } +FilterUtility::HedgingParams FilterUtility::finalHedgingParams(const RouteEntry& route, + uint64_t random_value) { + HedgingParams hedgingParams; + hedgingParams.initial_requests_ = route.hedgePolicy().initialRequests(); + hedgingParams.hedge_on_per_try_timeout_ = route.hedgePolicy().hedgeOnPerTryTimeout(); + + if (ProtobufPercentHelper::evaluateFractionalPercent( + 
route.hedgePolicy().additionalRequestChance(), random_value)) { + hedgingParams.initial_requests_++; + } + + return hedgingParams; +} + Filter::~Filter() { // Upstream resources should already have been cleaned. - ASSERT(!upstream_request_); + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + ASSERT(!upstream_requests_[i]); + } ASSERT(!retry_state_); } @@ -359,6 +375,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e include_attempt_count_ = route_entry_->includeAttemptCount(); if (include_attempt_count_) { + // This attempt count is local to this UpstreamRequest, so if we had + // multiple initial requests the real attempt count might be higher. headers.insertEnvoyAttemptCount().value(attempt_count_); } @@ -372,6 +390,8 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e // Ensure an http transport scheme is selected before continuing with decoding. ASSERT(headers.Scheme()); + hedging_params_ = FilterUtility::finalHedgingParams(*route_entry_, callbacks_->streamId()); + retry_state_ = createRetryState(route_entry_->retryPolicy(), headers, *cluster_, config_.runtime_, config_.random_, callbacks_->dispatcher(), route_entry_->priority()); @@ -380,8 +400,9 @@ Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool e ENVOY_STREAM_LOG(debug, "router decoding headers:\n{}", *callbacks_, headers); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(end_stream); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + upstream_requests_.emplace_back(std::move(upstream_request)); + upstream_requests_[0]->encodeHeaders(end_stream); if (end_stream) { onRequestComplete(); } @@ -423,20 +444,24 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea do_shadowing_ = false; } - if (buffering) { - // If we are going to buffer for retries or shadowing, we need to make a copy before 
encoding - // since it's all moves from here on. - Buffer::OwnedImpl copy(data); - upstream_request_->encodeData(copy, end_stream); - - // If we are potentially going to retry or shadow this request we need to buffer. - // This will not cause the connection manager to 413 because before we hit the - // buffer limit we give up on retries and buffering. We must buffer using addDecodedData() - // so that all buffered data is available by the time we do request complete processing and - // potentially shadow. - callbacks_->addDecodedData(data, true); - } else { - upstream_request_->encodeData(data, end_stream); + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + if (buffering) { + // If we are going to buffer for retries or shadowing, we need to make a copy before encoding + // since it's all moves from here on. + Buffer::OwnedImpl copy(data); + upstream_requests_[i]->encodeData(copy, end_stream); + + if (i == 0) { + // If we are potentially going to retry or shadow this request we need to buffer. + // This will not cause the connection manager to 413 because before we hit the + // buffer limit we give up on retries and buffering. We must buffer using addDecodedData() + // so that all buffered data is available by the time we do request complete processing and + // potentially shadow. 
+ callbacks_->addDecodedData(data, true); + } + } else { + upstream_requests_[i]->encodeData(data, end_stream); + } } if (end_stream) { @@ -449,7 +474,9 @@ Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_strea Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap& trailers) { ENVOY_STREAM_LOG(debug, "router decoding trailers:\n{}", *callbacks_, trailers); downstream_trailers_ = &trailers; - upstream_request_->encodeTrailers(trailers); + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + upstream_requests_[i]->encodeTrailers(trailers); + } onRequestComplete(); return Http::FilterTrailersStatus::StopIteration; } @@ -463,13 +490,17 @@ void Filter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callb } void Filter::cleanup() { - // upstream_request_ is only destroyed in this method (cleanup()) or when we - // do a retry (setupRetry()). In the latter case we don't want to save the - // upstream timings to the downstream info. - if (upstream_request_) { - callbacks_->streamInfo().setUpstreamTiming(upstream_request_->upstream_timing_); + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + UpstreamRequest* upstream_request = upstream_requests_[i].get(); + if (upstream_request) { + if (final_upstream_request_ != nullptr && upstream_request == final_upstream_request_) { + callbacks_->streamInfo().setUpstreamTiming(final_upstream_request_->upstream_timing_); + } else { + upstream_request->resetStream(); // Idempotent. + } + upstream_requests_[i].reset(); + } } - upstream_request_.reset(); retry_state_.reset(); if (response_timeout_) { response_timeout_->disableTimer(); @@ -502,7 +533,15 @@ void Filter::onRequestComplete() { downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); // Possible that we got an immediate reset. 
- if (upstream_request_) { + bool any_upstreams = false; + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + if (upstream_requests_[i]) { + any_upstreams = true; + break; + } + } + + if (any_upstreams) { // Even if we got an immediate reset, we could still shadow, but that is a riskier change and // seems unnecessary right now. maybeDoShadowing(); @@ -515,63 +554,68 @@ void Filter::onRequestComplete() { } void Filter::onDestroy() { - if (upstream_request_ && !attempting_internal_redirect_with_complete_stream_) { - upstream_request_->resetStream(); + if (!attempting_internal_redirect_with_complete_stream_) { + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + if (upstream_requests_[i]) { + upstream_requests_[i]->resetStream(); + } + } } cleanup(); } void Filter::onResponseTimeout() { ENVOY_STREAM_LOG(debug, "upstream timeout", *callbacks_); - cluster_->stats().upstream_rq_timeout_.inc(); - // It's possible to timeout during a retry backoff delay when we have no upstream request. In - // this case we fake a reset since onUpstreamReset() doesn't care. - if (upstream_request_) { - if (upstream_request_->upstream_host_) { - upstream_request_->upstream_host_->stats().rq_timeout_.inc(); + // Reset any upstream requests that are still in flight. + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + // Don't record a timeout for upstream requests we've already seen headers + // for. + UpstreamRequest* upstream_request = upstream_requests_[i].get(); + if (upstream_request && !upstream_request->upstream_headers_) { + cluster_->stats().upstream_rq_timeout_.inc(); + if (upstream_request->upstream_host_) { + upstream_request->upstream_host_->stats().rq_timeout_.inc(); + } + + // If this upstream request already hit a "soft" timeout, then it + // already recorded a timeout into outlier detection. Don't do it again. 
+ if (!upstream_request->outlier_detection_timeout_recorded_) { + updateOutlierDetection(timeout_response_code_, upstream_request); + } + upstream_request->resetStream(); + + chargeUpstreamAbort(timeout_response_code_, false, upstream_request); } - upstream_request_->resetStream(); } - onUpstreamReset(UpstreamResetType::GlobalTimeout, absl::optional()); + const absl::string_view body = + timeout_response_code_ == Http::Code::GatewayTimeout ? "upstream request timeout" : ""; + onUpstreamAbort(timeout_response_code_, StreamInfo::ResponseFlag::UpstreamRequestTimeout, body, + false); } -void Filter::onUpstreamReset(UpstreamResetType type, - const absl::optional reset_reason) { - ASSERT(type == UpstreamResetType::GlobalTimeout || upstream_request_); - if (type == UpstreamResetType::Reset) { - ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, - reset_reason ? Http::Utility::resetReasonToString(reset_reason.value()) : ""); - } - - Upstream::HostDescriptionConstSharedPtr upstream_host; - if (upstream_request_) { - upstream_host = upstream_request_->upstream_host_; - if (upstream_host) { - upstream_host->outlierDetector().putHttpResponseCode( - enumToInt(type == UpstreamResetType::Reset ? Http::Code::ServiceUnavailable - : timeout_response_code_)); - } - } +// Called when the per try timeout is hit but we didn't reset the request +// (hedge_on_per_try_timeout enabled). +void Filter::onSoftPerTryTimeout(UpstreamRequest* upstream_request) { + // Even though we didn't cancel the request yet we still want to track it + // in outlier detection. + // TODO(mpuncel) is it weird to have a pretend response code here? we might + // get a 200 back from this request later. + updateOutlierDetection(timeout_response_code_, upstream_request); + upstream_request->outlier_detection_timeout_recorded_ = true; - // We don't retry on a global timeout or if we already started the response. 
- if (type != UpstreamResetType::GlobalTimeout && !downstream_response_started_ && retry_state_) { - // Notify retry modifiers about the attempted host. - if (upstream_host != nullptr) { - retry_state_->onHostAttempted(upstream_host); - } + Upstream::HostDescriptionConstSharedPtr upstream_host = upstream_request->upstream_host_; - // There must be a value for reset_reason because the only case where it's - // empty is when type == UpstreamResetType::GlobalTimeout. - ASSERT(reset_reason.has_value()); + if (!downstream_response_started_ && retry_state_) { RetryStatus retry_status = - retry_state_->shouldRetryReset(reset_reason.value(), [this]() -> void { doRetry(); }); - if (retry_status == RetryStatus::Yes && setupRetry(true)) { - if (upstream_host) { - upstream_host->stats().rq_error_.inc(); - } - return; + retry_state_->shouldHedgeRetryPerTryTimeout([this]() -> void { doRetry(); }); + + if (retry_status == RetryStatus::Yes && setupRetry()) { + setupRetry(); + // Don't increment upstream_host->stats().rq_error_ here, we'll do that + // later if 1) we hit global timeout or 2) we get bad response headers + // back. } else if (retry_status == RetryStatus::NoOverflow) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); } else if (retry_status == RetryStatus::NoRetryLimitExceeded) { @@ -579,38 +623,50 @@ void Filter::onUpstreamReset(UpstreamResetType type, StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); } } +} - // If we have not yet sent anything downstream, send a response with an appropriate status code. - // Otherwise just reset the ongoing response. 
+void Filter::onPerTryTimeout(UpstreamRequest* upstream_request) { + if (hedging_params_.hedge_on_per_try_timeout_) { + onSoftPerTryTimeout(upstream_request); + return; + } + + cluster_->stats().upstream_rq_per_try_timeout_.inc(); + if (upstream_request->upstream_host_) { + upstream_request->upstream_host_->stats().rq_timeout_.inc(); + } + + upstream_request->resetStream(); + + updateOutlierDetection(timeout_response_code_, upstream_request); + + if (maybeRetryReset(Http::StreamResetReason::LocalReset, upstream_request)) { + return; + } + + chargeUpstreamAbort(timeout_response_code_, false, upstream_request); + + const std::string body = + timeout_response_code_ == Http::Code::GatewayTimeout ? "upstream request timeout" : ""; + onUpstreamAbort(timeout_response_code_, StreamInfo::ResponseFlag::UpstreamRequestTimeout, body, + false); +} + +void Filter::updateOutlierDetection(Http::Code code, UpstreamRequest* upstream_request) { + if (upstream_request->upstream_host_) { + upstream_request->upstream_host_->outlierDetector().putHttpResponseCode(enumToInt(code)); + } +} + +void Filter::chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest* upstream_request) { if (downstream_response_started_) { - if (upstream_request_ != nullptr && upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_request != nullptr && upstream_request->grpc_rq_success_deferred_) { + upstream_request->upstream_host_->stats().rq_error_.inc(); config_.stats_.rq_reset_after_downstream_response_started_.inc(); } - // This will destroy any created retry timers. - cleanup(); - callbacks_->resetStream(); } else { - // This will destroy any created retry timers. 
- cleanup(); - Http::Code code; - std::string body; - if (type == UpstreamResetType::GlobalTimeout || type == UpstreamResetType::PerTryTimeout) { - callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); - - code = timeout_response_code_; - body = code == Http::Code::GatewayTimeout ? "upstream request timeout" : ""; - } else { - StreamInfo::ResponseFlag response_flags = - streamResetReasonToResponseFlag(reset_reason.value()); - callbacks_->streamInfo().setResponseFlag(response_flags); - code = Http::Code::ServiceUnavailable; - body = absl::StrCat( - "upstream connect error or disconnect/reset before headers. reset reason: ", - reset_reason ? Http::Utility::resetReasonToString(reset_reason.value()) : ""); - } + Upstream::HostDescriptionConstSharedPtr upstream_host = upstream_request->upstream_host_; - const bool dropped = reset_reason && reset_reason.value() == Http::StreamResetReason::Overflow; chargeUpstreamCode(code, upstream_host, dropped); // If we had non-5xx but still have been reset by backend or timeout before // starting response, we treat this as an error. We only get non-5xx when @@ -619,7 +675,24 @@ void Filter::onUpstreamReset(UpstreamResetType type, if (upstream_host != nullptr && !Http::CodeUtility::is5xx(enumToInt(code))) { upstream_host->stats().rq_error_.inc(); } - callbacks_->sendLocalReply(code, body.c_str(), + } +} + +void Filter::onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flags, + absl::string_view body, bool dropped) { + // If we have not yet sent anything downstream, send a response with an appropriate status code. + // Otherwise just reset the ongoing response. + if (downstream_response_started_) { + // This will destroy any created retry timers. + cleanup(); + callbacks_->resetStream(); + } else { + // This will destroy any created retry timers. 
+ cleanup(); + + callbacks_->streamInfo().setResponseFlag(response_flags); + + callbacks_->sendLocalReply(code, body, [dropped, this](Http::HeaderMap& headers) { if (dropped && !config_.suppress_envoy_headers_) { headers.insertEnvoyOverloaded().value( @@ -630,6 +703,50 @@ void Filter::onUpstreamReset(UpstreamResetType type, } } +bool Filter::maybeRetryReset(Http::StreamResetReason reset_reason, + UpstreamRequest* upstream_request) { + // We don't retry on a global timeout or if we already started the response. + if (!downstream_response_started_ && retry_state_) { + const RetryStatus retry_status = + retry_state_->shouldRetryReset(reset_reason, [this]() -> void { doRetry(); }); + if (retry_status == RetryStatus::Yes && setupRetry()) { + if (upstream_request->upstream_host_) { + upstream_request->upstream_host_->stats().rq_error_.inc(); + } + return true; + } else if (retry_status == RetryStatus::NoOverflow) { + callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); + } else if (retry_status == RetryStatus::NoRetryLimitExceeded) { + callbacks_->streamInfo().setResponseFlag( + StreamInfo::ResponseFlag::UpstreamRetryLimitExceeded); + } + } + + return false; +} + +void Filter::onUpstreamReset(Http::StreamResetReason reset_reason, + UpstreamRequest* upstream_request) { + ENVOY_STREAM_LOG(debug, "upstream reset: reset reason {}", *callbacks_, + Http::Utility::resetReasonToString(reset_reason)); + + updateOutlierDetection(Http::Code::ServiceUnavailable, upstream_request); + + if (maybeRetryReset(reset_reason, upstream_request)) { + return; + } + + bool dropped = reset_reason == Http::StreamResetReason::Overflow; + chargeUpstreamAbort(Http::Code::ServiceUnavailable, dropped, upstream_request); + + const StreamInfo::ResponseFlag response_flags = streamResetReasonToResponseFlag(reset_reason); + const std::string body = + absl::StrCat("upstream connect error or disconnect/reset before headers. 
reset reason: ", + Http::Utility::resetReasonToString(reset_reason)); + + onUpstreamAbort(Http::Code::ServiceUnavailable, response_flags, body, dropped); +} + StreamInfo::ResponseFlag Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { switch (reset_reason) { @@ -650,7 +767,8 @@ Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) { NOT_REACHED_GCOVR_EXCL_LINE; } -void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream) { +void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, + UpstreamRequest* upstream_request, bool end_stream) { // We need to defer gRPC success until after we have processed grpc-status in // the trailers. if (grpc_request_) { @@ -658,55 +776,82 @@ void Filter::handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool en absl::optional grpc_status = Grpc::Common::getGrpcStatus(headers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request->upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request->upstream_host_->stats().rq_error_.inc(); } } else { - upstream_request_->grpc_rq_success_deferred_ = true; + upstream_request->grpc_rq_success_deferred_ = true; } } else { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request->upstream_host_->stats().rq_success_.inc(); } } -void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers) { +void Filter::onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers, + UpstreamRequest* upstream_request) { ENVOY_STREAM_LOG(debug, "upstream 100 continue", *callbacks_); - downstream_response_started_ = true; + if (!downstream_response_started_) { + downstream_response_started_ = true; + final_upstream_request_ = upstream_request; + 
resetOtherUpstreams(upstream_request); + } // Don't send retries after 100-Continue has been sent on. Arguably we could attempt to do a // retry, assume the next upstream would also send an 100-Continue and swallow the second one // but it's sketchy (as the subsequent upstream might not send a 100-Continue) and not worth // the complexity until someone asks for it. retry_state_.reset(); - callbacks_->encode100ContinueHeaders(std::move(headers)); + if (final_upstream_request_ == upstream_request) { + callbacks_->encode100ContinueHeaders(std::move(headers)); + } +} + +void Filter::resetOtherUpstreams(UpstreamRequest* upstream_request) { + UpstreamRequest* upstream_request_tmp; + for (unsigned long i = 0; i < upstream_requests_.size(); i++) { + upstream_request_tmp = upstream_requests_[i].get(); + if (upstream_request_tmp != upstream_request) { + if (!upstream_request_tmp->encode_complete_ || !upstream_request_tmp->decode_complete_) { + upstream_request_tmp->resetStream(); + if (upstream_request_tmp->upstream_host_) { + upstream_request_tmp->upstream_host_->stats().rq_hedge_abandoned_.inc(); + } + cluster_->stats().upstream_rq_hedge_abandoned_.inc(); + } + } + } } -void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr&& headers, - bool end_stream) { +void Filter::onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, + UpstreamRequest* upstream_request, bool end_stream) { ENVOY_STREAM_LOG(debug, "upstream headers complete: end_stream={}", *callbacks_, end_stream); - upstream_request_->upstream_host_->outlierDetector().putHttpResponseCode(response_code); + upstream_request->upstream_host_->outlierDetector().putHttpResponseCode(response_code); if (headers->EnvoyImmediateHealthCheckFail() != nullptr) { - upstream_request_->upstream_host_->healthChecker().setUnhealthy(); + upstream_request->upstream_host_->healthChecker().setUnhealthy(); } - if (retry_state_) { - // Notify retry modifiers about the attempted host. 
- retry_state_->onHostAttempted(upstream_request_->upstream_host_); - + // Check if this upstream request was already retried, for instance after + // hitting a per try timeout. Don't retry it if we already have. + if (retry_state_ && !upstream_request->retried_) { RetryStatus retry_status = retry_state_->shouldRetryHeaders(*headers, [this]() -> void { doRetry(); }); // Capture upstream_host since setupRetry() in the following line will clear - // upstream_request_. - const auto upstream_host = upstream_request_->upstream_host_; - if (retry_status == RetryStatus::Yes && setupRetry(end_stream)) { + // upstream_request. + const auto upstream_host = upstream_request->upstream_host_; + if (retry_status == RetryStatus::Yes && setupRetry()) { + if (!end_stream) { + upstream_request->resetStream(); + } + Http::CodeStats& code_stats = httpContext().codeStats(); code_stats.chargeBasicResponseStat(cluster_->statsScope(), "retry.", static_cast(response_code)); upstream_host->stats().rq_error_.inc(); + upstream_request->retried_ = true; return; } else if (retry_status == RetryStatus::NoOverflow) { callbacks_->streamInfo().setResponseFlag(StreamInfo::ResponseFlag::UpstreamOverflow); @@ -722,7 +867,7 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& if (static_cast(response_code) == Http::Code::Found && route_entry_->internalRedirectAction() == InternalRedirectAction::Handle && - setupRedirect(*headers)) { + setupRedirect(*headers, upstream_request)) { return; // If the redirect could not be handled, fail open and let it pass to the // next downstream. 
@@ -740,12 +885,12 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& } } - upstream_request_->upstream_canary_ = + upstream_request->upstream_canary_ = (headers->EnvoyUpstreamCanary() && headers->EnvoyUpstreamCanary()->value() == "true") || - upstream_request_->upstream_host_->canary(); - chargeUpstreamCode(response_code, *headers, upstream_request_->upstream_host_, false); + upstream_request->upstream_host_->canary(); + chargeUpstreamCode(response_code, *headers, upstream_request->upstream_host_, false); if (!Http::CodeUtility::is5xx(response_code)) { - handleNon5xxResponseHeaders(*headers, end_stream); + handleNon5xxResponseHeaders(*headers, upstream_request, end_stream); } // Append routing cookies @@ -758,47 +903,61 @@ void Filter::onUpstreamHeaders(const uint64_t response_code, Http::HeaderMapPtr& // provide finalizeResponseHeaders functions on the Router::Config and VirtualHost interfaces. route_entry_->finalizeResponseHeaders(*headers, callbacks_->streamInfo()); - downstream_response_started_ = true; + if (!downstream_response_started_) { + downstream_response_started_ = true; + final_upstream_request_ = upstream_request; + resetOtherUpstreams(upstream_request); + } if (end_stream) { - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } - callbacks_->encodeHeaders(std::move(headers), end_stream); + if (final_upstream_request_ == upstream_request) { + callbacks_->encodeHeaders(std::move(headers), end_stream); + } } -void Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) { +void Filter::onUpstreamData(Buffer::Instance& data, UpstreamRequest* upstream_request, + bool end_stream) { if (end_stream) { // gRPC request termination without trailers is an error. 
- if (upstream_request_->grpc_rq_success_deferred_) { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + if (upstream_request->grpc_rq_success_deferred_) { + upstream_request->upstream_host_->stats().rq_error_.inc(); } - onUpstreamComplete(); + onUpstreamComplete(upstream_request); } - callbacks_->encodeData(data, end_stream); + if (final_upstream_request_ == upstream_request) { + callbacks_->encodeData(data, end_stream); + } } -void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers) { - if (upstream_request_->grpc_rq_success_deferred_) { +void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest* upstream_request) { + if (upstream_request->grpc_rq_success_deferred_) { absl::optional grpc_status = Grpc::Common::getGrpcStatus(*trailers); if (grpc_status && !Http::CodeUtility::is5xx(Grpc::Utility::grpcToHttpStatus(grpc_status.value()))) { - upstream_request_->upstream_host_->stats().rq_success_.inc(); + upstream_request->upstream_host_->stats().rq_success_.inc(); } else { - upstream_request_->upstream_host_->stats().rq_error_.inc(); + upstream_request->upstream_host_->stats().rq_error_.inc(); } } - onUpstreamComplete(); - callbacks_->encodeTrailers(std::move(trailers)); + onUpstreamComplete(upstream_request); + if (final_upstream_request_ == upstream_request) { + callbacks_->encodeTrailers(std::move(trailers)); + } } -void Filter::onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map) { - callbacks_->encodeMetadata(std::move(metadata_map)); +void Filter::onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map, + UpstreamRequest* upstream_request) { + if (final_upstream_request_ == upstream_request) { + callbacks_->encodeMetadata(std::move(metadata_map)); + } } -void Filter::onUpstreamComplete() { +void Filter::onUpstreamComplete(UpstreamRequest* upstream_request) { if (!downstream_end_stream_) { - upstream_request_->resetStream(); + upstream_request->resetStream(); } if (config_.emit_dynamic_stats_ && 
!callbacks_->streamInfo().healthCheck() && @@ -807,7 +966,7 @@ void Filter::onUpstreamComplete() { std::chrono::milliseconds response_time = std::chrono::duration_cast( dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - upstream_request_->upstream_host_->outlierDetector().putResponseTime(response_time); + upstream_request->upstream_host_->outlierDetector().putResponseTime(response_time); const Http::HeaderEntry* internal_request_header = downstream_headers_->EnvoyInternalRequest(); const bool internal_request = @@ -821,13 +980,13 @@ void Filter::onUpstreamComplete() { cluster_->statsScope(), EMPTY_STRING, response_time, - upstream_request_->upstream_canary_, + upstream_request->upstream_canary_, internal_request, route_entry_->virtualHost().name(), request_vcluster_ ? request_vcluster_->name() : EMPTY_STRING, zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + upstreamZone(upstream_request->upstream_host_)}; code_stats.chargeResponseTiming(info); @@ -836,12 +995,12 @@ void Filter::onUpstreamComplete() { cluster_->statsScope(), alt_stat_prefix_, response_time, - upstream_request_->upstream_canary_, + upstream_request->upstream_canary_, internal_request, EMPTY_STRING, EMPTY_STRING, zone_name, - upstreamZone(upstream_request_->upstream_host_)}; + upstreamZone(upstream_request->upstream_host_)}; code_stats.chargeResponseTiming(info); } @@ -850,7 +1009,7 @@ void Filter::onUpstreamComplete() { cleanup(); } -bool Filter::setupRetry(bool end_stream) { +bool Filter::setupRetry() { // If we responded before the request was complete we don't bother doing a retry. This may not // catch certain cases where we are in full streaming mode and we have a connect timeout or an // overflow of some kind. 
However, in many cases deployments will use the buffer filter before @@ -861,15 +1020,11 @@ bool Filter::setupRetry(bool end_stream) { } ENVOY_STREAM_LOG(debug, "performing retry", *callbacks_); - if (!end_stream) { - upstream_request_->resetStream(); - } - upstream_request_.reset(); return true; } -bool Filter::setupRedirect(const Http::HeaderMap& headers) { +bool Filter::setupRedirect(const Http::HeaderMap& headers, UpstreamRequest* upstream_request) { ENVOY_STREAM_LOG(debug, "attempting internal redirect", *callbacks_); const Http::HeaderEntry* location = headers.Location(); @@ -883,7 +1038,7 @@ bool Filter::setupRedirect(const Http::HeaderMap& headers) { // completion here and check it in onDestroy. This is annoyingly complicated but is better than // needlessly resetting streams. attempting_internal_redirect_with_complete_stream_ = - upstream_request_->upstream_timing_.last_upstream_rx_byte_received_ && downstream_end_stream_; + upstream_request->upstream_timing_.last_upstream_rx_byte_received_ && downstream_end_stream_; // As with setupRetry, redirects are not supported for streaming requests yet. if (downstream_end_stream_ && @@ -918,19 +1073,20 @@ void Filter::doRetry() { } ASSERT(response_timeout_ || timeout_.global_timeout_.count() == 0); - ASSERT(!upstream_request_); - upstream_request_ = std::make_unique(*this, *conn_pool); - upstream_request_->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); + UpstreamRequestPtr upstream_request = std::make_unique(*this, *conn_pool); + UpstreamRequest* upstream_request_ptr = upstream_request.get(); + upstream_requests_.emplace_back(std::move(upstream_request)); + upstream_request_ptr->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_); // It's possible we got immediately reset. - if (upstream_request_) { + if (upstream_request_ptr) { if (callbacks_->decodingBuffer()) { // If we are doing a retry we need to make a copy. 
Buffer::OwnedImpl copy(*callbacks_->decodingBuffer()); - upstream_request_->encodeData(copy, !downstream_trailers_); + upstream_request_ptr->encodeData(copy, !downstream_trailers_); } if (downstream_trailers_) { - upstream_request_->encodeTrailers(*downstream_trailers_); + upstream_request_ptr->encodeTrailers(*downstream_trailers_); } } } @@ -938,8 +1094,9 @@ void Filter::doRetry() { Filter::UpstreamRequest::UpstreamRequest(Filter& parent, Http::ConnectionPool::Instance& pool) : parent_(parent), conn_pool_(pool), grpc_rq_success_deferred_(false), stream_info_(pool.protocol(), parent_.callbacks_->dispatcher().timeSource()), - calling_encode_headers_(false), upstream_canary_(false), encode_complete_(false), - encode_trailers_(false) { + calling_encode_headers_(false), upstream_canary_(false), decode_complete_(false), + encode_complete_(false), encode_trailers_(false), retried_(false), + outlier_detection_timeout_recorded_(false) { if (parent_.config_.start_child_span_) { span_ = parent_.callbacks_->activeSpan().spawnChild( @@ -972,7 +1129,7 @@ Filter::UpstreamRequest::~UpstreamRequest() { void Filter::UpstreamRequest::decode100ContinueHeaders(Http::HeaderMapPtr&& headers) { ASSERT(100 == Http::Utility::getResponseStatus(*headers)); - parent_.onUpstream100ContinueHeaders(std::move(headers)); + parent_.onUpstream100ContinueHeaders(std::move(headers), this); } void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool end_stream) { @@ -983,28 +1140,29 @@ void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool e upstream_headers_ = headers.get(); const uint64_t response_code = Http::Utility::getResponseStatus(*headers); stream_info_.response_code_ = static_cast(response_code); - parent_.onUpstreamHeaders(response_code, std::move(headers), end_stream); + parent_.onUpstreamHeaders(response_code, std::move(headers), this, end_stream); } void Filter::UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) { 
maybeEndDecode(end_stream); stream_info_.addBytesReceived(data.length()); - parent_.onUpstreamData(data, end_stream); + parent_.onUpstreamData(data, this, end_stream); } void Filter::UpstreamRequest::decodeTrailers(Http::HeaderMapPtr&& trailers) { maybeEndDecode(true); upstream_trailers_ = trailers.get(); - parent_.onUpstreamTrailers(std::move(trailers)); + parent_.onUpstreamTrailers(std::move(trailers), this); } void Filter::UpstreamRequest::decodeMetadata(Http::MetadataMapPtr&& metadata_map) { - parent_.onUpstreamMetadata(std::move(metadata_map)); + parent_.onUpstreamMetadata(std::move(metadata_map), this); } void Filter::UpstreamRequest::maybeEndDecode(bool end_stream) { if (end_stream) { upstream_timing_.onLastUpstreamRxByteReceived(parent_.callbacks_->dispatcher().timeSource()); + decode_complete_ = true; } } @@ -1063,14 +1221,18 @@ void Filter::UpstreamRequest::onResetStream(Http::StreamResetReason reason) { clearRequestEncoder(); if (!calling_encode_headers_) { stream_info_.setResponseFlag(parent_.streamResetReasonToResponseFlag(reason)); - parent_.onUpstreamReset(UpstreamResetType::Reset, - absl::optional(reason)); + parent_.onUpstreamReset(reason, this); } else { deferred_reset_reason_ = reason; } } void Filter::UpstreamRequest::resetStream() { + // Don't reset the stream if we're already done with it. 
+ if (encode_complete_ && decode_complete_) { + return; + } + if (conn_pool_stream_handle_) { ENVOY_STREAM_LOG(debug, "cancelling pool request", *parent_.callbacks_); ASSERT(!request_encoder_); @@ -1082,6 +1244,7 @@ void Filter::UpstreamRequest::resetStream() { ENVOY_STREAM_LOG(debug, "resetting pool request", *parent_.callbacks_); request_encoder_->getStream().removeCallbacks(*this); request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + clearRequestEncoder(); } } @@ -1099,15 +1262,11 @@ void Filter::UpstreamRequest::onPerTryTimeout() { // to the global timeout if (!parent_.downstream_response_started_) { ENVOY_STREAM_LOG(debug, "upstream per try timeout", *parent_.callbacks_); - parent_.cluster_->stats().upstream_rq_per_try_timeout_.inc(); - if (upstream_host_) { - upstream_host_->stats().rq_timeout_.inc(); - } - resetStream(); + + // Set response flag to UT for now, but it might be overwritten if a + // response arrives later and hedge_on_per_try_timeout_ is set stream_info_.setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout); - parent_.onUpstreamReset( - UpstreamResetType::PerTryTimeout, - absl::optional(Http::StreamResetReason::LocalReset)); + parent_.onPerTryTimeout(this); } else { ENVOY_STREAM_LOG(debug, "ignored upstream per try timeout due to already started downstream response", diff --git a/source/common/router/router.h b/source/common/router/router.h index eb3d13f8e69e6..feef6dc46110b 100644 --- a/source/common/router/router.h +++ b/source/common/router/router.h @@ -61,6 +61,11 @@ class FilterUtility { std::chrono::milliseconds per_try_timeout_{0}; }; + struct HedgingParams { + uint32_t initial_requests_; + bool hedge_on_per_try_timeout_; + }; + /** * Set the :scheme header based on the properties of the upstream cluster. 
*/ @@ -88,6 +93,15 @@ class FilterUtility { */ static TimeoutData finalTimeout(const RouteEntry& route, Http::HeaderMap& request_headers, bool insert_envoy_expected_request_timeout_ms, bool grpc_request); + + /** + * Determine the final hedging settings after applying randomized behavior. + * @param route supplies the request route. + * @param random_value supplies a stable random value to use for evaluating whether an additional + * initial request should be sent + * @return HedgingParams the final parameters to use for request hedging + */ + static HedgingParams finalHedgingParams(const RouteEntry& route, uint64_t random_value); }; /** @@ -285,6 +299,9 @@ class Filter : Logger::Loggable, stream_info_.onUpstreamHostSelected(host); upstream_host_ = host; parent_.callbacks_->streamInfo().onUpstreamHostSelected(host); + if (parent_.retry_state_ && host) { + parent_.retry_state_->onHostAttempted(host); + } } // Http::StreamDecoder @@ -347,14 +364,15 @@ class Filter : Logger::Loggable, bool calling_encode_headers_ : 1; bool upstream_canary_ : 1; + bool decode_complete_ : 1; bool encode_complete_ : 1; bool encode_trailers_ : 1; + bool retried_ : 1; + bool outlier_detection_timeout_recorded_ : 1; }; typedef std::unique_ptr UpstreamRequestPtr; - enum class UpstreamResetType { Reset, GlobalTimeout, PerTryTimeout }; - StreamInfo::ResponseFlag streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason); static const std::string upstreamZone(Upstream::HostDescriptionConstSharedPtr upstream_host); @@ -362,6 +380,7 @@ class Filter : Logger::Loggable, Upstream::HostDescriptionConstSharedPtr upstream_host, bool dropped); void chargeUpstreamCode(Http::Code code, Upstream::HostDescriptionConstSharedPtr upstream_host, bool dropped); + void chargeUpstreamAbort(Http::Code code, bool dropped, UpstreamRequest* upstream_request); void cleanup(); virtual RetryStatePtr createRetryState(const RetryPolicy& policy, Http::HeaderMap& request_headers, @@ -371,23 +390,33 @@ class Filter 
: Logger::Loggable, Upstream::ResourcePriority priority) PURE; Http::ConnectionPool::Instance* getConnPool(); void maybeDoShadowing(); + bool maybeRetryReset(Http::StreamResetReason reset_reason, UpstreamRequest* upstream_request); + void onGlobalTimeout(); + void onPerTryTimeout(UpstreamRequest* upstream_request); void onRequestComplete(); void onResponseTimeout(); - void onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers); - void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, bool end_stream); - void onUpstreamData(Buffer::Instance& data, bool end_stream); - void onUpstreamTrailers(Http::HeaderMapPtr&& trailers); - void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map); - void onUpstreamComplete(); - void onUpstreamReset(UpstreamResetType type, - const absl::optional reset_reason); + void onSoftPerTryTimeout(); + void onSoftPerTryTimeout(UpstreamRequest* upstream_request); + void onUpstream100ContinueHeaders(Http::HeaderMapPtr&& headers, UpstreamRequest* upstream_request); + // Handle an "aborted" upstream request, meaning we didn't see response + // headers (e.g. due to a reset). Handles recording stats and responding + // downstream if appropriate. 
+ void onUpstreamAbort(Http::Code code, StreamInfo::ResponseFlag response_flag, absl::string_view body, bool dropped); + void onUpstreamHeaders(uint64_t response_code, Http::HeaderMapPtr&& headers, UpstreamRequest* upstream_request, bool end_stream); + void onUpstreamData(Buffer::Instance& data, UpstreamRequest* upstream_request, bool end_stream); + void onUpstreamTrailers(Http::HeaderMapPtr&& trailers, UpstreamRequest* upstream_request); + void onUpstreamMetadata(Http::MetadataMapPtr&& metadata_map, UpstreamRequest* upstream_request); + void onUpstreamComplete(UpstreamRequest* upstream_request); + void onUpstreamReset(Http::StreamResetReason reset_reason, UpstreamRequest* upstream_request); + void resetOtherUpstreams(UpstreamRequest* upstream_request); void sendNoHealthyUpstreamResponse(); - bool setupRetry(bool end_stream); - bool setupRedirect(const Http::HeaderMap& headers); + bool setupRetry(); + bool setupRedirect(const Http::HeaderMap& headers, UpstreamRequest* upstream_request); + void updateOutlierDetection(Http::Code code, UpstreamRequest* upstream_request); void doRetry(); // Called immediately after a non-5xx header is received from upstream, performs stats accounting // and handle difference between gRPC and non-gRPC requests. 
- void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, bool end_stream); + void handleNon5xxResponseHeaders(const Http::HeaderMap& headers, UpstreamRequest* upstream_request, bool end_stream); TimeSource& timeSource() { return config_.timeSource(); } Http::Context& httpContext() { return config_.http_context_; } @@ -400,8 +429,12 @@ class Filter : Logger::Loggable, const VirtualCluster* request_vcluster_; Event::TimerPtr response_timeout_; FilterUtility::TimeoutData timeout_; + FilterUtility::HedgingParams hedging_params_; Http::Code timeout_response_code_ = Http::Code::GatewayTimeout; - UpstreamRequestPtr upstream_request_; + std::vector upstream_requests_; + // Tracks which upstream request "wins" and will have the corresponding + // response forwarded downstream + UpstreamRequest* final_upstream_request_; bool grpc_request_{}; Http::HeaderMap* downstream_headers_{}; Http::HeaderMap* downstream_trailers_{}; diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index a7b3ee2f48565..b8e79b07c24c3 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -319,7 +319,7 @@ void AdminLayer::mergeValues(const std::unordered_map& values_.emplace(kv.first, SnapshotImpl::createEntry(kv.second)); } } - stats_.admin_overrides_active_.set(values_.empty() ? 
0 : 1); + stats_.admin_overrides_active_.set(!values_.empty()); } DiskLayer::DiskLayer(const std::string& name, const std::string& path, Api::Api& api) @@ -429,8 +429,9 @@ DiskBackedLoaderImpl::DiskBackedLoaderImpl(Event::Dispatcher& dispatcher, RuntimeStats LoaderImpl::generateStats(Stats::Store& store) { std::string prefix = "runtime."; - RuntimeStats stats{ - ALL_RUNTIME_STATS(POOL_COUNTER_PREFIX(store, prefix), POOL_GAUGE_PREFIX(store, prefix))}; + RuntimeStats stats{ALL_RUNTIME_STATS(POOL_BOOL_INDICATOR_PREFIX(store, prefix), + POOL_COUNTER_PREFIX(store, prefix), + POOL_GAUGE_PREFIX(store, prefix))}; return stats; } diff --git a/source/common/runtime/runtime_impl.h b/source/common/runtime/runtime_impl.h index d2e80127a23df..edbed0bbba4a3 100644 --- a/source/common/runtime/runtime_impl.h +++ b/source/common/runtime/runtime_impl.h @@ -43,21 +43,21 @@ class RandomGeneratorImpl : public RandomGenerator { * All runtime stats. @see stats_macros.h */ // clang-format off -#define ALL_RUNTIME_STATS(COUNTER, GAUGE) \ - COUNTER(load_error) \ - COUNTER(override_dir_not_exists) \ - COUNTER(override_dir_exists) \ - COUNTER(load_success) \ - COUNTER(deprecated_feature_use) \ - GAUGE (num_keys) \ - GAUGE (admin_overrides_active) +#define ALL_RUNTIME_STATS(BOOL_INDICATOR, COUNTER, GAUGE) \ + COUNTER (load_error) \ + COUNTER (override_dir_not_exists) \ + COUNTER (override_dir_exists) \ + COUNTER (load_success) \ + COUNTER (deprecated_feature_use) \ + GAUGE (num_keys) \ + BOOL_INDICATOR (admin_overrides_active) // clang-format on /** * Struct definition for all runtime stats. 
@see stats_macros.h */ struct RuntimeStats { - ALL_RUNTIME_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_RUNTIME_STATS(GENERATE_BOOL_INDICATOR_STRUCT, GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; /** diff --git a/source/common/secret/sds_api.h b/source/common/secret/sds_api.h index b19df9d54023b..6123159b372fe 100644 --- a/source/common/secret/sds_api.h +++ b/source/common/secret/sds_api.h @@ -40,7 +40,12 @@ class SdsApi : public Init::Target, void initialize(std::function callback) override; // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); diff --git a/source/common/stats/isolated_store_impl.cc b/source/common/stats/isolated_store_impl.cc index d85d38ef03213..8caa8561c5fad 100644 --- a/source/common/stats/isolated_store_impl.cc +++ b/source/common/stats/isolated_store_impl.cc @@ -23,6 +23,11 @@ IsolatedStoreImpl::IsolatedStoreImpl() std::vector tags; return alloc_.makeGauge(name, std::move(tag_extracted_name), std::move(tags)); }), + bool_indicators_([this](const std::string& name) -> BoolIndicatorSharedPtr { + std::string tag_extracted_name = name; + std::vector tags; + return alloc_.makeBoolIndicator(name, std::move(tag_extracted_name), std::move(tags)); + }), histograms_([this](const std::string& name) -> HistogramSharedPtr { return std::make_shared(name, *this, std::string(name), std::vector()); }) {} @@ -38,6 +43,9 @@ struct IsolatedScopeImpl : public Scope { void deliverHistogramToSinks(const Histogram&, uint64_t) override {} Counter& counter(const std::string& name) override { return 
parent_.counter(prefix_ + name); } Gauge& gauge(const std::string& name) override { return parent_.gauge(prefix_ + name); } + BoolIndicator& boolIndicator(const std::string& name) override { + return parent_.boolIndicator(prefix_ + name); + } Histogram& histogram(const std::string& name) override { return parent_.histogram(prefix_ + name); } diff --git a/source/common/stats/isolated_store_impl.h b/source/common/stats/isolated_store_impl.h index 7765fd50e3e72..91b8d39d1c127 100644 --- a/source/common/stats/isolated_store_impl.h +++ b/source/common/stats/isolated_store_impl.h @@ -61,6 +61,9 @@ class IsolatedStoreImpl : public Store { ScopePtr createScope(const std::string& name) override; void deliverHistogramToSinks(const Histogram&, uint64_t) override {} Gauge& gauge(const std::string& name) override { return gauges_.get(name); } + BoolIndicator& boolIndicator(const std::string& name) override { + return bool_indicators_.get(name); + } Histogram& histogram(const std::string& name) override { Histogram& histogram = histograms_.get(name); return histogram; @@ -70,6 +73,9 @@ class IsolatedStoreImpl : public Store { // Stats::Store std::vector counters() const override { return counters_.toVector(); } std::vector gauges() const override { return gauges_.toVector(); } + std::vector boolIndicators() const override { + return bool_indicators_.toVector(); + } std::vector histograms() const override { return std::vector{}; } @@ -78,6 +84,7 @@ class IsolatedStoreImpl : public Store { HeapStatDataAllocator alloc_; IsolatedStatsCache counters_; IsolatedStatsCache gauges_; + IsolatedStatsCache bool_indicators_; IsolatedStatsCache histograms_; const StatsOptionsImpl stats_options_; }; diff --git a/source/common/stats/source_impl.cc b/source/common/stats/source_impl.cc index e6102d5a608cd..a0cbbe84ff0f5 100644 --- a/source/common/stats/source_impl.cc +++ b/source/common/stats/source_impl.cc @@ -17,6 +17,12 @@ std::vector& SourceImpl::cachedGauges() { } return *gauges_; } 
+std::vector& SourceImpl::cachedBoolIndicators() { + if (!bool_indicators_) { + bool_indicators_ = store_.boolIndicators(); + } + return *bool_indicators_; +} std::vector& SourceImpl::cachedHistograms() { if (!histograms_) { histograms_ = store_.histograms(); @@ -27,6 +33,7 @@ std::vector& SourceImpl::cachedHistograms() { void SourceImpl::clearCache() { counters_.reset(); gauges_.reset(); + bool_indicators_.reset(); histograms_.reset(); } diff --git a/source/common/stats/source_impl.h b/source/common/stats/source_impl.h index a3a79ba9a76ab..5c85d8c980ce8 100644 --- a/source/common/stats/source_impl.h +++ b/source/common/stats/source_impl.h @@ -16,6 +16,7 @@ class SourceImpl : public Source { // Stats::Source std::vector& cachedCounters() override; std::vector& cachedGauges() override; + std::vector& cachedBoolIndicators() override; std::vector& cachedHistograms() override; void clearCache() override; @@ -23,6 +24,7 @@ class SourceImpl : public Source { Store& store_; absl::optional> counters_; absl::optional> gauges_; + absl::optional> bool_indicators_; absl::optional> histograms_; }; diff --git a/source/common/stats/stat_data_allocator_impl.h b/source/common/stats/stat_data_allocator_impl.h index 81df5f4269576..4b17527d3b8ce 100644 --- a/source/common/stats/stat_data_allocator_impl.h +++ b/source/common/stats/stat_data_allocator_impl.h @@ -34,6 +34,8 @@ template class StatDataAllocatorImpl : public StatDataAllocator std::vector&& tags) override; GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) override; + BoolIndicatorSharedPtr makeBoolIndicator(absl::string_view name, std::string&& tag_extracted_name, + std::vector&& tags) override; /** * @param name the full name of the stat. 
@@ -69,6 +71,7 @@ template class CounterImpl : public Counter, public MetricImpl // Stats::Metric std::string name() const override { return std::string(data_.name()); } const char* nameCStr() const override { return data_.name(); } + bool used() const override { return data_.flags_ & Flags::Used; } // Stats::Counter void add(uint64_t amount) override { @@ -80,7 +83,6 @@ template class CounterImpl : public Counter, public MetricImpl void inc() override { add(1); } uint64_t latch() override { return data_.pending_increment_.exchange(0); } void reset() override { data_.value_ = 0; } - bool used() const override { return data_.flags_ & Flags::Used; } uint64_t value() const override { return data_.value_; } private: @@ -121,6 +123,7 @@ template class GaugeImpl : public Gauge, public MetricImpl { // Stats::Metric std::string name() const override { return std::string(data_.name()); } const char* nameCStr() const override { return data_.name(); } + bool used() const override { return data_.flags_ & Flags::Used; } // Stats::Gauge virtual void add(uint64_t amount) override { @@ -135,11 +138,10 @@ template class GaugeImpl : public Gauge, public MetricImpl { } virtual void sub(uint64_t amount) override { ASSERT(data_.value_ >= amount); - ASSERT(used()); + ASSERT(used() || amount == 0); data_.value_ -= amount; } virtual uint64_t value() const override { return data_.value_; } - bool used() const override { return data_.flags_ & Flags::Used; } private: StatData& data_; @@ -167,6 +169,50 @@ class NullGaugeImpl : public Gauge { uint64_t value() const override { return 0; } }; +/** + * BoolIndicator implementation that wraps a StatData. 
+ */ +template class BoolIndicatorImpl : public BoolIndicator, public MetricImpl { +public: + BoolIndicatorImpl(StatData& data, StatDataAllocatorImpl& alloc, + std::string&& tag_extracted_name, std::vector&& tags) + : MetricImpl(std::move(tag_extracted_name), std::move(tags)), data_(data), alloc_(alloc) {} + ~BoolIndicatorImpl() { alloc_.free(data_); } + + // Stats::Metric + std::string name() const override { return std::string(data_.name()); } + const char* nameCStr() const override { return data_.name(); } + bool used() const override { return data_.flags_ & Flags::Used; } + + // Stats::BoolIndicator + virtual void set(bool value) override { + data_.value_ = value ? 1 : 0; + data_.flags_ |= Flags::Used; + } + virtual bool value() const override { return data_.value_; } + +private: + StatData& data_; + StatDataAllocatorImpl& alloc_; +}; + +/** + * Null bool implementation. + * No-ops on all calls and requires no underlying metric or data. + */ +class NullBoolIndicatorImpl : public BoolIndicator { +public: + NullBoolIndicatorImpl() {} + ~NullBoolIndicatorImpl() {} + std::string name() const override { return ""; } + const char* nameCStr() const override { return ""; } + const std::string& tagExtractedName() const override { CONSTRUCT_ON_FIRST_USE(std::string, ""); } + const std::vector& tags() const override { CONSTRUCT_ON_FIRST_USE(std::vector, {}); } + void set(bool) override {} + bool used() const override { return false; } + bool value() const override { return false; } +}; + template CounterSharedPtr StatDataAllocatorImpl::makeCounter(absl::string_view name, std::string&& tag_extracted_name, @@ -191,5 +237,16 @@ GaugeSharedPtr StatDataAllocatorImpl::makeGauge(absl::string_view name std::move(tags)); } +template +BoolIndicatorSharedPtr StatDataAllocatorImpl::makeBoolIndicator( + absl::string_view name, std::string&& tag_extracted_name, std::vector&& tags) { + StatData* data = alloc(name); + if (data == nullptr) { + return nullptr; + } + return 
std::make_shared>(*data, *this, std::move(tag_extracted_name), + std::move(tags)); +} + } // namespace Stats } // namespace Envoy diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 2c72dbf94b983..d8d7567658734 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -45,6 +45,7 @@ void ThreadLocalStoreImpl::setStatsMatcher(StatsMatcherPtr&& stats_matcher) { for (ScopeImpl* scope : scopes_) { removeRejectedStats(scope->central_cache_.counters_, deleted_counters_); removeRejectedStats(scope->central_cache_.gauges_, deleted_gauges_); + removeRejectedStats(scope->central_cache_.bool_indicators_, deleted_bool_indicators_); removeRejectedStats(scope->central_cache_.histograms_, deleted_histograms_); } } @@ -111,6 +112,22 @@ std::vector ThreadLocalStoreImpl::gauges() const { return ret; } +std::vector ThreadLocalStoreImpl::boolIndicators() const { + // Handle de-dup due to overlapping scopes. + std::vector ret; + CharStarHashSet names; + Thread::LockGuard lock(lock_); + for (ScopeImpl* scope : scopes_) { + for (auto& bool_indicator : scope->central_cache_.bool_indicators_) { + if (names.insert(bool_indicator.first).second) { + ret.push_back(bool_indicator.second); + } + } + } + + return ret; +} + std::vector ThreadLocalStoreImpl::histograms() const { std::vector ret; Thread::LockGuard lock(lock_); @@ -359,6 +376,34 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gauge(const std::string& name) { tls_cache); } +BoolIndicator& ThreadLocalStoreImpl::ScopeImpl::boolIndicator(const std::string& name) { + // See comments in counter(). There is no super clean way (via templates or otherwise) to + // share this code so I'm leaving it largely duplicated for now. + // + // Note that we can do map.find(final_name.c_str()), but we cannot do + // map[final_name.c_str()] as the char*-keyed maps would then save the pointer to + // a temporary, and address sanitization errors would follow. 
Instead we must + // do a find() first, using that if it succeeds. If it fails, then after we + // construct the stat we can insert it into the required maps. + std::string final_name = prefix_ + name; + if (parent_.rejects(final_name)) { + return null_bool_; + } + + StatMap* tls_cache = nullptr; + if (!parent_.shutting_down_ && parent_.tls_) { + tls_cache = &parent_.tls_->getTyped().scope_cache_[this->scope_id_].bool_indicators_; + } + + return safeMakeStat( + final_name, central_cache_.bool_indicators_, + [](StatDataAllocator& allocator, absl::string_view name, std::string&& tag_extracted_name, + std::vector&& tags) -> BoolIndicatorSharedPtr { + return allocator.makeBoolIndicator(name, std::move(tag_extracted_name), std::move(tags)); + }, + tls_cache); +} + Histogram& ThreadLocalStoreImpl::ScopeImpl::histogram(const std::string& name) { // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. 
diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 4f85c763567c2..292147f44d473 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -145,6 +145,9 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo return default_scope_->deliverHistogramToSinks(histogram, value); } Gauge& gauge(const std::string& name) override { return default_scope_->gauge(name); } + BoolIndicator& boolIndicator(const std::string& name) override { + return default_scope_->boolIndicator(name); + } Histogram& histogram(const std::string& name) override { return default_scope_->histogram(name); }; @@ -152,6 +155,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // Stats::Store std::vector counters() const override; std::vector gauges() const override; + std::vector boolIndicators() const override; std::vector histograms() const override; // Stats::StoreRoot @@ -176,6 +180,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo struct TlsCacheEntry { StatMap counters_; StatMap gauges_; + StatMap bool_indicators_; StatMap histograms_; StatMap parent_histograms_; }; @@ -183,6 +188,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo struct CentralCacheEntry { StatMap counters_; StatMap gauges_; + StatMap bool_indicators_; StatMap histograms_; }; @@ -199,6 +205,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo } void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override; Gauge& gauge(const std::string& name) override; + BoolIndicator& boolIndicator(const std::string& name) override; Histogram& histogram(const std::string& name) override; Histogram& tlsHistogram(const std::string& name, ParentHistogramImpl& parent) override; const Stats::StatsOptions& statsOptions() const override { return parent_.statsOptions(); } @@ -234,6 +241,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo 
NullCounterImpl null_counter_; NullGaugeImpl null_gauge_; + NullBoolIndicatorImpl null_bool_; NullHistogramImpl null_histogram_; }; @@ -283,6 +291,7 @@ class ThreadLocalStoreImpl : Logger::Loggable, public StoreRo // but that would be fairly complex to change. std::vector deleted_counters_; std::vector deleted_gauges_; + std::vector deleted_bool_indicators_; std::vector deleted_histograms_; }; diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index aaad5800e0d1a..bd09d3f9329b1 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -190,14 +190,17 @@ envoy_cc_library( srcs = ["logical_dns_cluster.cc"], hdrs = ["logical_dns_cluster.h"], deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_factory_interface", "//source/common/common:empty_string", "//source/common/config:utility_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", ], ) @@ -206,11 +209,14 @@ envoy_cc_library( srcs = ["original_dst_cluster.cc"], hdrs = ["original_dst_cluster.h"], deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/secret:secret_manager_interface", + "//include/envoy/upstream:cluster_factory_interface", "//source/common/common:empty_string", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/extensions/clusters:well_known_names", ], ) @@ -280,11 +286,13 @@ envoy_cc_library( srcs = ["eds.cc"], hdrs = ["eds.h"], deps = [ + ":cluster_factory_lib", ":upstream_includes", "//include/envoy/config:grpc_mux_interface", "//include/envoy/config:subscription_interface", "//include/envoy/local_info:local_info_interface", "//include/envoy/secret:secret_manager_interface", + "//include/envoy/upstream:cluster_factory_interface", 
"//include/envoy/upstream:locality_lib", "//source/common/config:metadata_lib", "//source/common/config:subscription_factory_lib", @@ -293,6 +301,7 @@ envoy_cc_library( "//source/common/network:resolver_lib", "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", "@envoy_api//envoy/api/v2:eds_cc", "@envoy_api//envoy/api/v2/core:base_cc", "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", @@ -383,3 +392,71 @@ envoy_cc_library( "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", ], ) + +envoy_cc_library( + name = "cluster_factory_lib", + srcs = ["cluster_factory_impl.cc"], + deps = [ + ":cluster_factory_includes", + ":health_checker_lib", + ":upstream_includes", + "//include/envoy/event:dispatcher_interface", + "//include/envoy/event:timer_interface", + "//include/envoy/network:dns_interface", + "//include/envoy/network:listen_socket_interface", + "//include/envoy/ssl:context_interface", + "//include/envoy/upstream:health_checker_interface", + "//source/common/common:enum_to_int", + "//source/common/common:utility_lib", + "//source/common/config:protocol_json_lib", + "//source/common/config:tls_context_json_lib", + "//source/common/http:utility_lib", + "//source/common/network:address_lib", + "//source/common/network:resolver_lib", + "//source/common/network:socket_option_factory_lib", + "//source/common/network:utility_lib", + "//source/common/protobuf", + "//source/common/protobuf:utility_lib", + "//source/extensions/clusters:well_known_names", + "//source/extensions/transport_sockets:well_known_names", + "//source/server:transport_socket_config_lib", + "@envoy_api//envoy/api/v2/core:base_cc", + ], +) + +envoy_cc_library( + name = "cluster_factory_includes", + hdrs = ["cluster_factory_impl.h"], + deps = [ + ":load_balancer_lib", + ":outlier_detection_lib", + ":resource_manager_lib", + ":upstream_includes", + "//include/envoy/event:timer_interface", + 
"//include/envoy/local_info:local_info_interface", + "//include/envoy/network:dns_interface", + "//include/envoy/runtime:runtime_interface", + "//include/envoy/server:transport_socket_config_interface", + "//include/envoy/ssl:context_manager_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_factory_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//include/envoy/upstream:health_checker_interface", + "//include/envoy/upstream:load_balancer_interface", + "//include/envoy/upstream:locality_lib", + "//include/envoy/upstream:upstream_interface", + "//source/common/common:callback_impl_lib", + "//source/common/common:enum_to_int", + "//source/common/common:minimal_logger_lib", + "//source/common/config:metadata_lib", + "//source/common/config:utility_lib", + "//source/common/config:well_known_names", + "//source/common/stats:isolated_store_lib", + "//source/common/stats:stats_lib", + "//source/extensions/clusters:well_known_names", + "//source/server:init_manager_lib", + "//source/server:transport_socket_config_lib", + "@envoy_api//envoy/api/v2/core:base_cc", + "@envoy_api//envoy/api/v2/endpoint:endpoint_cc", + ], +) diff --git a/source/common/upstream/cds_api_impl.cc b/source/common/upstream/cds_api_impl.cc index c3eb5c76c07a5..805ffe41eed7b 100644 --- a/source/common/upstream/cds_api_impl.cc +++ b/source/common/upstream/cds_api_impl.cc @@ -34,31 +34,47 @@ CdsApiImpl::CdsApiImpl(const envoy::api::v2::core::ConfigSource& cds_config, Clu Config::SubscriptionFactory::subscriptionFromConfigSource( cds_config, local_info, dispatcher, cm, random, *scope_, "envoy.api.v2.ClusterDiscoveryService.FetchClusters", - "envoy.api.v2.ClusterDiscoveryService.StreamClusters", api); + "envoy.api.v2.ClusterDiscoveryService.DeltaClusters", api); } void CdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::string& version_info) { + ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); 
+ for (const auto& cluster : resources) { + clusters_to_remove.erase(cluster.name()); + } + Protobuf::RepeatedPtrField to_remove_repeated; + for (const auto& cluster : clusters_to_remove) { + *to_remove_repeated.Add() = cluster.first; + } + Protobuf::RepeatedPtrField to_add_repeated; + for (const auto& cluster : resources) { + envoy::api::v2::Resource* to_add = to_add_repeated.Add(); + to_add->set_name(cluster.name()); + to_add->set_version(version_info); + to_add->mutable_resource()->PackFrom(cluster); + } + onConfigUpdate(to_add_repeated, to_remove_repeated, version_info); +} + +void CdsApiImpl::onConfigUpdate( + const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info) { cm_.adsMux().pause(Config::TypeUrl::get().ClusterLoadAssignment); Cleanup eds_resume([this] { cm_.adsMux().resume(Config::TypeUrl::get().ClusterLoadAssignment); }); std::vector exception_msgs; std::unordered_set cluster_names; - for (const auto& cluster : resources) { - if (!cluster_names.insert(cluster.name()).second) { - throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); - } - } - for (const auto& cluster : resources) { - MessageUtil::validate(cluster); - } - // We need to keep track of which clusters we might need to remove. 
- ClusterManager::ClusterInfoMap clusters_to_remove = cm_.clusters(); - for (auto& cluster : resources) { - const std::string cluster_name = cluster.name(); + for (const auto& resource : added_resources) { + envoy::api::v2::Cluster cluster; try { - clusters_to_remove.erase(cluster_name); + cluster = MessageUtil::anyConvert(resource.resource()); + MessageUtil::validate(cluster); + if (!cluster_names.insert(cluster.name()).second) { + throw EnvoyException(fmt::format("duplicate cluster {} found", cluster.name())); + } if (cm_.addOrUpdateCluster( - cluster, version_info, + cluster, resource.version(), [this](const std::string&, ClusterManager::ClusterWarmingState state) { // Following if/else block implements a control flow mechanism that can be used // by an ADS implementation to properly sequence CDS and RDS update. It is not @@ -85,26 +101,24 @@ void CdsApiImpl::onConfigUpdate(const ResourceVector& resources, const std::stri cm_.adsMux().resume(Config::TypeUrl::get().Cluster); } })) { - ENVOY_LOG(debug, "cds: add/update cluster '{}'", cluster_name); + ENVOY_LOG(debug, "cds: add/update cluster '{}'", cluster.name()); } } catch (const EnvoyException& e) { - exception_msgs.push_back(fmt::format("{}: {}", cluster_name, e.what())); + exception_msgs.push_back(fmt::format("{}: {}", cluster.name(), e.what())); } } - - for (auto cluster : clusters_to_remove) { - const std::string cluster_name = cluster.first; - if (cm_.removeCluster(cluster_name)) { - ENVOY_LOG(debug, "cds: remove cluster '{}'", cluster_name); + for (auto resource_name : removed_resources) { + if (cm_.removeCluster(resource_name)) { + ENVOY_LOG(debug, "cds: remove cluster '{}'", resource_name); } } - version_info_ = version_info; runInitializeCallbackIfAny(); if (!exception_msgs.empty()) { throw EnvoyException( fmt::format("Error adding/updating cluster(s) {}", StringUtil::join(exception_msgs, ", "))); } + system_version_info_ = system_version_info; } void CdsApiImpl::onConfigUpdateFailed(const 
EnvoyException*) { diff --git a/source/common/upstream/cds_api_impl.h b/source/common/upstream/cds_api_impl.h index 8fb1fb1cc3035..77e215259fd75 100644 --- a/source/common/upstream/cds_api_impl.h +++ b/source/common/upstream/cds_api_impl.h @@ -32,10 +32,13 @@ class CdsApiImpl : public CdsApi, void setInitializedCb(std::function callback) override { initialize_callback_ = callback; } - const std::string versionInfo() const override { return version_info_; } + const std::string versionInfo() const override { return system_version_info_; } // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override; void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); @@ -49,7 +52,7 @@ class CdsApiImpl : public CdsApi, ClusterManager& cm_; std::unique_ptr> subscription_; - std::string version_info_; + std::string system_version_info_; std::function initialize_callback_; Stats::ScopePtr scope_; }; diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc new file mode 100644 index 0000000000000..4b3f8a8940c23 --- /dev/null +++ b/source/common/upstream/cluster_factory_impl.cc @@ -0,0 +1,152 @@ +#include "common/upstream/cluster_factory_impl.h" + +#include "common/http/utility.h" +#include "common/network/address_impl.h" +#include "common/network/resolver_impl.h" +#include "common/network/socket_option_factory.h" +#include "common/upstream/health_checker_impl.h" + +#include "server/transport_socket_config_impl.h" + +namespace Envoy { +namespace Upstream { + +namespace { + +Stats::ScopePtr generateStatsScope(const envoy::api::v2::Cluster& config, Stats::Store& stats) { + return 
stats.createScope(fmt::format( + "cluster.{}.", config.alt_stat_name().empty() ? config.name() : config.alt_stat_name())); +} + +} // namespace + +ClusterSharedPtr ClusterFactoryImplBase::create( + const envoy::api::v2::Cluster& cluster, ClusterManager& cluster_manager, Stats::Store& stats, + ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, + Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, + Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) { + + std::string cluster_type; + + if (!cluster.has_cluster_type()) { + switch (cluster.type()) { + case envoy::api::v2::Cluster::STATIC: + cluster_type = Extensions::Clusters::ClusterTypes::get().Static; + break; + case envoy::api::v2::Cluster::STRICT_DNS: + cluster_type = Extensions::Clusters::ClusterTypes::get().StrictDns; + break; + case envoy::api::v2::Cluster::LOGICAL_DNS: + cluster_type = Extensions::Clusters::ClusterTypes::get().LogicalDns; + break; + case envoy::api::v2::Cluster::ORIGINAL_DST: + cluster_type = Extensions::Clusters::ClusterTypes::get().OriginalDst; + break; + case envoy::api::v2::Cluster::EDS: + cluster_type = Extensions::Clusters::ClusterTypes::get().Eds; + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } else { + cluster_type = cluster.cluster_type().name(); + } + ClusterFactory* factory = Registry::FactoryRegistry::getFactory(cluster_type); + + if (factory == nullptr) { + throw EnvoyException(fmt::format( + "Didn't find a registered cluster factory implementation for name: '{}'", cluster_type)); + } + + ClusterFactoryContextImpl context(cluster_manager, stats, tls, std::move(dns_resolver), + ssl_context_manager, runtime, random, dispatcher, log_manager, + local_info, admin, singleton_manager, + 
std::move(outlier_event_logger), added_via_api, api); + return factory->create(cluster, context); +} + +Network::DnsResolverSharedPtr +ClusterFactoryImplBase::selectDnsResolver(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) { + // We make this a shared pointer to deal with the distinct ownership + // scenarios that can exist: in one case, we pass in the "default" + // DNS resolver that is owned by the Server::Instance. In the case + // where 'dns_resolvers' is specified, we have per-cluster DNS + // resolvers that are created here but ownership resides with + // StrictDnsClusterImpl/LogicalDnsCluster. + if (!cluster.dns_resolvers().empty()) { + const auto& resolver_addrs = cluster.dns_resolvers(); + std::vector resolvers; + resolvers.reserve(resolver_addrs.size()); + for (const auto& resolver_addr : resolver_addrs) { + resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); + } + return context.dispatcher().createDnsResolver(resolvers); + } + + return context.dnsResolver(); +} + +ClusterSharedPtr ClusterFactoryImplBase::create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) { + + auto stats_scope = generateStatsScope(cluster, context.stats()); + Server::Configuration::TransportSocketFactoryContextImpl factory_context( + context.admin(), context.sslContextManager(), *stats_scope, context.clusterManager(), + context.localInfo(), context.dispatcher(), context.random(), context.stats(), + context.singletonManager(), context.tls(), context.api()); + + ClusterImplBaseSharedPtr new_cluster = + createClusterImpl(cluster, context, factory_context, std::move(stats_scope)); + + if (!cluster.health_checks().empty()) { + // TODO(htuch): Need to support multiple health checks in v2. 
+ if (cluster.health_checks().size() != 1) { + throw EnvoyException("Multiple health checks not supported"); + } else { + new_cluster->setHealthChecker(HealthCheckerFactory::create( + cluster.health_checks()[0], *new_cluster, context.runtime(), context.random(), + context.dispatcher(), context.logManager())); + } + } + + new_cluster->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( + *new_cluster, cluster, context.dispatcher(), context.runtime(), + context.outlierEventLogger())); + return std::move(new_cluster); +} + +ClusterImplBaseSharedPtr StaticClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the static cluster factory. @see RegisterFactory. + */ +REGISTER_FACTORY(StaticClusterFactory, ClusterFactory); + +ClusterImplBaseSharedPtr StrictDnsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + auto selected_dns_resolver = selectDnsResolver(cluster, context); + + return std::make_unique(cluster, context.runtime(), selected_dns_resolver, + socket_factory_context, std::move(stats_scope), + context.addedViaApi()); +} + +/** + * Static registration for the strict dns cluster factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(StrictDnsClusterFactory, ClusterFactory); + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/cluster_factory_impl.h b/source/common/upstream/cluster_factory_impl.h new file mode 100644 index 0000000000000..6d4c8b95c62e5 --- /dev/null +++ b/source/common/upstream/cluster_factory_impl.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "envoy/api/v2/core/base.pb.h" +#include "envoy/api/v2/endpoint/endpoint.pb.h" +#include "envoy/config/typed_metadata.h" +#include "envoy/event/timer.h" +#include "envoy/local_info/local_info.h" +#include "envoy/network/dns.h" +#include "envoy/runtime/runtime.h" +#include "envoy/secret/secret_manager.h" +#include "envoy/server/transport_socket_config.h" +#include "envoy/ssl/context_manager.h" +#include "envoy/stats/scope.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_factory.h" +#include "envoy/upstream/cluster_manager.h" +#include "envoy/upstream/health_checker.h" +#include "envoy/upstream/load_balancer.h" +#include "envoy/upstream/locality.h" +#include "envoy/upstream/upstream.h" + +#include "common/common/callback_impl.h" +#include "common/common/enum_to_int.h" +#include "common/common/logger.h" +#include "common/config/metadata.h" +#include "common/config/utility.h" +#include "common/config/well_known_names.h" +#include "common/network/utility.h" +#include "common/protobuf/utility.h" +#include "common/stats/isolated_store_impl.h" +#include "common/upstream/load_balancer_impl.h" +#include "common/upstream/outlier_detection_impl.h" +#include "common/upstream/resource_manager_impl.h" +#include "common/upstream/upstream_impl.h" + +#include "server/init_manager_impl.h" + +#include "extensions/clusters/well_known_names.h" + +namespace Envoy { +namespace Upstream { + +class ClusterFactoryContextImpl : public ClusterFactoryContext { + +public: + 
ClusterFactoryContextImpl( + ClusterManager& cluster_manager, Stats::Store& stats, ThreadLocal::SlotAllocator& tls, + Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) + : cluster_manager_(cluster_manager), stats_(stats), tls_(tls), + dns_resolver_(std::move(dns_resolver)), ssl_context_manager_(ssl_context_manager), + runtime_(runtime), random_(random), dispatcher_(dispatcher), log_manager_(log_manager), + local_info_(local_info), admin_(admin), singleton_manager_(singleton_manager), + outlier_event_logger_(std::move(outlier_event_logger)), added_via_api_(added_via_api), + api_(api) {} + + ClusterManager& clusterManager() override { return cluster_manager_; } + Stats::Store& stats() override { return stats_; } + ThreadLocal::SlotAllocator& tls() override { return tls_; } + Network::DnsResolverSharedPtr dnsResolver() override { return dns_resolver_; } + Ssl::ContextManager& sslContextManager() override { return ssl_context_manager_; } + Runtime::Loader& runtime() override { return runtime_; } + Runtime::RandomGenerator& random() override { return random_; } + Event::Dispatcher& dispatcher() override { return dispatcher_; } + AccessLog::AccessLogManager& logManager() override { return log_manager_; } + const LocalInfo::LocalInfo& localInfo() override { return local_info_; } + Server::Admin& admin() override { return admin_; } + Singleton::Manager& singletonManager() override { return singleton_manager_; } + Outlier::EventLoggerSharedPtr outlierEventLogger() override { return outlier_event_logger_; } + bool addedViaApi() override { return added_via_api_; } + Api::Api& api() override { return api_; } + 
+private: + ClusterManager& cluster_manager_; + Stats::Store& stats_; + ThreadLocal::SlotAllocator& tls_; + Network::DnsResolverSharedPtr dns_resolver_; + Ssl::ContextManager& ssl_context_manager_; + Runtime::Loader& runtime_; + Runtime::RandomGenerator& random_; + Event::Dispatcher& dispatcher_; + AccessLog::AccessLogManager& log_manager_; + const LocalInfo::LocalInfo& local_info_; + Server::Admin& admin_; + Singleton::Manager& singleton_manager_; + Outlier::EventLoggerSharedPtr outlier_event_logger_; + const bool added_via_api_; + Api::Api& api_; +}; + +/** + * Base class for all cluster factory implementations. This class can be directly extended if the + * custom cluster does not have any custom configuration. For a custom cluster with custom + * configuration, use ConfigurableClusterFactoryBase instead. + */ +class ClusterFactoryImplBase : public ClusterFactory { +public: + /** + * Static method to get the registered cluster factory and create an instance of cluster. + */ + static ClusterSharedPtr + create(const envoy::api::v2::Cluster& cluster, ClusterManager& cluster_manager, + Stats::Store& stats, ThreadLocal::Instance& tls, + Network::DnsResolverSharedPtr dns_resolver, Ssl::ContextManager& ssl_context_manager, + Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, + AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, + Server::Admin& admin, Singleton::Manager& singleton_manager, + Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api); + + /** + * Create a dns resolver to be used by the cluster. 
+ */ + Network::DnsResolverSharedPtr selectDnsResolver(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context); + + // Upstream::ClusterFactory + ClusterSharedPtr create(const envoy::api::v2::Cluster& cluster, + ClusterFactoryContext& context) override; + std::string name() override { return name_; } + +protected: + ClusterFactoryImplBase(const std::string& name) : name_(name) {} + +private: + /** + * Create an instance of ClusterImplBase. + */ + virtual ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) PURE; + const std::string name_; +}; + +/** + * Factory for StaticClusterImpl cluster. + */ +class StaticClusterFactory : public ClusterFactoryImplBase { +public: + StaticClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Static) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + +/** + * Factory for StrictDnsClusterImpl + */ +class StrictDnsClusterFactory : public ClusterFactoryImplBase { +public: + StrictDnsClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().StrictDns) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + +/** + * Common base class for custom cluster factory with custom configuration. + * @param ConfigProto is the configuration protobuf. 
+ */ +template class ConfigurableClusterFactoryBase : public ClusterFactoryImplBase { +public: + /** + * @return ProtobufTypes::MessagePtr create empty config proto message. + */ + virtual ProtobufTypes::MessagePtr createEmptyConfigProto() { + return std::make_unique(); + } + +protected: + ConfigurableClusterFactoryBase(const std::string& name) : ClusterFactoryImplBase(name) {} + +private: + virtual ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override { + ProtobufTypes::MessagePtr config = createEmptyConfigProto(); + Config::Utility::translateOpaqueConfig(cluster.cluster_type().typed_config(), + ProtobufWkt::Struct::default_instance(), *config); + return createClusterWithConfig(cluster, + MessageUtil::downcastAndValidate(*config), + context, socket_factory_context, std::move(stats_scope)); + } + + virtual ClusterImplBaseSharedPtr createClusterWithConfig( + const envoy::api::v2::Cluster& cluster, const ConfigProto& proto_config, + ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) PURE; +}; + +} // namespace Upstream +} // namespace Envoy diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index ba9b0d6622bff..d178871866286 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1240,10 +1240,10 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( ClusterSharedPtr ProdClusterManagerFactory::clusterFromProto( const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) { - return ClusterImplBase::create(cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, - 
runtime_, random_, main_thread_dispatcher_, log_manager_, - local_info_, admin_, singleton_manager_, outlier_event_logger, - added_via_api, api_); + return ClusterFactoryImplBase::create( + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + main_thread_dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + outlier_event_logger, added_via_api, api_); } CdsApiPtr ProdClusterManagerFactory::createCds(const envoy::api::v2::core::ConfigSource& cds_config, diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 07d63b062ef07..0e9e8eb52667d 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -178,6 +178,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable callback) override { init_helper_.setInitializedCb(callback); } + ClusterInfoMap clusters() override { // TODO(mattklein123): Add ability to see warming clusters in admin output. 
ClusterInfoMap clusters_map; diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 8d74c3e5ae150..91c12b9695d15 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -1,19 +1,8 @@ #include "common/upstream/eds.h" #include "envoy/api/v2/eds.pb.validate.h" -#include "envoy/common/exception.h" -#include "envoy/stats/scope.h" -#include "common/common/fmt.h" -#include "common/config/metadata.h" #include "common/config/subscription_factory.h" -#include "common/config/utility.h" -#include "common/config/well_known_names.h" -#include "common/network/address_impl.h" -#include "common/network/resolver_impl.h" -#include "common/network/utility.h" -#include "common/protobuf/utility.h" -#include "common/upstream/load_balancer_impl.h" namespace Envoy { namespace Upstream { @@ -173,5 +162,22 @@ void EdsClusterImpl::onConfigUpdateFailed(const EnvoyException* e) { onPreInitComplete(); } +ClusterImplBaseSharedPtr EdsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + if (!cluster.has_eds_cluster_config()) { + throw EnvoyException("cannot create an EDS cluster without an EDS config"); + } + + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the EDS cluster factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(EdsClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/eds.h b/source/common/upstream/eds.h index 05a2010105231..197d654a57815 100644 --- a/source/common/upstream/eds.h +++ b/source/common/upstream/eds.h @@ -8,8 +8,11 @@ #include "envoy/stats/scope.h" #include "envoy/upstream/locality.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { @@ -28,7 +31,12 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, InitializePhase initializePhase() const override { return InitializePhase::Secondary; } // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).cluster_name(); @@ -68,5 +76,16 @@ class EdsClusterImpl : public BaseDynamicClusterImpl, HostMap all_hosts_; }; +class EdsClusterFactory : public ClusterFactoryImplBase { +public: + EdsClusterFactory() : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().Eds) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index b884fea2a0958..6f1c4547180d3 100644 --- 
a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -151,5 +151,21 @@ Upstream::Host::CreateConnectionData LogicalDnsCluster::LogicalHost::createConne parent_.lbEndpoint(), shared_from_this())}}; } +ClusterImplBaseSharedPtr LogicalDnsClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + auto selected_dns_resolver = selectDnsResolver(cluster, context); + + return std::make_unique(cluster, context.runtime(), selected_dns_resolver, + context.tls(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the logical DNS cluster factory. + */ +REGISTER_FACTORY(LogicalDnsClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/logical_dns_cluster.h b/source/common/upstream/logical_dns_cluster.h index b75b7858da03b..79e60b9698c56 100644 --- a/source/common/upstream/logical_dns_cluster.h +++ b/source/common/upstream/logical_dns_cluster.h @@ -9,8 +9,11 @@ #include "envoy/thread_local/thread_local.h" #include "common/common/empty_string.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { @@ -154,5 +157,17 @@ class LogicalDnsCluster : public ClusterImplBase { const envoy::api::v2::ClusterLoadAssignment load_assignment_; }; +class LogicalDnsClusterFactory : public ClusterFactoryImplBase { +public: + LogicalDnsClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().LogicalDns) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + 
Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index f339491069ebd..15aa74facc998 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -160,16 +160,18 @@ void OriginalDstCluster::cleanup() { // Given the current config, only EDS clusters support multiple priorities. ASSERT(priority_set_.hostSetsPerPriority().size() == 1); const auto& host_set = priority_set_.getOrCreateHostSet(0); - - ENVOY_LOG(debug, "Cleaning up stale original dst hosts."); - for (const HostSharedPtr& host : host_set.hosts()) { - if (host->used()) { - ENVOY_LOG(debug, "Keeping active host {}.", host->address()->asString()); - new_hosts->emplace_back(host); - host->used(false); // Mark to be removed during the next round. - } else { - ENVOY_LOG(debug, "Removing stale host {}.", host->address()->asString()); - to_be_removed.emplace_back(host); + ENVOY_LOG(trace, "Stale original dst hosts cleanup triggered."); + if (!host_set.hosts().empty()) { + ENVOY_LOG(debug, "Cleaning up stale original dst hosts."); + for (const HostSharedPtr& host : host_set.hosts()) { + if (host->used()) { + ENVOY_LOG(debug, "Keeping active host {}.", host->address()->asString()); + new_hosts->emplace_back(host); + host->used(false); // Mark to be removed during the next round. 
+ } else { + ENVOY_LOG(debug, "Removing stale host {}.", host->address()->asString()); + to_be_removed.emplace_back(host); + } } } @@ -182,5 +184,27 @@ void OriginalDstCluster::cleanup() { cleanup_timer_->enableTimer(cleanup_interval_ms_); } +ClusterImplBaseSharedPtr OriginalDstClusterFactory::createClusterImpl( + const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) { + if (cluster.lb_policy() != envoy::api::v2::Cluster::ORIGINAL_DST_LB) { + throw EnvoyException(fmt::format( + "cluster: cluster type 'original_dst' may only be used with LB type 'original_dst_lb'")); + } + if (cluster.has_lb_subset_config() && cluster.lb_subset_config().subset_selectors_size() != 0) { + throw EnvoyException( + fmt::format("cluster: cluster type 'original_dst' may not be used with lb_subset_config")); + } + + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi()); +} + +/** + * Static registration for the original dst cluster factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(OriginalDstClusterFactory, ClusterFactory); + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index b88f18d57be29..a061e8ee18475 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -12,8 +12,11 @@ #include "common/common/empty_string.h" #include "common/common/logger.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/upstream_impl.h" +#include "extensions/clusters/well_known_names.h" + namespace Envoy { namespace Upstream { @@ -116,5 +119,17 @@ class OriginalDstCluster : public ClusterImplBase { Event::TimerPtr cleanup_timer_; }; +class OriginalDstClusterFactory : public ClusterFactoryImplBase { +public: + OriginalDstClusterFactory() + : ClusterFactoryImplBase(Extensions::Clusters::ClusterTypes::get().OriginalDst) {} + +private: + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override; +}; + } // namespace Upstream } // namespace Envoy diff --git a/source/common/upstream/resource_manager_impl.h b/source/common/upstream/resource_manager_impl.h index 35cf4bf87b7ff..3c564d4c8bb07 100644 --- a/source/common/upstream/resource_manager_impl.h +++ b/source/common/upstream/resource_manager_impl.h @@ -44,20 +44,21 @@ class ResourceManagerImpl : public ResourceManager { private: struct ResourceImpl : public Resource { ResourceImpl(uint64_t max, Runtime::Loader& runtime, const std::string& runtime_key, - Stats::Gauge& open_gauge) - : max_(max), runtime_(runtime), runtime_key_(runtime_key), open_gauge_(open_gauge) {} + Stats::BoolIndicator& circuit_breaker_open) + : max_(max), runtime_(runtime), runtime_key_(runtime_key), + circuit_breaker_open_(circuit_breaker_open) {} 
~ResourceImpl() { ASSERT(current_ == 0); } // Upstream::Resource bool canCreate() override { return current_ < max(); } void inc() override { current_++; - open_gauge_.set(canCreate() ? 0 : 1); + circuit_breaker_open_.set(!canCreate()); } void dec() override { ASSERT(current_ > 0); current_--; - open_gauge_.set(canCreate() ? 0 : 1); + circuit_breaker_open_.set(!canCreate()); } uint64_t max() override { return runtime_.snapshot().getInteger(runtime_key_, max_); } @@ -67,11 +68,10 @@ class ResourceManagerImpl : public ResourceManager { const std::string runtime_key_; /** - * A gauge to notify the live circuit breaker state. The gauge is set to 0 - * to notify that the circuit breaker is closed, or to 1 to notify that it - * is open. + * The live circuit breaker state: false when the circuit breaker is closed, + * true when open. */ - Stats::Gauge& open_gauge_; + Stats::BoolIndicator& circuit_breaker_open_; }; ResourceImpl connections_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index ea8f6f6170620..fe2a434286da8 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -607,15 +607,6 @@ ClusterInfoImpl::extensionProtocolOptions(const std::string& name) const { return nullptr; } -namespace { - -Stats::ScopePtr generateStatsScope(const envoy::api::v2::Cluster& config, Stats::Store& stats) { - return stats.createScope(fmt::format( - "cluster.{}.", config.alt_stat_name().empty() ? 
config.name() : config.alt_stat_name())); -} - -} // namespace - Network::TransportSocketFactoryPtr createTransportSocketFactory( const envoy::api::v2::Cluster& config, Server::Configuration::TransportSocketFactoryContext& factory_context) { @@ -640,93 +631,6 @@ Network::TransportSocketFactoryPtr createTransportSocketFactory( return config_factory.createTransportSocketFactory(*message, factory_context); } -ClusterSharedPtr ClusterImplBase::create( - const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Stats::Store& stats, - ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, - Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, - Server::Admin& admin, Singleton::Manager& singleton_manager, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api) { - std::unique_ptr new_cluster; - - // We make this a shared pointer to deal with the distinct ownership - // scenarios that can exist: in one case, we pass in the "default" - // DNS resolver that is owned by the Server::Instance. In the case - // where 'dns_resolvers' is specified, we have per-cluster DNS - // resolvers that are created here but ownership resides with - // StrictDnsClusterImpl/LogicalDnsCluster. 
- auto selected_dns_resolver = dns_resolver; - if (!cluster.dns_resolvers().empty()) { - const auto& resolver_addrs = cluster.dns_resolvers(); - std::vector resolvers; - resolvers.reserve(resolver_addrs.size()); - for (const auto& resolver_addr : resolver_addrs) { - resolvers.push_back(Network::Address::resolveProtoAddress(resolver_addr)); - } - selected_dns_resolver = dispatcher.createDnsResolver(resolvers); - } - - auto stats_scope = generateStatsScope(cluster, stats); - Server::Configuration::TransportSocketFactoryContextImpl factory_context( - admin, ssl_context_manager, *stats_scope, cm, local_info, dispatcher, random, stats, - singleton_manager, tls, api); - - switch (cluster.type()) { - case envoy::api::v2::Cluster::STATIC: - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::STRICT_DNS: - new_cluster = std::make_unique(cluster, runtime, selected_dns_resolver, - factory_context, std::move(stats_scope), - added_via_api); - break; - case envoy::api::v2::Cluster::LOGICAL_DNS: - new_cluster = - std::make_unique(cluster, runtime, selected_dns_resolver, tls, - factory_context, std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::ORIGINAL_DST: - if (cluster.lb_policy() != envoy::api::v2::Cluster::ORIGINAL_DST_LB) { - throw EnvoyException(fmt::format( - "cluster: cluster type 'original_dst' may only be used with LB type 'original_dst_lb'")); - } - if (cluster.has_lb_subset_config() && cluster.lb_subset_config().subset_selectors_size() != 0) { - throw EnvoyException(fmt::format( - "cluster: cluster type 'original_dst' may not be used with lb_subset_config")); - } - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - case envoy::api::v2::Cluster::EDS: - if (!cluster.has_eds_cluster_config()) { - throw EnvoyException("cannot create an EDS cluster without an EDS config"); - } - - 
// We map SDS to EDS, since EDS provides backwards compatibility with SDS. - new_cluster = std::make_unique(cluster, runtime, factory_context, - std::move(stats_scope), added_via_api); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } - - if (!cluster.health_checks().empty()) { - // TODO(htuch): Need to support multiple health checks in v2. - if (cluster.health_checks().size() != 1) { - throw EnvoyException("Multiple health checks not supported"); - } else { - new_cluster->setHealthChecker(HealthCheckerFactory::create( - cluster.health_checks()[0], *new_cluster, runtime, random, dispatcher, log_manager)); - } - } - - new_cluster->setOutlierDetector(Outlier::DetectorImplFactory::createForCluster( - *new_cluster, cluster, dispatcher, runtime, outlier_event_logger)); - return std::move(new_cluster); -} - ClusterImplBase::ClusterImplBase( const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContext& factory_context, @@ -914,7 +818,7 @@ ClusterInfoImpl::ResourceManagers::ResourceManagers(const envoy::api::v2::Cluste ClusterCircuitBreakersStats ClusterInfoImpl::generateCircuitBreakersStats(Stats::Scope& scope, const std::string& stat_prefix) { std::string prefix(fmt::format("circuit_breakers.{}.", stat_prefix)); - return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE_PREFIX(scope, prefix))}; + return {ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_BOOL_INDICATOR_PREFIX(scope, prefix))}; } ResourceManagerImplPtr diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 4fa85a239ad66..878f8163c019b 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -599,14 +599,6 @@ createTransportSocketFactory(const envoy::api::v2::Cluster& config, class ClusterImplBase : public Cluster, protected Logger::Loggable { public: - static ClusterSharedPtr - create(const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Stats::Store& stats, - 
ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, - Ssl::ContextManager& ssl_context_manager, Runtime::Loader& runtime, - Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, - AccessLog::AccessLogManager& log_manager, const LocalInfo::LocalInfo& local_info, - Server::Admin& admin, Singleton::Manager& singleton_manager, - Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api, Api::Api& api); // Upstream::Cluster PrioritySet& prioritySet() override { return priority_set_; } const PrioritySet& prioritySet() const override { return priority_set_; } @@ -688,6 +680,8 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable; + /** * Manages PriorityState of a cluster. PriorityState is a per-priority binding of a set of hosts * with its corresponding locality weight map. This is useful to store priorities/hosts/localities diff --git a/source/docs/flow_control.md b/source/docs/flow_control.md index a0b3a46e768e8..0d9e6172ede05 100644 --- a/source/docs/flow_control.md +++ b/source/docs/flow_control.md @@ -33,7 +33,7 @@ Flow control for the upstream path is much the same. * The upstream `Network::ConnectionImpl::write_buffer_` buffers too much data. It calls `Network::ConnectionCallbacks::onAboveWriteBufferHighWatermark()`. - * The Network::TcpProxy::UpstreamCallbacks receives + * The `Network::TcpProxy::UpstreamCallbacks` receives `onAboveWriteBufferHighWatermark()` and calls `readDisable(true)` on the downstream connection. * When the upstream buffer is drained, it calls diff --git a/source/exe/main_common.h b/source/exe/main_common.h index 2af78ce27c71a..aa1821a57dc40 100644 --- a/source/exe/main_common.h +++ b/source/exe/main_common.h @@ -84,6 +84,8 @@ class MainCommon { public: MainCommon(int argc, const char* const* argv); bool run() { return base_.run(); } + // Only tests have a legitimate need for this today. 
+ Event::Dispatcher& dispatcherForTest() { return base_.server()->dispatcher(); } // Makes an admin-console request by path, calling handler() when complete. // The caller can initiate this from any thread, but it posts the request diff --git a/source/extensions/clusters/BUILD b/source/extensions/clusters/BUILD new file mode 100644 index 0000000000000..7a4780afbdab2 --- /dev/null +++ b/source/extensions/clusters/BUILD @@ -0,0 +1,18 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "well_known_names", + hdrs = ["well_known_names.h"], + deps = [ + "//source/common/config:well_known_names", + "//source/common/singleton:const_singleton", + ], +) diff --git a/source/extensions/clusters/well_known_names.h b/source/extensions/clusters/well_known_names.h new file mode 100644 index 0000000000000..820c612280680 --- /dev/null +++ b/source/extensions/clusters/well_known_names.h @@ -0,0 +1,37 @@ +#pragma once + +#include "common/config/well_known_names.h" + +namespace Envoy { +namespace Extensions { +namespace Clusters { + +/** + * Well-known cluster types, this supersede the service discovery types + */ +class ClusterTypeValues { +public: + // Static clusters (cluster that have a fixed number of hosts with resolved IP addresses). + const std::string Static = "envoy.cluster.static"; + + // Strict DNS (cluster that periodic DNS resolution and updates the host member set if the DNS + // members change). + const std::string StrictDns = "envoy.cluster.strict_dns"; + + // Logical DNS (cluster that creates a single logical host that wraps an async DNS resolver). + const std::string LogicalDns = "envoy.cluster.logical_dns"; + + // Endpoint Discovery Service (dynamic cluster that reads host information from the Endpoint + // Discovery Service). 
+ const std::string Eds = "envoy.cluster.eds"; + + // Original destination (dynamic cluster that automatically adds hosts as needed based on the + // original destination address of the downstream connection). + const std::string OriginalDst = "envoy.cluster.original_dst"; +}; + +using ClusterTypes = ConstSingleton; + +} // namespace Clusters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc index c7703ba29e1f0..0a012b9116015 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.cc @@ -10,8 +10,7 @@ namespace HttpFilters { namespace GrpcHttp1ReverseBridge { Http::FilterFactoryCb Config::createFilterFactoryFromProtoTyped( - const envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& - config, + const envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& config, const std::string&, Server::Configuration::FactoryContext&) { return [config](Envoy::Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamFilter( diff --git a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h index 803f5660e5051..073fbb5e958a0 100644 --- a/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h +++ b/source/extensions/filters/http/grpc_http1_reverse_bridge/config.h @@ -11,15 +11,13 @@ namespace Extensions { namespace HttpFilters { namespace GrpcHttp1ReverseBridge { -class Config - : public Common::FactoryBase< - envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig> { +class Config : public Common::FactoryBase< + envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig> { public: Config() : 
FactoryBase(HttpFilterNames::get().GrpcHttp1ReverseBridge) {} Http::FilterFactoryCb createFilterFactoryFromProtoTyped( - const envoy::extensions::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& - config, + const envoy::config::filter::http::grpc_http1_reverse_bridge::v2alpha1::FilterConfig& config, const std::string& stat_prefix, Envoy::Server::Configuration::FactoryContext& context) override; }; diff --git a/source/extensions/filters/network/common/redis/BUILD b/source/extensions/filters/network/common/redis/BUILD new file mode 100644 index 0000000000000..c8b07c4ceb5bf --- /dev/null +++ b/source/extensions/filters/network/common/redis/BUILD @@ -0,0 +1,64 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_library( + name = "codec_interface", + hdrs = ["codec.h"], + deps = ["//include/envoy/buffer:buffer_interface"], +) + +envoy_cc_library( + name = "codec_lib", + srcs = ["codec_impl.cc"], + hdrs = ["codec_impl.h"], + deps = [ + ":codec_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/common/common:stack_array", + "//source/common/common:utility_lib", + ], +) + +envoy_cc_library( + name = "supported_commands_lib", + hdrs = ["supported_commands.h"], + deps = [ + "//source/common/common:macros", + ], +) + +envoy_cc_library( + name = "client_interface", + hdrs = ["client.h"], + deps = [ + ":codec_lib", + "//include/envoy/upstream:cluster_manager_interface", + ], +) + +envoy_cc_library( + name = "client_lib", + srcs = ["client_impl.cc"], + hdrs = ["client_impl.h"], + deps = [ + ":client_interface", + ":codec_lib", + "//include/envoy/router:router_interface", + "//include/envoy/thread_local:thread_local_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/common/network:filter_lib", + 
"//source/common/protobuf:utility_lib", + "//source/common/upstream:load_balancer_lib", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + ], +) diff --git a/source/extensions/filters/network/common/redis/client.h b/source/extensions/filters/network/common/redis/client.h new file mode 100644 index 0000000000000..e7dde1a2f0b92 --- /dev/null +++ b/source/extensions/filters/network/common/redis/client.h @@ -0,0 +1,125 @@ +#pragma once + +#include "envoy/upstream/cluster_manager.h" + +#include "extensions/filters/network/common/redis/codec_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +/** + * A handle to an outbound request. + */ +class PoolRequest { +public: + virtual ~PoolRequest() {} + + /** + * Cancel the request. No further request callbacks will be called. + */ + virtual void cancel() PURE; +}; + +/** + * Outbound request callbacks. + */ +class PoolCallbacks { +public: + virtual ~PoolCallbacks() {} + + /** + * Called when a pipelined response is received. + * @param value supplies the response which is now owned by the callee. + */ + virtual void onResponse(RespValuePtr&& value) PURE; + + /** + * Called when a network/protocol error occurs and there is no response. + */ + virtual void onFailure() PURE; +}; + +/** + * A single redis client connection. + */ +class Client : public Event::DeferredDeletable { +public: + virtual ~Client() {} + + /** + * Adds network connection callbacks to the underlying network connection. + */ + virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE; + + /** + * Closes the underlying network connection. + */ + virtual void close() PURE; + + /** + * Make a pipelined request to the remote redis server. + * @param request supplies the RESP request to make. + * @param callbacks supplies the request callbacks. 
+ * @return PoolRequest* a handle to the active request or nullptr if the request could not be made + * for some reason. + */ + virtual PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) PURE; +}; + +typedef std::unique_ptr ClientPtr; + +/** + * Configuration for a redis connection pool. + */ +class Config { +public: + virtual ~Config() {} + + /** + * @return std::chrono::milliseconds the timeout for an individual redis operation. Currently, + * all operations use the same timeout. + */ + virtual std::chrono::milliseconds opTimeout() const PURE; + + /** + * @return bool disable outlier events even if the cluster has it enabled. This is used by the + * healthchecker's connection pool to avoid double counting active healthcheck operations as + * passive healthcheck operations. + */ + virtual bool disableOutlierEvents() const PURE; + + /** + * @return when enabled, a hash tagging function will be used to guarantee that keys with the + * same hash tag will be forwarded to the same upstream. + */ + virtual bool enableHashtagging() const PURE; +}; + +/** + * A factory for individual redis client connections. + */ +class ClientFactory { +public: + virtual ~ClientFactory() {} + + /** + * Create a client given an upstream host. + * @param host supplies the upstream host. + * @param dispatcher supplies the owning thread's dispatcher. + * @param config supplies the connection pool configuration. + * @return ClientPtr a new connection pool client. 
+ */ + virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + const Config& config) PURE; +}; + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/common/redis/client_impl.cc b/source/extensions/filters/network/common/redis/client_impl.cc new file mode 100644 index 0000000000000..a7d778f8d8b30 --- /dev/null +++ b/source/extensions/filters/network/common/redis/client_impl.cc @@ -0,0 +1,191 @@ +#include "extensions/filters/network/common/redis/client_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +ConfigImpl::ConfigImpl( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) + : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), + enable_hashtagging_(config.enable_hashtagging()) {} + +ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, + const Config& config) { + + std::unique_ptr client( + new ClientImpl(host, dispatcher, std::move(encoder), decoder_factory, config)); + client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_; + client->connection_->addConnectionCallbacks(*client); + client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)}); + client->connection_->connect(); + client->connection_->noDelay(true); + return std::move(client); +} + +ClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, const Config& config) + : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), + config_(config), + connect_or_op_timer_(dispatcher.createTimer([this]() -> void { 
onConnectOrOpTimeout(); })) { + host->cluster().stats().upstream_cx_total_.inc(); + host->stats().cx_total_.inc(); + host->cluster().stats().upstream_cx_active_.inc(); + host->stats().cx_active_.inc(); + connect_or_op_timer_->enableTimer(host->cluster().connectTimeout()); +} + +ClientImpl::~ClientImpl() { + ASSERT(pending_requests_.empty()); + ASSERT(connection_->state() == Network::Connection::State::Closed); + host_->cluster().stats().upstream_cx_active_.dec(); + host_->stats().cx_active_.dec(); +} + +void ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } + +PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) { + ASSERT(connection_->state() == Network::Connection::State::Open); + + pending_requests_.emplace_back(*this, callbacks); + encoder_->encode(request, encoder_buffer_); + connection_->write(encoder_buffer_, false); + + // Only boost the op timeout if: + // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer + // will be reset when/if connection occurs. This allows a relatively long connection spin up + // time for example if TLS is being used. + // - This is the first request on the pipeline. Otherwise the timeout would effectively start on + // the last operation. 
+ if (connected_ && pending_requests_.size() == 1) { + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + return &pending_requests_.back(); +} + +void ClientImpl::onConnectOrOpTimeout() { + putOutlierEvent(Upstream::Outlier::Result::TIMEOUT); + if (connected_) { + host_->cluster().stats().upstream_rq_timeout_.inc(); + host_->stats().rq_timeout_.inc(); + } else { + host_->cluster().stats().upstream_cx_connect_timeout_.inc(); + host_->stats().cx_connect_fail_.inc(); + } + + connection_->close(Network::ConnectionCloseType::NoFlush); +} + +void ClientImpl::onData(Buffer::Instance& data) { + try { + decoder_->decode(data); + } catch (ProtocolError&) { + putOutlierEvent(Upstream::Outlier::Result::REQUEST_FAILED); + host_->cluster().stats().upstream_cx_protocol_error_.inc(); + host_->stats().rq_error_.inc(); + connection_->close(Network::ConnectionCloseType::NoFlush); + } +} + +void ClientImpl::putOutlierEvent(Upstream::Outlier::Result result) { + if (!config_.disableOutlierEvents()) { + host_->outlierDetector().putResult(result); + } +} + +void ClientImpl::onEvent(Network::ConnectionEvent event) { + if (event == Network::ConnectionEvent::RemoteClose || + event == Network::ConnectionEvent::LocalClose) { + if (!pending_requests_.empty()) { + host_->cluster().stats().upstream_cx_destroy_with_active_rq_.inc(); + if (event == Network::ConnectionEvent::RemoteClose) { + putOutlierEvent(Upstream::Outlier::Result::SERVER_FAILURE); + host_->cluster().stats().upstream_cx_destroy_remote_with_active_rq_.inc(); + } + if (event == Network::ConnectionEvent::LocalClose) { + host_->cluster().stats().upstream_cx_destroy_local_with_active_rq_.inc(); + } + } + + while (!pending_requests_.empty()) { + PendingRequest& request = pending_requests_.front(); + if (!request.canceled_) { + request.callbacks_.onFailure(); + } else { + host_->cluster().stats().upstream_rq_cancelled_.inc(); + } + pending_requests_.pop_front(); + } + + connect_or_op_timer_->disableTimer(); + } else if 
(event == Network::ConnectionEvent::Connected) { + connected_ = true; + ASSERT(!pending_requests_.empty()); + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + if (event == Network::ConnectionEvent::RemoteClose && !connected_) { + host_->cluster().stats().upstream_cx_connect_fail_.inc(); + host_->stats().cx_connect_fail_.inc(); + } +} + +void ClientImpl::onRespValue(RespValuePtr&& value) { + ASSERT(!pending_requests_.empty()); + PendingRequest& request = pending_requests_.front(); + if (!request.canceled_) { + request.callbacks_.onResponse(std::move(value)); + } else { + host_->cluster().stats().upstream_rq_cancelled_.inc(); + } + pending_requests_.pop_front(); + + // If there are no remaining ops in the pipeline we need to disable the timer. + // Otherwise we boost the timer since we are receiving responses and there are more to flush out. + if (pending_requests_.empty()) { + connect_or_op_timer_->disableTimer(); + } else { + connect_or_op_timer_->enableTimer(config_.opTimeout()); + } + + putOutlierEvent(Upstream::Outlier::Result::SUCCESS); +} + +ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks) + : parent_(parent), callbacks_(callbacks) { + parent.host_->cluster().stats().upstream_rq_total_.inc(); + parent.host_->stats().rq_total_.inc(); + parent.host_->cluster().stats().upstream_rq_active_.inc(); + parent.host_->stats().rq_active_.inc(); +} + +ClientImpl::PendingRequest::~PendingRequest() { + parent_.host_->cluster().stats().upstream_rq_active_.dec(); + parent_.host_->stats().rq_active_.dec(); +} + +void ClientImpl::PendingRequest::cancel() { + // If we get a cancellation, we just mark the pending request as cancelled, and then we drop + // the response as it comes through. There is no reason to blow away the connection when the + // remote is already responding as fast as possible. 
+ canceled_ = true; +} + +ClientFactoryImpl ClientFactoryImpl::instance_; + +ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, + Event::Dispatcher& dispatcher, const Config& config) { + return ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, + config); +} + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/common/redis/client_impl.h b/source/extensions/filters/network/common/redis/client_impl.h new file mode 100644 index 0000000000000..8fa9e817df9df --- /dev/null +++ b/source/extensions/filters/network/common/redis/client_impl.h @@ -0,0 +1,123 @@ +#pragma once + +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" +#include "envoy/thread_local/thread_local.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/buffer/buffer_impl.h" +#include "common/common/hash.h" +#include "common/network/filter_impl.h" +#include "common/protobuf/utility.h" +#include "common/upstream/load_balancer_impl.h" + +#include "extensions/filters/network/common/redis/client.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +// TODO(mattklein123): Circuit breaking +// TODO(rshriram): Fault injection + +class ConfigImpl : public Config { +public: + ConfigImpl( + const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); + + bool disableOutlierEvents() const override { return false; } + std::chrono::milliseconds opTimeout() const override { return op_timeout_; } + bool enableHashtagging() const override { return enable_hashtagging_; } + +private: + const std::chrono::milliseconds op_timeout_; + const bool enable_hashtagging_; +}; + +class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { 
+public: + static ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, + EncoderPtr&& encoder, DecoderFactory& decoder_factory, + const Config& config); + + ~ClientImpl(); + + // Client + void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override { + connection_->addConnectionCallbacks(callbacks); + } + void close() override; + PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override; + +private: + struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { + UpstreamReadFilter(ClientImpl& parent) : parent_(parent) {} + + // Network::ReadFilter + Network::FilterStatus onData(Buffer::Instance& data, bool) override { + parent_.onData(data); + return Network::FilterStatus::Continue; + } + + ClientImpl& parent_; + }; + + struct PendingRequest : public PoolRequest { + PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks); + ~PendingRequest(); + + // PoolRequest + void cancel() override; + + ClientImpl& parent_; + PoolCallbacks& callbacks_; + bool canceled_{}; + }; + + ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder, + DecoderFactory& decoder_factory, const Config& config); + void onConnectOrOpTimeout(); + void onData(Buffer::Instance& data); + void putOutlierEvent(Upstream::Outlier::Result result); + + // DecoderCallbacks + void onRespValue(RespValuePtr&& value) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override {} + void onBelowWriteBufferLowWatermark() override {} + + Upstream::HostConstSharedPtr host_; + Network::ClientConnectionPtr connection_; + EncoderPtr encoder_; + Buffer::OwnedImpl encoder_buffer_; + DecoderPtr decoder_; + const Config& config_; + std::list pending_requests_; + Event::TimerPtr connect_or_op_timer_; + bool connected_{}; +}; + +class ClientFactoryImpl : public ClientFactory { +public: + // 
Redis::Client::ClientFactory
+13,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { std::string RespValue::toString() const { switch (type_) { @@ -421,7 +422,8 @@ void EncoderImpl::encodeSimpleString(const std::string& string, Buffer::Instance out.add("\r\n", 2); } -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/codec_impl.h b/source/extensions/filters/network/common/redis/codec_impl.h similarity index 94% rename from source/extensions/filters/network/redis_proxy/codec_impl.h rename to source/extensions/filters/network/common/redis/codec_impl.h index 0c214baafdcec..678e537883f32 100644 --- a/source/extensions/filters/network/redis_proxy/codec_impl.h +++ b/source/extensions/filters/network/common/redis/codec_impl.h @@ -7,12 +7,13 @@ #include "common/common/logger.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { /** * Decoder implementation of https://redis.io/topics/protocol @@ -91,7 +92,8 @@ class EncoderImpl : public Encoder { void encodeSimpleString(const std::string& string, Buffer::Instance& out); }; -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/redis_proxy/supported_commands.h b/source/extensions/filters/network/common/redis/supported_commands.h similarity index 96% rename from source/extensions/filters/network/redis_proxy/supported_commands.h rename to source/extensions/filters/network/common/redis/supported_commands.h index 9561576cfb141..54c06d0bcb025 100644 --- 
a/source/extensions/filters/network/redis_proxy/supported_commands.h +++ b/source/extensions/filters/network/common/redis/supported_commands.h @@ -8,7 +8,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { struct SupportedCommands { /** @@ -60,7 +61,8 @@ struct SupportedCommands { static const std::string& ping() { CONSTRUCT_ON_FIRST_USE(std::string, "ping"); } }; -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 9dbf53387032f..e5069bbf39e69 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -321,11 +321,11 @@ HttpConnectionManagerConfig::createCodec(Network::Connection& connection, Http::ServerConnectionCallbacks& callbacks) { switch (codec_type_) { case CodecType::HTTP1: - return Http::ServerConnectionPtr{ - new Http::Http1::ServerConnectionImpl(connection, callbacks, http1_settings_)}; + return std::make_unique( + connection, callbacks, http1_settings_, maxRequestHeadersKb()); case CodecType::HTTP2: - return Http::ServerConnectionPtr{new Http::Http2::ServerConnectionImpl( - connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb())}; + return std::make_unique( + connection, callbacks, context_.scope(), http2_settings_, maxRequestHeadersKb()); case CodecType::AUTO: return Http::ConnectionManagerUtility::autoCreateCodec(connection, data, callbacks, context_.scope(), http1_settings_, diff --git a/source/extensions/filters/network/redis_proxy/BUILD b/source/extensions/filters/network/redis_proxy/BUILD index 2d3767310c6f2..8cd0a234462e0 100644 --- a/source/extensions/filters/network/redis_proxy/BUILD +++ 
b/source/extensions/filters/network/redis_proxy/BUILD @@ -12,37 +12,21 @@ load( envoy_package() -envoy_cc_library( - name = "codec_interface", - hdrs = ["codec.h"], - deps = ["//include/envoy/buffer:buffer_interface"], -) - envoy_cc_library( name = "command_splitter_interface", hdrs = ["command_splitter.h"], - deps = [":codec_interface"], + deps = [ + "//source/extensions/filters/network/common/redis:codec_interface", + ], ) envoy_cc_library( name = "conn_pool_interface", hdrs = ["conn_pool.h"], deps = [ - ":codec_interface", "//include/envoy/upstream:cluster_manager_interface", - ], -) - -envoy_cc_library( - name = "codec_lib", - srcs = ["codec_impl.cc"], - hdrs = ["codec_impl.h"], - deps = [ - ":codec_interface", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - "//source/common/common:stack_array", - "//source/common/common:utility_lib", + "//source/extensions/filters/network/common/redis:client_interface", + "//source/extensions/filters/network/common/redis:codec_interface", ], ) @@ -53,13 +37,14 @@ envoy_cc_library( deps = [ ":command_splitter_interface", ":conn_pool_interface", - ":supported_commands_lib", "//include/envoy/stats:stats_macros", "//include/envoy/stats:timespan", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", "//source/common/common:to_lower_table_lib", "//source/common/common:utility_lib", + "//source/extensions/filters/network/common/redis:client_lib", + "//source/extensions/filters/network/common/redis:supported_commands_lib", ], ) @@ -68,7 +53,6 @@ envoy_cc_library( srcs = ["conn_pool_impl.cc"], hdrs = ["conn_pool_impl.h"], deps = [ - ":codec_lib", ":conn_pool_interface", "//include/envoy/router:router_interface", "//include/envoy/thread_local:thread_local_interface", @@ -78,6 +62,7 @@ envoy_cc_library( "//source/common/network:filter_lib", "//source/common/protobuf:utility_lib", "//source/common/upstream:load_balancer_lib", + 
"//source/extensions/filters/network/common/redis:client_lib", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) @@ -87,7 +72,6 @@ envoy_cc_library( srcs = ["proxy_filter.cc"], hdrs = ["proxy_filter.h"], deps = [ - ":codec_interface", ":command_splitter_interface", "//include/envoy/network:drain_decision_interface", "//include/envoy/network:filter_interface", @@ -95,18 +79,11 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/config:utility_lib", + "//source/extensions/filters/network/common/redis:codec_interface", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", ], ) -envoy_cc_library( - name = "supported_commands_lib", - hdrs = ["supported_commands.h"], - deps = [ - "//source/common/common:macros", - ], -) - envoy_cc_library( name = "config", srcs = ["config.cc"], @@ -116,7 +93,7 @@ envoy_cc_library( "//source/common/config:filter_json_lib", "//source/extensions/filters/network:well_known_names", "//source/extensions/filters/network/common:factory_base_lib", - "//source/extensions/filters/network/redis_proxy:codec_lib", + "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", diff --git a/source/extensions/filters/network/redis_proxy/command_splitter.h b/source/extensions/filters/network/redis_proxy/command_splitter.h index 7c1a78289aaa6..6f517c4cb42c6 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter.h @@ -4,7 +4,7 @@ #include "envoy/common/pure.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { @@ -38,7 +38,7 @@ class SplitCallbacks { * 
Called when the response is ready. * @param value supplies the response which is now owned by the callee. */ - virtual void onResponse(RespValuePtr&& value) PURE; + virtual void onResponse(Common::Redis::RespValuePtr&& value) PURE; }; /** @@ -57,7 +57,8 @@ class Instance { * been satisfied (via onResponse() being called). The splitter ALWAYS calls * onResponse() for a given request. */ - virtual SplitRequestPtr makeRequest(const RespValue& request, SplitCallbacks& callbacks) PURE; + virtual SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + SplitCallbacks& callbacks) PURE; }; } // namespace CommandSplitter diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc index 52d6f6beeebb8..763698458d9f6 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.cc @@ -10,7 +10,7 @@ #include "common/common/assert.h" #include "common/common/fmt.h" -#include "extensions/filters/network/redis_proxy/supported_commands.h" +#include "extensions/filters/network/common/redis/supported_commands.h" namespace Envoy { namespace Extensions { @@ -18,15 +18,15 @@ namespace NetworkFilters { namespace RedisProxy { namespace CommandSplitter { -RespValuePtr Utility::makeError(const std::string& error) { - RespValuePtr response(new RespValue()); - response->type(RespType::Error); +Common::Redis::RespValuePtr Utility::makeError(const std::string& error) { + Common::Redis::RespValuePtr response(new Common::Redis::RespValue()); + response->type(Common::Redis::RespType::Error); response->asString() = error; return response; } void SplitRequestBase::onWrongNumberOfArguments(SplitCallbacks& callbacks, - const RespValue& request) { + const Common::Redis::RespValue& request) { callbacks.onResponse(Utility::makeError( fmt::format("wrong number of arguments for '{}' command", 
request.asArray()[0].asString()))); } @@ -42,7 +42,7 @@ void SplitRequestBase::updateStats(const bool success) { SingleServerRequest::~SingleServerRequest() { ASSERT(!handle_); } -void SingleServerRequest::onResponse(RespValuePtr&& response) { +void SingleServerRequest::onResponse(Common::Redis::RespValuePtr&& response) { handle_ = nullptr; updateStats(true); callbacks_.onResponse(std::move(response)); @@ -60,8 +60,9 @@ void SingleServerRequest::cancel() { } SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { + const Common::Redis::RespValue& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source) { std::unique_ptr request_ptr{ new SimpleRequest(callbacks, command_stats, time_source)}; @@ -76,8 +77,9 @@ SplitRequestPtr SimpleRequest::create(ConnPool::Instance& conn_pool, } SplitRequestPtr EvalRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { + const Common::Redis::RespValue& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source) { // EVAL looks like: EVAL script numkeys key [key ...] arg [arg ...] // Ensure there are at least three args to the command or it cannot be hashed. 
@@ -121,24 +123,25 @@ void FragmentedRequest::onChildFailure(uint32_t index) { } SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { + const Common::Redis::RespValue& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source) { std::unique_ptr request_ptr{new MGETRequest(callbacks, command_stats, time_source)}; request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - request_ptr->pending_response_->type(RespType::Array); - std::vector responses(request_ptr->num_pending_responses_); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::Array); + std::vector responses(request_ptr->num_pending_responses_); request_ptr->pending_response_->asArray().swap(responses); - std::vector values(2); - values[0].type(RespType::BulkString); + std::vector values(2); + values[0].type(Common::Redis::RespType::BulkString); values[0].asString() = "get"; - values[1].type(RespType::BulkString); - RespValue single_mget; - single_mget.type(RespType::Array); + values[1].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_mget; + single_mget.type(Common::Redis::RespType::Array); single_mget.asArray().swap(values); for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { @@ -157,28 +160,28 @@ SplitRequestPtr MGETRequest::create(ConnPool::Instance& conn_pool, return request_ptr->num_pending_responses_ > 0 ? 
std::move(request_ptr) : nullptr; } -void MGETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +void MGETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { pending_requests_[index].handle_ = nullptr; pending_response_->asArray()[index].type(value->type()); switch (value->type()) { - case RespType::Array: - case RespType::Integer: - case RespType::SimpleString: { - pending_response_->asArray()[index].type(RespType::Error); + case Common::Redis::RespType::Array: + case Common::Redis::RespType::Integer: + case Common::Redis::RespType::SimpleString: { + pending_response_->asArray()[index].type(Common::Redis::RespType::Error); pending_response_->asArray()[index].asString() = Response::get().UpstreamProtocolError; error_count_++; break; } - case RespType::Error: { + case Common::Redis::RespType::Error: { error_count_++; FALLTHRU; } - case RespType::BulkString: { + case Common::Redis::RespType::BulkString: { pending_response_->asArray()[index].asString().swap(value->asString()); break; } - case RespType::Null: + case Common::Redis::RespType::Null: break; } @@ -191,8 +194,9 @@ void MGETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { } SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, SplitCallbacks& callbacks, - CommandStats& command_stats, TimeSource& time_source) { + const Common::Redis::RespValue& incoming_request, + SplitCallbacks& callbacks, CommandStats& command_stats, + TimeSource& time_source) { if ((incoming_request.asArray().size() - 1) % 2 != 0) { onWrongNumberOfArguments(callbacks, incoming_request); command_stats.error_.inc(); @@ -203,16 +207,16 @@ SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, request_ptr->num_pending_responses_ = (incoming_request.asArray().size() - 1) / 2; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - 
request_ptr->pending_response_->type(RespType::SimpleString); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::SimpleString); - std::vector values(3); - values[0].type(RespType::BulkString); + std::vector values(3); + values[0].type(Common::Redis::RespType::BulkString); values[0].asString() = "set"; - values[1].type(RespType::BulkString); - values[2].type(RespType::BulkString); - RespValue single_mset; - single_mset.type(RespType::Array); + values[1].type(Common::Redis::RespType::BulkString); + values[2].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_mset; + single_mset.type(Common::Redis::RespType::Array); single_mset.asArray().swap(values); uint64_t fragment_index = 0; @@ -234,11 +238,11 @@ SplitRequestPtr MSETRequest::create(ConnPool::Instance& conn_pool, return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; } -void MSETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +void MSETRequest::onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) { pending_requests_[index].handle_ = nullptr; switch (value->type()) { - case RespType::SimpleString: { + case Common::Redis::RespType::SimpleString: { if (value->asString() == Response::get().OK) { break; } @@ -264,7 +268,7 @@ void MSETRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { } SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, - const RespValue& incoming_request, + const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) { @@ -274,15 +278,15 @@ SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, request_ptr->num_pending_responses_ = incoming_request.asArray().size() - 1; request_ptr->pending_requests_.reserve(request_ptr->num_pending_responses_); - request_ptr->pending_response_ = std::make_unique(); - 
request_ptr->pending_response_->type(RespType::Integer); + request_ptr->pending_response_ = std::make_unique(); + request_ptr->pending_response_->type(Common::Redis::RespType::Integer); - std::vector values(2); - values[0].type(RespType::BulkString); + std::vector values(2); + values[0].type(Common::Redis::RespType::BulkString); values[0].asString() = incoming_request.asArray()[0].asString(); - values[1].type(RespType::BulkString); - RespValue single_fragment; - single_fragment.type(RespType::Array); + values[1].type(Common::Redis::RespType::BulkString); + Common::Redis::RespValue single_fragment; + single_fragment.type(Common::Redis::RespType::Array); single_fragment.asArray().swap(values); for (uint64_t i = 1; i < incoming_request.asArray().size(); i++) { @@ -302,11 +306,12 @@ SplitRequestPtr SplitKeysSumResultRequest::create(ConnPool::Instance& conn_pool, return request_ptr->num_pending_responses_ > 0 ? std::move(request_ptr) : nullptr; } -void SplitKeysSumResultRequest::onChildResponse(RespValuePtr&& value, uint32_t index) { +void SplitKeysSumResultRequest::onChildResponse(Common::Redis::RespValuePtr&& value, + uint32_t index) { pending_requests_[index].handle_ = nullptr; switch (value->type()) { - case RespType::Integer: { + case Common::Redis::RespType::Integer: { total_ += value->asInteger(); break; } @@ -336,24 +341,26 @@ InstanceImpl::InstanceImpl(ConnPool::InstancePtr&& conn_pool, Stats::Scope& scop split_keys_sum_result_handler_(*conn_pool_), stats_{ALL_COMMAND_SPLITTER_STATS(POOL_COUNTER_PREFIX(scope, stat_prefix + "splitter."))}, time_source_(time_source) { - for (const std::string& command : SupportedCommands::simpleCommands()) { + for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { addHandler(scope, stat_prefix, command, simple_command_handler_); } - for (const std::string& command : SupportedCommands::evalCommands()) { + for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) { 
addHandler(scope, stat_prefix, command, eval_command_handler_); } - for (const std::string& command : SupportedCommands::hashMultipleSumResultCommands()) { + for (const std::string& command : + Common::Redis::SupportedCommands::hashMultipleSumResultCommands()) { addHandler(scope, stat_prefix, command, split_keys_sum_result_handler_); } - addHandler(scope, stat_prefix, SupportedCommands::mget(), mget_handler_); - addHandler(scope, stat_prefix, SupportedCommands::mset(), mset_handler_); + addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mget(), mget_handler_); + addHandler(scope, stat_prefix, Common::Redis::SupportedCommands::mset(), mset_handler_); } -SplitRequestPtr InstanceImpl::makeRequest(const RespValue& request, SplitCallbacks& callbacks) { - if (request.type() != RespType::Array) { +SplitRequestPtr InstanceImpl::makeRequest(const Common::Redis::RespValue& request, + SplitCallbacks& callbacks) { + if (request.type() != Common::Redis::RespType::Array) { onInvalidRequest(callbacks); return nullptr; } @@ -361,10 +368,10 @@ SplitRequestPtr InstanceImpl::makeRequest(const RespValue& request, SplitCallbac std::string to_lower_string(request.asArray()[0].asString()); to_lower_table_.toLowerCase(to_lower_string); - if (to_lower_string == SupportedCommands::ping()) { + if (to_lower_string == Common::Redis::SupportedCommands::ping()) { // Respond to PING locally. 
- RespValuePtr pong(new RespValue()); - pong->type(RespType::SimpleString); + Common::Redis::RespValuePtr pong(new Common::Redis::RespValue()); + pong->type(Common::Redis::RespType::SimpleString); pong->asString() = "PONG"; callbacks.onResponse(std::move(pong)); return nullptr; @@ -376,8 +383,8 @@ SplitRequestPtr InstanceImpl::makeRequest(const RespValue& request, SplitCallbac return nullptr; } - for (const RespValue& value : request.asArray()) { - if (value.type() != RespType::BulkString) { + for (const Common::Redis::RespValue& value : request.asArray()) { + if (value.type() != Common::Redis::RespType::BulkString) { onInvalidRequest(callbacks); return nullptr; } diff --git a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h index 009871bb63e41..e79d9510a2922 100644 --- a/source/extensions/filters/network/redis_proxy/command_splitter_impl.h +++ b/source/extensions/filters/network/redis_proxy/command_splitter_impl.h @@ -14,6 +14,7 @@ #include "common/common/utility.h" #include "common/singleton/const_singleton.h" +#include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" @@ -35,7 +36,7 @@ typedef ConstSingleton Response; class Utility { public: - static RespValuePtr makeError(const std::string& error); + static Common::Redis::RespValuePtr makeError(const std::string& error); }; /** @@ -60,7 +61,7 @@ class CommandHandler { public: virtual ~CommandHandler() {} - virtual SplitRequestPtr startRequest(const RespValue& request, SplitCallbacks& callbacks, + virtual SplitRequestPtr startRequest(const Common::Redis::RespValue& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) PURE; }; @@ -73,7 +74,7 @@ class CommandHandlerBase { class SplitRequestBase : public SplitRequest { protected: - static void 
onWrongNumberOfArguments(SplitCallbacks& callbacks, const RespValue& request); + static void onWrongNumberOfArguments(SplitCallbacks& callbacks, const Common::Redis::RespValue& request); void updateStats(const bool success); SplitRequestBase(CommandStats& command_stats, TimeSource& time_source) @@ -87,12 +88,12 @@ class SplitRequestBase : public SplitRequest { /** * SingleServerRequest is a base class for commands that hash to a single backend. */ -class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallbacks { +class SingleServerRequest : public SplitRequestBase, public Common::Redis::Client::PoolCallbacks { public: ~SingleServerRequest(); - // RedisProxy::ConnPool::PoolCallbacks - void onResponse(RespValuePtr&& response) override; + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&& response) override; void onFailure() override; // RedisProxy::CommandSplitter::SplitRequest @@ -104,7 +105,7 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} SplitCallbacks& callbacks_; - ConnPool::PoolRequest* handle_{}; + Common::Redis::Client::PoolRequest* handle_{}; }; /** @@ -112,7 +113,7 @@ class SingleServerRequest : public SplitRequestBase, public ConnPool::PoolCallba */ class SimpleRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source); @@ -126,7 +127,7 @@ class SimpleRequest : public SingleServerRequest { */ class EvalRequest : public SingleServerRequest { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, const 
Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source); @@ -151,25 +152,25 @@ class FragmentedRequest : public SplitRequestBase { FragmentedRequest(SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) : SplitRequestBase(command_stats, time_source), callbacks_(callbacks) {} - struct PendingRequest : public ConnPool::PoolCallbacks { + struct PendingRequest : public Common::Redis::Client::PoolCallbacks { PendingRequest(FragmentedRequest& parent, uint32_t index) : parent_(parent), index_(index) {} - // RedisProxy::ConnPool::PoolCallbacks - void onResponse(RespValuePtr&& value) override { + // Common::Redis::Client::PoolCallbacks + void onResponse(Common::Redis::RespValuePtr&& value) override { parent_.onChildResponse(std::move(value), index_); } void onFailure() override { parent_.onChildFailure(index_); } FragmentedRequest& parent_; const uint32_t index_; - ConnPool::PoolRequest* handle_{}; + Common::Redis::Client::PoolRequest* handle_{}; }; - virtual void onChildResponse(RespValuePtr&& value, uint32_t index) PURE; + virtual void onChildResponse(Common::Redis::RespValuePtr&& value, uint32_t index) PURE; void onChildFailure(uint32_t index); SplitCallbacks& callbacks_; - RespValuePtr pending_response_; + Common::Redis::RespValuePtr pending_response_; std::vector pending_requests_; uint32_t num_pending_responses_; uint32_t error_count_{0}; @@ -181,7 +182,7 @@ class FragmentedRequest : public SplitRequestBase { */ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source); @@ -190,7 +191,7 @@ class MGETRequest : public FragmentedRequest, Logger::Loggable { public: - static 
SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source); @@ -211,7 +212,7 @@ class SplitKeysSumResultRequest : public FragmentedRequest, Logger::Loggable { public: - static SplitRequestPtr create(ConnPool::Instance& conn_pool, const RespValue& incoming_request, + static SplitRequestPtr create(ConnPool::Instance& conn_pool, const Common::Redis::RespValue& incoming_request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source); @@ -232,7 +233,7 @@ class MSETRequest : public FragmentedRequest, Logger::Loggable class CommandHandlerFactory : public CommandHandler, CommandHandlerBase { public: CommandHandlerFactory(ConnPool::Instance& conn_pool) : CommandHandlerBase(conn_pool) {} - SplitRequestPtr startRequest(const RespValue& request, SplitCallbacks& callbacks, + SplitRequestPtr startRequest(const Common::Redis::RespValue& request, SplitCallbacks& callbacks, CommandStats& command_stats, TimeSource& time_source) { return RequestClass::create(conn_pool_, request, callbacks, command_stats, time_source); } @@ -271,7 +272,8 @@ class InstanceImpl : public Instance, Logger::Loggable { const std::string& stat_prefix, TimeSource& time_source); // RedisProxy::CommandSplitter::Instance - SplitRequestPtr makeRequest(const RespValue& request, SplitCallbacks& callbacks) override; + SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + SplitCallbacks& callbacks) override; private: struct HandlerData { diff --git a/source/extensions/filters/network/redis_proxy/config.cc b/source/extensions/filters/network/redis_proxy/config.cc index fba73107f1bd9..a589c4031bb95 100644 --- a/source/extensions/filters/network/redis_proxy/config.cc +++ b/source/extensions/filters/network/redis_proxy/config.cc @@ -8,7 +8,8 @@ #include 
"common/config/filter_json.h" -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" #include "extensions/filters/network/redis_proxy/proxy_filter.h" @@ -28,15 +29,17 @@ Network::FilterFactoryCb RedisProxyFilterConfigFactory::createFilterFactoryFromP ProxyFilterConfigSharedPtr filter_config(std::make_shared( proto_config, context.scope(), context.drainDecision(), context.runtime())); - ConnPool::InstancePtr conn_pool(new ConnPool::InstanceImpl( - filter_config->cluster_name_, context.clusterManager(), - ConnPool::ClientFactoryImpl::instance_, context.threadLocal(), proto_config.settings())); + ConnPool::InstancePtr conn_pool( + new ConnPool::InstanceImpl(filter_config->cluster_name_, context.clusterManager(), + Common::Redis::Client::ClientFactoryImpl::instance_, + context.threadLocal(), proto_config.settings())); std::shared_ptr splitter(new CommandSplitter::InstanceImpl( std::move(conn_pool), context.scope(), filter_config->stat_prefix_, context.timeSource())); return [splitter, filter_config](Network::FilterManager& filter_manager) -> void { - DecoderFactoryImpl factory; + Common::Redis::DecoderFactoryImpl factory; filter_manager.addReadFilter(std::make_shared( - factory, EncoderPtr{new EncoderImpl()}, *splitter, filter_config)); + factory, Common::Redis::EncoderPtr{new Common::Redis::EncoderImpl()}, *splitter, + filter_config)); }; } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool.h b/source/extensions/filters/network/redis_proxy/conn_pool.h index c37a46dc185c6..442219e79b547 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool.h @@ -6,7 +6,8 @@ #include 
"envoy/upstream/cluster_manager.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/client.h" +#include "extensions/filters/network/common/redis/codec.h" namespace Envoy { namespace Extensions { @@ -14,112 +15,6 @@ namespace NetworkFilters { namespace RedisProxy { namespace ConnPool { -/** - * A handle to an outbound request. - */ -class PoolRequest { -public: - virtual ~PoolRequest() {} - - /** - * Cancel the request. No further request callbacks will be called. - */ - virtual void cancel() PURE; -}; - -/** - * Outbound request callbacks. - */ -class PoolCallbacks { -public: - virtual ~PoolCallbacks() {} - - /** - * Called when a pipelined response is received. - * @param value supplies the response which is now owned by the callee. - */ - virtual void onResponse(RespValuePtr&& value) PURE; - - /** - * Called when a network/protocol error occurs and there is no response. - */ - virtual void onFailure() PURE; -}; - -/** - * A single redis client connection. - */ -class Client : public Event::DeferredDeletable { -public: - virtual ~Client() {} - - /** - * Adds network connection callbacks to the underlying network connection. - */ - virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE; - - /** - * Closes the underlying network connection. - */ - virtual void close() PURE; - - /** - * Make a pipelined request to the remote redis server. - * @param request supplies the RESP request to make. - * @param callbacks supplies the request callbacks. - * @return PoolRequest* a handle to the active request or nullptr if the request could not be made - * for some reason. - */ - virtual PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) PURE; -}; - -typedef std::unique_ptr ClientPtr; - -/** - * Configuration for a redis connection pool. 
- */ -class Config { -public: - virtual ~Config() {} - - /** - * @return std::chrono::milliseconds the timeout for an individual redis operation. Currently, - * all operations use the same timeout. - */ - virtual std::chrono::milliseconds opTimeout() const PURE; - - /** - * @return bool disable outlier events even if the cluster has it enabled. This is used by the - * healthchecker's connection pool to avoid double counting active healthcheck operations as - * passive healthcheck operations. - */ - virtual bool disableOutlierEvents() const PURE; - - /** - * @return when enabled, a hash tagging function will be used to guarantee that keys with the - * same hash tag will be forwarded to the same upstream. - */ - virtual bool enableHashtagging() const PURE; -}; - -/** - * A factory for individual redis client connections. - */ -class ClientFactory { -public: - virtual ~ClientFactory() {} - - /** - * Create a client given an upstream host. - * @param host supplies the upstream host. - * @param dispatcher supplies the owning thread's dispatcher. - * @param config supplies the connection pool configuration. - * @return ClientPtr a new connection pool client. - */ - virtual ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - const Config& config) PURE; -}; - /** * A redis connection pool. Wraps M connections to N upstream hosts, consistent hashing, * pipelining, failure handling, etc. @@ -136,8 +31,9 @@ class Instance { * @return PoolRequest* a handle to the active request or nullptr if the request could not be made * for some reason. 
*/ - virtual PoolRequest* makeRequest(const std::string& hash_key, const RespValue& request, - PoolCallbacks& callbacks) PURE; + virtual Common::Redis::Client::PoolRequest* + makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) PURE; }; typedef std::unique_ptr InstancePtr; diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index 596233af4f55c..edc12ac5c3a88 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -13,185 +13,9 @@ namespace NetworkFilters { namespace RedisProxy { namespace ConnPool { -ConfigImpl::ConfigImpl( - const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) - : op_timeout_(PROTOBUF_GET_MS_REQUIRED(config, op_timeout)), - enable_hashtagging_(config.enable_hashtagging()) {} - -ClientPtr ClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, DecoderFactory& decoder_factory, - const Config& config) { - - std::unique_ptr client( - new ClientImpl(host, dispatcher, std::move(encoder), decoder_factory, config)); - client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_; - client->connection_->addConnectionCallbacks(*client); - client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)}); - client->connection_->connect(); - client->connection_->noDelay(true); - return std::move(client); -} - -ClientImpl::ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, DecoderFactory& decoder_factory, const Config& config) - : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), - config_(config), - connect_or_op_timer_(dispatcher.createTimer([this]() -> void { 
onConnectOrOpTimeout(); })) { - host->cluster().stats().upstream_cx_total_.inc(); - host->stats().cx_total_.inc(); - host->cluster().stats().upstream_cx_active_.inc(); - host->stats().cx_active_.inc(); - connect_or_op_timer_->enableTimer(host->cluster().connectTimeout()); -} - -ClientImpl::~ClientImpl() { - ASSERT(pending_requests_.empty()); - ASSERT(connection_->state() == Network::Connection::State::Closed); - host_->cluster().stats().upstream_cx_active_.dec(); - host_->stats().cx_active_.dec(); -} - -void ClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } - -PoolRequest* ClientImpl::makeRequest(const RespValue& request, PoolCallbacks& callbacks) { - ASSERT(connection_->state() == Network::Connection::State::Open); - - pending_requests_.emplace_back(*this, callbacks); - encoder_->encode(request, encoder_buffer_); - connection_->write(encoder_buffer_, false); - - // Only boost the op timeout if: - // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer - // will be reset when/if connection occurs. This allows a relatively long connection spin up - // time for example if TLS is being used. - // - This is the first request on the pipeline. Otherwise the timeout would effectively start on - // the last operation. 
- if (connected_ && pending_requests_.size() == 1) { - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - return &pending_requests_.back(); -} - -void ClientImpl::onConnectOrOpTimeout() { - putOutlierEvent(Upstream::Outlier::Result::TIMEOUT); - if (connected_) { - host_->cluster().stats().upstream_rq_timeout_.inc(); - host_->stats().rq_timeout_.inc(); - } else { - host_->cluster().stats().upstream_cx_connect_timeout_.inc(); - host_->stats().cx_connect_fail_.inc(); - } - - connection_->close(Network::ConnectionCloseType::NoFlush); -} - -void ClientImpl::onData(Buffer::Instance& data) { - try { - decoder_->decode(data); - } catch (ProtocolError&) { - putOutlierEvent(Upstream::Outlier::Result::REQUEST_FAILED); - host_->cluster().stats().upstream_cx_protocol_error_.inc(); - host_->stats().rq_error_.inc(); - connection_->close(Network::ConnectionCloseType::NoFlush); - } -} - -void ClientImpl::putOutlierEvent(Upstream::Outlier::Result result) { - if (!config_.disableOutlierEvents()) { - host_->outlierDetector().putResult(result); - } -} - -void ClientImpl::onEvent(Network::ConnectionEvent event) { - if (event == Network::ConnectionEvent::RemoteClose || - event == Network::ConnectionEvent::LocalClose) { - if (!pending_requests_.empty()) { - host_->cluster().stats().upstream_cx_destroy_with_active_rq_.inc(); - if (event == Network::ConnectionEvent::RemoteClose) { - putOutlierEvent(Upstream::Outlier::Result::SERVER_FAILURE); - host_->cluster().stats().upstream_cx_destroy_remote_with_active_rq_.inc(); - } - if (event == Network::ConnectionEvent::LocalClose) { - host_->cluster().stats().upstream_cx_destroy_local_with_active_rq_.inc(); - } - } - - while (!pending_requests_.empty()) { - PendingRequest& request = pending_requests_.front(); - if (!request.canceled_) { - request.callbacks_.onFailure(); - } else { - host_->cluster().stats().upstream_rq_cancelled_.inc(); - } - pending_requests_.pop_front(); - } - - connect_or_op_timer_->disableTimer(); - } else if 
(event == Network::ConnectionEvent::Connected) { - connected_ = true; - ASSERT(!pending_requests_.empty()); - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - if (event == Network::ConnectionEvent::RemoteClose && !connected_) { - host_->cluster().stats().upstream_cx_connect_fail_.inc(); - host_->stats().cx_connect_fail_.inc(); - } -} - -void ClientImpl::onRespValue(RespValuePtr&& value) { - ASSERT(!pending_requests_.empty()); - PendingRequest& request = pending_requests_.front(); - if (!request.canceled_) { - request.callbacks_.onResponse(std::move(value)); - } else { - host_->cluster().stats().upstream_rq_cancelled_.inc(); - } - pending_requests_.pop_front(); - - // If there are no remaining ops in the pipeline we need to disable the timer. - // Otherwise we boost the timer since we are receiving responses and there are more to flush out. - if (pending_requests_.empty()) { - connect_or_op_timer_->disableTimer(); - } else { - connect_or_op_timer_->enableTimer(config_.opTimeout()); - } - - putOutlierEvent(Upstream::Outlier::Result::SUCCESS); -} - -ClientImpl::PendingRequest::PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks) - : parent_(parent), callbacks_(callbacks) { - parent.host_->cluster().stats().upstream_rq_total_.inc(); - parent.host_->stats().rq_total_.inc(); - parent.host_->cluster().stats().upstream_rq_active_.inc(); - parent.host_->stats().rq_active_.inc(); -} - -ClientImpl::PendingRequest::~PendingRequest() { - parent_.host_->cluster().stats().upstream_rq_active_.dec(); - parent_.host_->stats().rq_active_.dec(); -} - -void ClientImpl::PendingRequest::cancel() { - // If we get a cancellation, we just mark the pending request as cancelled, and then we drop - // the response as it comes through. There is no reason to blow away the connection when the - // remote is already responding as fast as possible. 
- canceled_ = true; -} - -ClientFactoryImpl ClientFactoryImpl::instance_; - -ClientPtr ClientFactoryImpl::create(Upstream::HostConstSharedPtr host, - Event::Dispatcher& dispatcher, const Config& config) { - return ClientImpl::create(host, dispatcher, EncoderPtr{new EncoderImpl()}, decoder_factory_, - config); -} - InstanceImpl::InstanceImpl( - const std::string& cluster_name, Upstream::ClusterManager& cm, ClientFactory& client_factory, - ThreadLocal::SlotAllocator& tls, + const std::string& cluster_name, Upstream::ClusterManager& cm, + Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config) : cm_(cm), client_factory_(client_factory), tls_(tls.allocateSlot()), config_(config) { tls_->set([this, cluster_name]( @@ -200,8 +24,9 @@ InstanceImpl::InstanceImpl( }); } -PoolRequest* InstanceImpl::makeRequest(const std::string& key, const RespValue& value, - PoolCallbacks& callbacks) { +Common::Redis::Client::PoolRequest* +InstanceImpl::makeRequest(const std::string& key, const Common::Redis::RespValue& value, + Common::Redis::Client::PoolCallbacks& callbacks) { return tls_->getTyped().makeRequest(key, value, callbacks); } @@ -273,9 +98,10 @@ void InstanceImpl::ThreadLocalPool::onHostsRemoved( } } -PoolRequest* InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, - const RespValue& request, - PoolCallbacks& callbacks) { +Common::Redis::Client::PoolRequest* +InstanceImpl::ThreadLocalPool::makeRequest(const std::string& key, + const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) { if (cluster_ == nullptr) { ASSERT(client_map_.empty()); ASSERT(host_set_member_update_cb_handle_ == nullptr); diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h index ba89f3098a8d1..1dfb363573ab2 100644 --- 
a/source/extensions/filters/network/redis_proxy/conn_pool_impl.h +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.h @@ -18,7 +18,8 @@ #include "common/protobuf/utility.h" #include "common/upstream/load_balancer_impl.h" -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" namespace Envoy { @@ -30,107 +31,17 @@ namespace ConnPool { // TODO(mattklein123): Circuit breaking // TODO(rshriram): Fault injection -class ConfigImpl : public Config { -public: - ConfigImpl( - const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); - - bool disableOutlierEvents() const override { return false; } - std::chrono::milliseconds opTimeout() const override { return op_timeout_; } - bool enableHashtagging() const override { return enable_hashtagging_; } - -private: - const std::chrono::milliseconds op_timeout_; - const bool enable_hashtagging_; -}; - -class ClientImpl : public Client, public DecoderCallbacks, public Network::ConnectionCallbacks { -public: - static ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - EncoderPtr&& encoder, DecoderFactory& decoder_factory, - const Config& config); - - ~ClientImpl(); - - // RedisProxy::ConnPool::Client - void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override { - connection_->addConnectionCallbacks(callbacks); - } - void close() override; - PoolRequest* makeRequest(const RespValue& request, PoolCallbacks& callbacks) override; - -private: - struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { - UpstreamReadFilter(ClientImpl& parent) : parent_(parent) {} - - // Network::ReadFilter - Network::FilterStatus onData(Buffer::Instance& data, bool) override { - parent_.onData(data); - return Network::FilterStatus::Continue; - } - 
- ClientImpl& parent_; - }; - - struct PendingRequest : public PoolRequest { - PendingRequest(ClientImpl& parent, PoolCallbacks& callbacks); - ~PendingRequest(); - - // RedisProxy::ConnPool::PoolRequest - void cancel() override; - - ClientImpl& parent_; - PoolCallbacks& callbacks_; - bool canceled_{}; - }; - - ClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, EncoderPtr&& encoder, - DecoderFactory& decoder_factory, const Config& config); - void onConnectOrOpTimeout(); - void onData(Buffer::Instance& data); - void putOutlierEvent(Upstream::Outlier::Result result); - - // RedisProxy::DecoderCallbacks - void onRespValue(RespValuePtr&& value) override; - - // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override; - void onAboveWriteBufferHighWatermark() override {} - void onBelowWriteBufferLowWatermark() override {} - - Upstream::HostConstSharedPtr host_; - Network::ClientConnectionPtr connection_; - EncoderPtr encoder_; - Buffer::OwnedImpl encoder_buffer_; - DecoderPtr decoder_; - const Config& config_; - std::list pending_requests_; - Event::TimerPtr connect_or_op_timer_; - bool connected_{}; -}; - -class ClientFactoryImpl : public ClientFactory { -public: - // RedisProxy::ConnPool::ClientFactoryImpl - ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, - const Config& config) override; - - static ClientFactoryImpl instance_; - -private: - DecoderFactoryImpl decoder_factory_; -}; - class InstanceImpl : public Instance { public: InstanceImpl( - const std::string& cluster_name, Upstream::ClusterManager& cm, ClientFactory& client_factory, - ThreadLocal::SlotAllocator& tls, + const std::string& cluster_name, Upstream::ClusterManager& cm, + Common::Redis::Client::ClientFactory& client_factory, ThreadLocal::SlotAllocator& tls, const envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings& config); // RedisProxy::ConnPool::Instance - PoolRequest* makeRequest(const 
std::string& key, const RespValue& request, - PoolCallbacks& callbacks) override; + Common::Redis::Client::PoolRequest* + makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks) override; private: struct ThreadLocalPool; @@ -145,7 +56,7 @@ class InstanceImpl : public Instance { ThreadLocalPool& parent_; Upstream::HostConstSharedPtr host_; - ClientPtr redis_client_; + Common::Redis::Client::ClientPtr redis_client_; }; typedef std::unique_ptr ThreadLocalActiveClientPtr; @@ -154,8 +65,9 @@ class InstanceImpl : public Instance { public Upstream::ClusterUpdateCallbacks { ThreadLocalPool(InstanceImpl& parent, Event::Dispatcher& dispatcher, std::string cluster_name); ~ThreadLocalPool(); - PoolRequest* makeRequest(const std::string& key, const RespValue& request, - PoolCallbacks& callbacks); + Common::Redis::Client::PoolRequest* + makeRequest(const std::string& key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks); void onClusterAddOrUpdateNonVirtual(Upstream::ThreadLocalCluster& cluster); void onHostsRemoved(const std::vector& hosts_removed); @@ -186,9 +98,9 @@ class InstanceImpl : public Instance { }; Upstream::ClusterManager& cm_; - ClientFactory& client_factory_; + Common::Redis::Client::ClientFactory& client_factory_; ThreadLocal::SlotPtr tls_; - ConfigImpl config_; + Common::Redis::Client::ConfigImpl config_; }; } // namespace ConnPool diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.cc b/source/extensions/filters/network/redis_proxy/proxy_filter.cc index b676332136ae3..d5fc143e9be09 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.cc +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.cc @@ -26,8 +26,9 @@ ProxyStats ProxyFilterConfig::generateStats(const std::string& prefix, Stats::Sc ALL_REDIS_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix))}; } 
-ProxyFilter::ProxyFilter(DecoderFactory& factory, EncoderPtr&& encoder, - CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config) +ProxyFilter::ProxyFilter(Common::Redis::DecoderFactory& factory, + Common::Redis::EncoderPtr&& encoder, CommandSplitter::Instance& splitter, + ProxyFilterConfigSharedPtr config) : decoder_(factory.create(*this)), encoder_(std::move(encoder)), splitter_(splitter), config_(config) { config_->stats_.downstream_cx_total_.inc(); @@ -49,7 +50,7 @@ void ProxyFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& ca nullptr, nullptr}); } -void ProxyFilter::onRespValue(RespValuePtr&& value) { +void ProxyFilter::onRespValue(Common::Redis::RespValuePtr&& value) { pending_requests_.emplace_back(*this); PendingRequest& request = pending_requests_.back(); CommandSplitter::SplitRequestPtr split = splitter_.makeRequest(*value, request); @@ -72,7 +73,7 @@ void ProxyFilter::onEvent(Network::ConnectionEvent event) { } } -void ProxyFilter::onResponse(PendingRequest& request, RespValuePtr&& value) { +void ProxyFilter::onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value) { ASSERT(!pending_requests_.empty()); request.pending_response_ = std::move(value); request.request_handle_ = nullptr; @@ -100,10 +101,10 @@ Network::FilterStatus ProxyFilter::onData(Buffer::Instance& data, bool) { try { decoder_->decode(data); return Network::FilterStatus::Continue; - } catch (ProtocolError&) { + } catch (Common::Redis::ProtocolError&) { config_->stats_.downstream_cx_protocol_error_.inc(); - RespValue error; - error.type(RespType::Error); + Common::Redis::RespValue error; + error.type(Common::Redis::RespType::Error); error.asString() = "downstream protocol error"; encoder_->encode(error, encoder_buffer_); callbacks_->connection().write(encoder_buffer_, false); diff --git a/source/extensions/filters/network/redis_proxy/proxy_filter.h b/source/extensions/filters/network/redis_proxy/proxy_filter.h index 
c0b825a85b91f..3f8dc62d6eecd 100644 --- a/source/extensions/filters/network/redis_proxy/proxy_filter.h +++ b/source/extensions/filters/network/redis_proxy/proxy_filter.h @@ -13,7 +13,7 @@ #include "common/buffer/buffer_impl.h" -#include "extensions/filters/network/redis_proxy/codec.h" +#include "extensions/filters/network/common/redis/codec.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" namespace Envoy { @@ -72,11 +72,11 @@ typedef std::shared_ptr ProxyFilterConfigSharedPtr; * multiplex them onto a consistently hashed connection pool of backend servers. */ class ProxyFilter : public Network::ReadFilter, - public DecoderCallbacks, + public Common::Redis::DecoderCallbacks, public Network::ConnectionCallbacks { public: - ProxyFilter(DecoderFactory& factory, EncoderPtr&& encoder, CommandSplitter::Instance& splitter, - ProxyFilterConfigSharedPtr config); + ProxyFilter(Common::Redis::DecoderFactory& factory, Common::Redis::EncoderPtr&& encoder, + CommandSplitter::Instance& splitter, ProxyFilterConfigSharedPtr config); ~ProxyFilter(); // Network::ReadFilter @@ -89,8 +89,8 @@ class ProxyFilter : public Network::ReadFilter, void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} - // RedisProxy::DecoderCallbacks - void onRespValue(RespValuePtr&& value) override; + // Common::Redis::DecoderCallbacks + void onRespValue(Common::Redis::RespValuePtr&& value) override; private: struct PendingRequest : public CommandSplitter::SplitCallbacks { @@ -98,17 +98,19 @@ class ProxyFilter : public Network::ReadFilter, ~PendingRequest(); // RedisProxy::CommandSplitter::SplitCallbacks - void onResponse(RespValuePtr&& value) override { parent_.onResponse(*this, std::move(value)); } + void onResponse(Common::Redis::RespValuePtr&& value) override { + parent_.onResponse(*this, std::move(value)); + } ProxyFilter& parent_; - RespValuePtr pending_response_; + Common::Redis::RespValuePtr pending_response_; 
CommandSplitter::SplitRequestPtr request_handle_; }; - void onResponse(PendingRequest& request, RespValuePtr&& value); + void onResponse(PendingRequest& request, Common::Redis::RespValuePtr&& value); - DecoderPtr decoder_; - EncoderPtr encoder_; + Common::Redis::DecoderPtr decoder_; + Common::Redis::EncoderPtr encoder_; CommandSplitter::Instance& splitter_; ProxyFilterConfigSharedPtr config_; Buffer::OwnedImpl encoder_buffer_; diff --git a/source/extensions/health_checkers/redis/BUILD b/source/extensions/health_checkers/redis/BUILD index 3148d7c09711f..1c92f366295c3 100644 --- a/source/extensions/health_checkers/redis/BUILD +++ b/source/extensions/health_checkers/redis/BUILD @@ -16,6 +16,7 @@ envoy_cc_library( hdrs = ["redis.h"], deps = [ "//source/common/upstream:health_checker_base_lib", + "//source/extensions/filters/network/common/redis:client_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", "@envoy_api//envoy/api/v2/core:health_check_cc", "@envoy_api//envoy/config/health_checker/redis/v2:redis_cc", diff --git a/source/extensions/health_checkers/redis/config.cc b/source/extensions/health_checkers/redis/config.cc index 7102b0b74f4c0..eeb0206374837 100644 --- a/source/extensions/health_checkers/redis/config.cc +++ b/source/extensions/health_checkers/redis/config.cc @@ -17,7 +17,7 @@ Upstream::HealthCheckerSharedPtr RedisHealthCheckerFactory::createCustomHealthCh return std::make_shared( context.cluster(), config, getRedisHealthCheckConfig(config), context.dispatcher(), context.runtime(), context.random(), context.eventLogger(), - NetworkFilters::RedisProxy::ConnPool::ClientFactoryImpl::instance_); + NetworkFilters::Common::Redis::Client::ClientFactoryImpl::instance_); }; /** diff --git a/source/extensions/health_checkers/redis/redis.cc b/source/extensions/health_checkers/redis/redis.cc index db3ea51b69cef..3b396955f6a1e 100644 --- a/source/extensions/health_checkers/redis/redis.cc +++ b/source/extensions/health_checkers/redis/redis.cc @@ 
-10,7 +10,7 @@ RedisHealthChecker::RedisHealthChecker( const envoy::config::health_checker::redis::v2::Redis& redis_config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory) + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory) : HealthCheckerImplBase(cluster, config, dispatcher, runtime, random, std::move(event_logger)), client_factory_(client_factory), key_(redis_config.key()) { if (!key_.empty()) { @@ -65,12 +65,12 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onInterval() { } void RedisHealthChecker::RedisActiveHealthCheckSession::onResponse( - Extensions::NetworkFilters::RedisProxy::RespValuePtr&& value) { + NetworkFilters::Common::Redis::RespValuePtr&& value) { current_request_ = nullptr; switch (parent_.type_) { case Type::Exists: - if (value->type() == Extensions::NetworkFilters::RedisProxy::RespType::Integer && + if (value->type() == NetworkFilters::Common::Redis::RespType::Integer && value->asInteger() == 0) { handleSuccess(); } else { @@ -78,7 +78,7 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onResponse( } break; case Type::Ping: - if (value->type() == Extensions::NetworkFilters::RedisProxy::RespType::SimpleString && + if (value->type() == NetworkFilters::Common::Redis::RespType::SimpleString && value->asString() == "PONG") { handleSuccess(); } else { @@ -106,20 +106,20 @@ void RedisHealthChecker::RedisActiveHealthCheckSession::onTimeout() { } RedisHealthChecker::HealthCheckRequest::HealthCheckRequest(const std::string& key) { - std::vector values(2); - values[0].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + std::vector values(2); + values[0].type(NetworkFilters::Common::Redis::RespType::BulkString); values[0].asString() = "EXISTS"; - 
values[1].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + values[1].type(NetworkFilters::Common::Redis::RespType::BulkString); values[1].asString() = key; - request_.type(Extensions::NetworkFilters::RedisProxy::RespType::Array); + request_.type(NetworkFilters::Common::Redis::RespType::Array); request_.asArray().swap(values); } RedisHealthChecker::HealthCheckRequest::HealthCheckRequest() { - std::vector values(1); - values[0].type(Extensions::NetworkFilters::RedisProxy::RespType::BulkString); + std::vector values(1); + values[0].type(NetworkFilters::Common::Redis::RespType::BulkString); values[0].asString() = "PING"; - request_.type(Extensions::NetworkFilters::RedisProxy::RespType::Array); + request_.type(NetworkFilters::Common::Redis::RespType::Array); request_.asArray().swap(values); } diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 5aa702e5665d5..8b287a5a81c3e 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -4,6 +4,7 @@ #include "common/upstream/health_checker_base_impl.h" +#include "extensions/filters/network/common/redis/client_impl.h" #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" namespace Envoy { @@ -21,14 +22,14 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { const envoy::config::health_checker::redis::v2::Redis& redis_config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Upstream::HealthCheckEventLoggerPtr&& event_logger, - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory); + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory); - static const Extensions::NetworkFilters::RedisProxy::RespValue& pingHealthCheckRequest() { + static const NetworkFilters::Common::Redis::RespValue& pingHealthCheckRequest() { static HealthCheckRequest* request = new 
HealthCheckRequest(); return request->request_; } - static const Extensions::NetworkFilters::RedisProxy::RespValue& + static const NetworkFilters::Common::Redis::RespValue& existsHealthCheckRequest(const std::string& key) { static HealthCheckRequest* request = new HealthCheckRequest(key); return request->request_; @@ -42,8 +43,8 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { private: struct RedisActiveHealthCheckSession : public ActiveHealthCheckSession, - public Extensions::NetworkFilters::RedisProxy::ConnPool::Config, - public Extensions::NetworkFilters::RedisProxy::ConnPool::PoolCallbacks, + public Extensions::NetworkFilters::Common::Redis::Client::Config, + public Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks, public Network::ConnectionCallbacks { RedisActiveHealthCheckSession(RedisHealthChecker& parent, const Upstream::HostSharedPtr& host); ~RedisActiveHealthCheckSession(); @@ -51,7 +52,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { void onInterval() override; void onTimeout() override; - // Extensions::NetworkFilters::RedisProxy::ConnPool::Config + // Extensions::NetworkFilters::Common::Redis::Client::Config bool disableOutlierEvents() const override { return true; } std::chrono::milliseconds opTimeout() const override { // Allow the main Health Check infra to control timeout. 
@@ -59,8 +60,8 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { } bool enableHashtagging() const override { return false; } - // Extensions::NetworkFilters::RedisProxy::ConnPool::PoolCallbacks - void onResponse(Extensions::NetworkFilters::RedisProxy::RespValuePtr&& value) override; + // Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks + void onResponse(NetworkFilters::Common::Redis::RespValuePtr&& value) override; void onFailure() override; // Network::ConnectionCallbacks @@ -69,8 +70,8 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { void onBelowWriteBufferLowWatermark() override {} RedisHealthChecker& parent_; - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr client_; - Extensions::NetworkFilters::RedisProxy::ConnPool::PoolRequest* current_request_{}; + Extensions::NetworkFilters::Common::Redis::Client::ClientPtr client_; + Extensions::NetworkFilters::Common::Redis::Client::PoolRequest* current_request_{}; }; enum class Type { Ping, Exists }; @@ -79,7 +80,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { HealthCheckRequest(const std::string& key); HealthCheckRequest(); - Extensions::NetworkFilters::RedisProxy::RespValue request_; + NetworkFilters::Common::Redis::RespValue request_; }; typedef std::unique_ptr RedisActiveHealthCheckSessionPtr; @@ -89,7 +90,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { return std::make_unique(*this, host); } - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory& client_factory_; + Extensions::NetworkFilters::Common::Redis::Client::ClientFactory& client_factory_; Type type_; const std::string key_; }; diff --git a/source/extensions/quic_listeners/quiche/platform/BUILD b/source/extensions/quic_listeners/quiche/platform/BUILD index bd710d3cc8014..fd6954d952fbb 100644 --- a/source/extensions/quic_listeners/quiche/platform/BUILD +++ b/source/extensions/quic_listeners/quiche/platform/BUILD @@ -75,6 +75,7 @@ 
envoy_cc_library( "quic_mock_log_impl.h", "quic_singleton_impl.h", "quic_stack_trace_impl.h", + "quic_thread_impl.h", ]), external_deps = [ "abseil_base", @@ -89,6 +90,7 @@ envoy_cc_library( deps = [ "@com_googlesource_quiche//:quic_platform_export", ] + envoy_select_quiche([ + "//include/envoy/thread:thread_interface", "//source/common/common:assert_lib", "//source/server:backtrace_lib", "//source/common/singleton:threadsafe_singleton", @@ -100,21 +102,32 @@ envoy_cc_library( srcs = [ "quic_cert_utils_impl.cc", "quic_text_utils_impl.cc", - ], + ] + envoy_select_quiche([ + "quic_hostname_utils_impl.cc", + "quic_test_output_impl.cc", + ]), hdrs = [ "quic_cert_utils_impl.h", "quic_mutex_impl.h", "quic_str_cat_impl.h", "quic_string_utils_impl.h", "quic_text_utils_impl.h", - ], + ] + envoy_select_quiche([ + "quic_hostname_utils_impl.h", + "quic_test_output_impl.h", + ]), external_deps = [ "quiche_quic_platform_base", "abseil_str_format", "abseil_synchronization", + "abseil_time", "ssl", ], visibility = ["//visibility:public"], + deps = envoy_select_quiche([ + "//source/common/filesystem:filesystem_lib", + "//source/common/http:utility_lib", + ]), ) envoy_cc_library( diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc new file mode 100644 index 0000000000000..c38ae408889e9 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.cc @@ -0,0 +1,48 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h" + +#include + +#include "common/http/utility.h" + +#include "absl/strings/ascii.h" +#include "absl/strings/str_cat.h" + +// TODO(wub): Implement both functions on top of GoogleUrl, then enable +// quiche/quic/platform/api/quic_hostname_utils_test.cc. + +namespace quic { + +// static +bool QuicHostnameUtilsImpl::IsValidSNI(QuicStringPiece sni) { + // TODO(wub): Implement it on top of GoogleUrl, once it is available. + + return sni.find_last_of('.') != std::string::npos && + Envoy::Http::Utility::Url().initialize(absl::StrCat("http://", sni)); +} + +// static +QuicString QuicHostnameUtilsImpl::NormalizeHostname(QuicStringPiece hostname) { + // TODO(wub): Implement it on top of GoogleUrl, once it is available. + QuicString host = absl::AsciiStrToLower(hostname); + + // Walk backwards over the string, stopping at the first trailing dot. + size_t host_end = host.length(); + while (host_end != 0 && host[host_end - 1] == '.') { + host_end--; + } + + // Erase the trailing dots. + if (host_end != host.length()) { + host.erase(host_end, host.length() - host_end); + } + + return host; +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h new file mode 100644 index 0000000000000..e288b2333e54e --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_hostname_utils_impl.h @@ -0,0 +1,37 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "quiche/quic/platform/api/quic_export.h" +#include "quiche/quic/platform/api/quic_string.h" +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +class QUIC_EXPORT_PRIVATE QuicHostnameUtilsImpl { +public: + // Returns true if the sni is valid, false otherwise. + // (1) disallow IP addresses; + // (2) check that the hostname contains valid characters only; and + // (3) contains at least one dot. + // NOTE(wub): Only (3) is implemented for now. + static bool IsValidSNI(QuicStringPiece sni); + + // Normalize a hostname: + // (1) Canonicalize it, similar to what Chromium does in + // https://cs.chromium.org/chromium/src/net/base/url_util.h?q=net::CanonicalizeHost + // (2) Convert it to lower case. + // (3) Remove the trailing '.'. + // WARNING: May mutate |hostname| in place. + // NOTE(wub): Only (2) and (3) are implemented for now. + static QuicString NormalizeHostname(QuicStringPiece hostname); + +private: + QuicHostnameUtilsImpl() = delete; +}; + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.cc b/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.cc new file mode 100644 index 0000000000000..2ab6a1fa66615 --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.cc @@ -0,0 +1,73 @@ +// NOLINT(namespace-envoy) + +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. 
+ +#include "extensions/quic_listeners/quiche/platform/quic_test_output_impl.h" + +#include + +#include "common/filesystem/filesystem_impl.h" + +#include "absl/time/clock.h" +#include "absl/time/time.h" +#include "fmt/printf.h" +#include "gtest/gtest.h" +#include "quiche/quic/platform/api/quic_logging.h" + +namespace quic { +namespace { + +void QuicRecordTestOutputToFile(const std::string& filename, QuicStringPiece data) { + const char* output_dir_env = std::getenv("QUIC_TEST_OUTPUT_DIR"); + if (output_dir_env == nullptr) { + QUIC_LOG(WARNING) << "Could not save test output since QUIC_TEST_OUTPUT_DIR is not set"; + return; + } + + std::string output_dir = output_dir_env; + if (output_dir.empty()) { + QUIC_LOG(WARNING) << "Could not save test output since QUIC_TEST_OUTPUT_DIR is empty"; + return; + } + + if (output_dir.back() != '/') { + output_dir += '/'; + } + + Envoy::Filesystem::InstanceImpl file_system; + if (!file_system.directoryExists(output_dir)) { + QUIC_LOG(ERROR) << "Directory does not exist while writing test output: " << output_dir; + return; + } + + const std::string output_path = output_dir + filename; + Envoy::Filesystem::FilePtr file = file_system.createFile(output_path); + if (!file->open().rc_) { + QUIC_LOG(ERROR) << "Failed to open test output file: " << output_path; + return; + } + + if (file->write(data).rc_ != static_cast(data.size())) { + QUIC_LOG(ERROR) << "Failed to write to test output file: " << output_path; + } else { + QUIC_LOG(INFO) << "Recorded test output into " << output_path; + } + + file->close(); +} +} // namespace + +void QuicRecordTestOutputImpl(QuicStringPiece identifier, QuicStringPiece data) { + const testing::TestInfo* test_info = testing::UnitTest::GetInstance()->current_test_info(); + + std::string timestamp = absl::FormatTime("%Y%m%d%H%M%S", absl::Now(), absl::LocalTimeZone()); + + std::string filename = fmt::sprintf("%s.%s.%s.%s.qtr", test_info->name(), + test_info->test_case_name(), identifier.data(), timestamp); + + 
QuicRecordTestOutputToFile(filename, data); +} + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h new file mode 100644 index 0000000000000..a611ad7af6edb --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h @@ -0,0 +1,15 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include "quiche/quic/platform/api/quic_string_piece.h" + +namespace quic { + +void QuicRecordTestOutputImpl(QuicStringPiece identifier, QuicStringPiece data); + +} // namespace quic diff --git a/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h b/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h new file mode 100644 index 0000000000000..42e84bb27c7dc --- /dev/null +++ b/source/extensions/quic_listeners/quiche/platform/quic_thread_impl.h @@ -0,0 +1,67 @@ +#pragma once + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +#include + +#include "envoy/thread/thread.h" + +#include "common/common/assert.h" + +#include "absl/synchronization/notification.h" + +namespace quic { + +// A class representing a thread of execution in QUIC. 
+class QuicThreadImpl { +public: + QuicThreadImpl(const std::string& /*name*/) {} + QuicThreadImpl(const QuicThreadImpl&) = delete; + QuicThreadImpl& operator=(const QuicThreadImpl&) = delete; + + virtual ~QuicThreadImpl() { + if (thread_ != nullptr) { + PANIC("QuicThread should be joined before destruction."); + } + } + + void Start() { + if (thread_ != nullptr || thread_is_set_.HasBeenNotified()) { + PANIC("QuicThread can only be started once."); + } + thread_ = Envoy::Thread::ThreadFactorySingleton::get().createThread([this]() { + thread_is_set_.WaitForNotification(); + this->Run(); + }); + thread_is_set_.Notify(); + } + + void Join() { + if (thread_ == nullptr) { + PANIC("QuicThread has not been started."); + } + thread_->join(); + thread_ = nullptr; + } + +protected: + virtual void Run() { + // We don't want this function to be pure virtual, because it will be called if: + // 1. An object of a derived class calls Start(), which starts the child thread + // but has not called Run() yet. + // 2. The destructor of the derived class is called, but not the destructor + // of this base class. + // 3. The child thread calls QuicThreadImpl::Run()(this function), since the destructor of the + // derived class has been called. + } + +private: + Envoy::Thread::ThreadPtr thread_; + absl::Notification thread_is_set_; // Whether |thread_| is set in parent. 
+}; + +} // namespace quic diff --git a/source/server/BUILD b/source/server/BUILD index 70d5d13b0a6bf..3943a76618b28 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -151,13 +151,26 @@ envoy_cc_library( envoy_cc_library( name = "options_lib", - srcs = ["options_impl.cc"], - hdrs = ["options_impl.h"], + srcs = ["options_impl.cc"] + select({ + "@bazel_tools//src/conditions:linux_x86_64": ["options_impl_platform_linux.cc"], + "@bazel_tools//src/conditions:linux_aarch64": ["options_impl_platform_linux.cc"], + "//conditions:default": ["options_impl_platform_default.cc"], + }), + hdrs = [ + "options_impl.h", + "options_impl_platform.h", + ] + select({ + "@bazel_tools//src/conditions:linux_x86_64": ["options_impl_platform_linux.h"], + "@bazel_tools//src/conditions:linux_aarch64": ["options_impl_platform_linux.h"], + "//conditions:default": [], + }), external_deps = ["tclap"], deps = [ "//include/envoy/network:address_interface", "//include/envoy/server:options_interface", "//include/envoy/stats:stats_interface", + "//source/common/api:os_sys_calls_lib", + "//source/common/common:logger_lib", "//source/common/common:macros", "//source/common/common:version_lib", "//source/common/protobuf:utility_lib", diff --git a/source/server/lds_api.h b/source/server/lds_api.h index fefea2e171564..713ead3f118a6 100644 --- a/source/server/lds_api.h +++ b/source/server/lds_api.h @@ -34,7 +34,12 @@ class LdsApiImpl : public LdsApi, void initialize(std::function callback) override; // Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override; + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } void onConfigUpdateFailed(const EnvoyException* e) override; std::string resourceName(const ProtobufWkt::Any& resource) override { return MessageUtil::anyConvert(resource).name(); 
diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 62971fa3e6a35..92e05e3fa3190 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -11,6 +11,8 @@ #include "common/common/version.h" #include "common/protobuf/utility.h" +#include "server/options_impl_platform.h" + #include "absl/strings/str_split.h" #include "spdlog/spdlog.h" #include "tclap/CmdLine.h" @@ -117,6 +119,8 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, "Disable hot restart functionality", cmd, false); TCLAP::SwitchArg enable_mutex_tracing( "", "enable-mutex-tracing", "Enable mutex contention tracing functionality", cmd, false); + TCLAP::SwitchArg cpuset_threads( + "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); cmd.setExceptionHandling(false); try { @@ -154,6 +158,8 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); + cpuset_threads_ = cpuset_threads.getValue(); + log_level_ = default_log_level; for (size_t i = 0; i < ARRAY_SIZE(spdlog::level::level_string_views); i++) { if (log_level.getValue() == spdlog::level::level_string_views[i]) { @@ -188,7 +194,20 @@ OptionsImpl::OptionsImpl(int argc, const char* const* argv, // For base ID, scale what the user inputs by 10 so that we have spread for domain sockets. base_id_ = base_id.getValue() * 10; - concurrency_ = std::max(1U, concurrency.getValue()); + + if (!concurrency.isSet() && cpuset_threads_) { + // The 'concurrency' command line option wasn't set but the 'cpuset-threads' + // option was set. Use the number of CPUs assigned to the process cpuset, if + // that can be known. 
+ concurrency_ = OptionsImplPlatform::getCpuCount(); + } else { + if (concurrency.isSet() && cpuset_threads_ && cpuset_threads.isSet()) { + ENVOY_LOG(warn, "Both --concurrency and --cpuset-threads options are set; not applying " + "--cpuset-threads."); + } + concurrency_ = std::max(1U, concurrency.getValue()); + } + config_path_ = config_path.getValue(); config_yaml_ = config_yaml.getValue(); allow_unknown_fields_ = allow_unknown_fields.getValue(); @@ -291,6 +310,7 @@ Server::CommandLineOptionsPtr OptionsImpl::toCommandLineOptions() const { command_line_options->set_max_obj_name_len(statsOptions().maxObjNameLength()); command_line_options->set_disable_hot_restart(hotRestartDisabled()); command_line_options->set_enable_mutex_tracing(mutexTracingEnabled()); + command_line_options->set_cpuset_threads(cpusetThreadsEnabled()); command_line_options->set_restart_epoch(restartEpoch()); return command_line_options; } @@ -303,6 +323,6 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_cluster_(service_cluster), service_node_(service_node), service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), mode_(Server::Mode::Serve), max_stats_(ENVOY_DEFAULT_MAX_STATS), hot_restart_disabled_(false), - signal_handling_enabled_(true), mutex_tracing_enabled_(false) {} + signal_handling_enabled_(true), mutex_tracing_enabled_(false), cpuset_threads_(false) {} } // namespace Envoy diff --git a/source/server/options_impl.h b/source/server/options_impl.h index cd0ec6a9d180f..b23faf2c34bbb 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -8,6 +8,7 @@ #include "envoy/server/options.h" #include "envoy/stats/stats_options.h" +#include "common/common/logger.h" #include "common/stats/stats_options_impl.h" #include "spdlog/spdlog.h" @@ -16,7 +17,7 @@ namespace Envoy { /** * Implementation of Server::Options. 
*/ -class OptionsImpl : public Server::Options { +class OptionsImpl : public Server::Options, protected Logger::Loggable { public: /** * Parameters are max_num_stats, max_stat_name_len, hot_restart_enabled @@ -73,6 +74,7 @@ class OptionsImpl : public Server::Options { void setSignalHandling(bool signal_handling_enabled) { signal_handling_enabled_ = signal_handling_enabled; } + void setCpusetThreads(bool cpuset_threads_enabled) { cpuset_threads_ = cpuset_threads_enabled; } // Server::Options uint64_t baseId() const override { return base_id_; } @@ -107,6 +109,7 @@ class OptionsImpl : public Server::Options { bool mutexTracingEnabled() const override { return mutex_tracing_enabled_; } virtual Server::CommandLineOptionsPtr toCommandLineOptions() const override; void parseComponentLogLevels(const std::string& component_log_levels); + bool cpusetThreadsEnabled() const override { return cpuset_threads_; } uint32_t count() const; private: @@ -137,6 +140,7 @@ class OptionsImpl : public Server::Options { bool hot_restart_disabled_; bool signal_handling_enabled_; bool mutex_tracing_enabled_; + bool cpuset_threads_; uint32_t count_; }; diff --git a/source/server/options_impl_platform.h b/source/server/options_impl_platform.h new file mode 100644 index 0000000000000..7d628d3699025 --- /dev/null +++ b/source/server/options_impl_platform.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include "common/common/logger.h" + +namespace Envoy { +class OptionsImplPlatform : protected Logger::Loggable { +public: + static uint32_t getCpuCount(); +}; +} // namespace Envoy diff --git a/source/server/options_impl_platform_default.cc b/source/server/options_impl_platform_default.cc new file mode 100644 index 0000000000000..3b4cbbe3118e9 --- /dev/null +++ b/source/server/options_impl_platform_default.cc @@ -0,0 +1,14 @@ +#include + +#include "common/common/logger.h" + +#include "server/options_impl_platform.h" + +namespace Envoy { + +uint32_t OptionsImplPlatform::getCpuCount() { + 
ENVOY_LOG(warn, "CPU number provided by HW thread count (instead of cpuset)."); + return std::thread::hardware_concurrency(); +} + +} // namespace Envoy diff --git a/source/server/options_impl_platform_linux.cc b/source/server/options_impl_platform_linux.cc new file mode 100644 index 0000000000000..069c68ab83da1 --- /dev/null +++ b/source/server/options_impl_platform_linux.cc @@ -0,0 +1,46 @@ +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." +#endif + +#include "server/options_impl_platform_linux.h" + +#include + +#include + +#include "common/api/os_sys_calls_impl_linux.h" + +#include "server/options_impl_platform.h" + +namespace Envoy { + +uint32_t OptionsImplPlatformLinux::getCpuAffinityCount(unsigned int hw_threads) { + unsigned int threads = 0; + pid_t pid = getpid(); + cpu_set_t mask; + auto& linux_os_syscalls = Api::LinuxOsSysCallsSingleton::get(); + + CPU_ZERO(&mask); + const Api::SysCallIntResult result = + linux_os_syscalls.sched_getaffinity(pid, sizeof(cpu_set_t), &mask); + if (result.rc_ == -1) { + // Fall back to number of hardware threads. + return hw_threads; + } + + threads = CPU_COUNT(&mask); + + // Sanity check. + if (threads > 0 && threads <= hw_threads) { + return threads; + } + + return hw_threads; +} + +uint32_t OptionsImplPlatform::getCpuCount() { + unsigned int hw_threads = std::max(1U, std::thread::hardware_concurrency()); + return OptionsImplPlatformLinux::getCpuAffinityCount(hw_threads); +} + +} // namespace Envoy diff --git a/source/server/options_impl_platform_linux.h b/source/server/options_impl_platform_linux.h new file mode 100644 index 0000000000000..dfdb0c7efae0b --- /dev/null +++ b/source/server/options_impl_platform_linux.h @@ -0,0 +1,15 @@ +#pragma once + +#if !defined(__linux__) +#error "Linux platform file is part of non-Linux build." 
+#endif + +#include +#include + +namespace Envoy { +class OptionsImplPlatformLinux { +public: + static uint32_t getCpuAffinityCount(unsigned int hw_threads); +}; +} // namespace Envoy diff --git a/source/server/overload_manager_impl.cc b/source/server/overload_manager_impl.cc index 364f739bfaa1a..6a9401208d001 100644 --- a/source/server/overload_manager_impl.cc +++ b/source/server/overload_manager_impl.cc @@ -41,7 +41,7 @@ std::string StatsName(const std::string& a, const std::string& b) { OverloadAction::OverloadAction(const envoy::config::overload::v2alpha::OverloadAction& config, Stats::Scope& stats_scope) - : active_gauge_(stats_scope.gauge(StatsName(config.name(), "active"))) { + : active_indicator_(stats_scope.boolIndicator(StatsName(config.name(), "active"))) { for (const auto& trigger_config : config.triggers()) { TriggerPtr trigger; @@ -59,7 +59,7 @@ OverloadAction::OverloadAction(const envoy::config::overload::v2alpha::OverloadA } } - active_gauge_.set(0); + active_indicator_.set(false); } bool OverloadAction::updateResourcePressure(const std::string& name, double pressure) { @@ -69,11 +69,11 @@ bool OverloadAction::updateResourcePressure(const std::string& name, double pres ASSERT(it != triggers_.end()); if (it->second->updateValue(pressure)) { if (it->second->isFired()) { - active_gauge_.set(1); + active_indicator_.set(true); const auto result = fired_triggers_.insert(name); ASSERT(result.second); } else { - active_gauge_.set(0); + active_indicator_.set(false); const auto result = fired_triggers_.erase(name); ASSERT(result == 1); } diff --git a/source/server/overload_manager_impl.h b/source/server/overload_manager_impl.h index 9b7293ed231c9..a2519003211ca 100644 --- a/source/server/overload_manager_impl.h +++ b/source/server/overload_manager_impl.h @@ -46,7 +46,7 @@ class OverloadAction { private: std::unordered_map triggers_; std::unordered_set fired_triggers_; - Stats::Gauge& active_gauge_; + Stats::BoolIndicator& active_indicator_; }; class 
OverloadManagerImpl : Logger::Loggable, public OverloadManager { diff --git a/source/server/server.cc b/source/server/server.cc index 382e260181a56..62e9ae99a902f 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -67,7 +67,6 @@ InstanceImpl::InstanceImpl(const Options& options, Event::TimeSystem& time_syste terminated_(false), mutex_tracer_(options.mutexTracingEnabled() ? &Envoy::MutexTracerImpl::getOrCreateTracer() : nullptr) { - try { if (!options.logPath().empty()) { try { @@ -125,10 +124,7 @@ void InstanceImpl::drainListeners() { drain_manager_->startDrainSequence(nullptr); } -void InstanceImpl::failHealthcheck(bool fail) { - // We keep liveness state in shared memory so the parent process sees the same state. - server_stats_->live_.set(!fail); -} +void InstanceImpl::failHealthcheck(bool fail) { server_stats_->live_.set(!fail); } void InstanceUtil::flushMetricsToSinks(const std::list& sinks, Stats::Source& source) { @@ -168,7 +164,7 @@ void InstanceImpl::getParentStats(HotRestart::GetParentStatsInfo& info) { info.num_connections_ = numConnections(); } -bool InstanceImpl::healthCheckFailed() { return server_stats_->live_.value() == 0; } +bool InstanceImpl::healthCheckFailed() { return !server_stats_->live_.value(); } InstanceUtil::BootstrapVersion InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v2::Bootstrap& bootstrap, @@ -235,7 +231,8 @@ void InstanceImpl::initialize(const Options& options, const std::string server_stats_prefix = "server."; server_stats_ = std::make_unique( - ServerStats{ALL_SERVER_STATS(POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix), + ServerStats{ALL_SERVER_STATS(POOL_BOOL_INDICATOR_PREFIX(stats_store_, server_stats_prefix), + POOL_COUNTER_PREFIX(stats_store_, server_stats_prefix), POOL_GAUGE_PREFIX(stats_store_, server_stats_prefix))}); server_stats_->concurrency_.set(options_.concurrency()); diff --git a/source/server/server.h b/source/server/server.h index 27b02a0722f43..aa026fb65ce13 100644 --- 
a/source/server/server.h +++ b/source/server/server.h @@ -46,22 +46,22 @@ namespace Server { * All server wide stats. @see stats_macros.h */ // clang-format off -#define ALL_SERVER_STATS(COUNTER, GAUGE) \ - GAUGE(uptime) \ +#define ALL_SERVER_STATS(BOOL_INDICATOR, COUNTER, GAUGE) \ + BOOL_INDICATOR(live) \ + COUNTER(debug_assertion_failures) \ GAUGE(concurrency) \ + GAUGE(days_until_first_cert_expiring) \ + GAUGE(hot_restart_epoch) \ GAUGE(memory_allocated) \ GAUGE(memory_heap_size) \ - GAUGE(live) \ GAUGE(parent_connections) \ GAUGE(total_connections) \ - GAUGE(version) \ - GAUGE(days_until_first_cert_expiring) \ - GAUGE(hot_restart_epoch) \ - COUNTER(debug_assertion_failures) + GAUGE(uptime) \ + GAUGE(version) // clang-format on struct ServerStats { - ALL_SERVER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + ALL_SERVER_STATS(GENERATE_BOOL_INDICATOR_STRUCT, GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; /** diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 new file mode 100644 index 0000000000000..6a0f316806248 --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 @@ -0,0 +1,78 @@ +actions { + add_string: 11927552 +} +actions { + target_index: 4 + add_buffer: 0 +} +actions { + prepend_string: 1869177088 +} +actions { +} +actions { + move { + source_index: 4294967293 + } +} +actions { +} +actions { + linearize: 8388608 +} +actions { + prepend_string: 1869177088 +} +actions { + linearize: 1 +} +actions { + copy_out { + length: 4194304 + } +} +actions { + drain: 1 +} +actions { +} +actions { + add_string: 65534 +} +actions { + target_index: 1769235297 +} +actions { + add_string: 11927552 +} +actions { + target_index: 3053453312 + add_string: 11927552 +} +actions { +} +actions { + target_index: 11927552 + drain: 1 +} +actions { + 
target_index: 1769235297 +} +actions { + write { + } +} +actions { + target_index: 1769235297 +} +actions { + linearize: 1 +} +actions { + add_buffer_fragment: 1 +} +actions { + copy_out { + length: 4194304 + } +} diff --git a/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 new file mode 100644 index 0000000000000..250ea3ecc9b52 --- /dev/null +++ b/test/common/buffer/buffer_corpus/clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 @@ -0,0 +1,32 @@ +actions { + add_string: 6 +} +actions { + reserve_commit { + reserve_length: 971 + commit_length: 1 + } +} +actions { + target_index: 1 + add_string: 2 +} +actions { + target_index: 1 + prepend_string: 3 +} +actions { + target_index: 1 + prepend_buffer: 0 +} +actions { + move { + source_index: 1 + } +} +actions { + target_index: 1 + move { + length: 11 + } +} diff --git a/test/common/buffer/buffer_fuzz_test.cc b/test/common/buffer/buffer_fuzz_test.cc index 088395de50409..329f41756a01e 100644 --- a/test/common/buffer/buffer_fuzz_test.cc +++ b/test/common/buffer/buffer_fuzz_test.cc @@ -330,9 +330,9 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff FUZZ_ASSERT(rc == 0); } else { FUZZ_ASSERT(rc > 0); - STACK_ARRAY(buf, char, rc); - FUZZ_ASSERT(::read(pipe_fds[0], buf.begin(), rc) == rc); - FUZZ_ASSERT(::memcmp(buf.begin(), previous_data.data(), rc) == 0); + auto buf = std::make_unique(rc); + FUZZ_ASSERT(::read(pipe_fds[0], buf.get(), rc) == rc); + FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), rc) == 0); } } while (rc > 0); FUZZ_ASSERT(::close(pipe_fds[0]) == 0); @@ -366,7 +366,8 @@ DEFINE_PROTO_FUZZER(const test::common::buffer::BufferFuzzTestCase& input) { // Soft bound on the available memory for allocation to avoid OOMs and // timeouts. 
uint32_t available_alloc = 2 * MaxAllocation; - for (int i = 0; i < input.actions().size(); ++i) { + constexpr auto max_actions = 1024; + for (int i = 0; i < std::min(max_actions, input.actions().size()); ++i) { const char insert_value = 'a' + i % 26; const auto& action = input.actions(i); const uint64_t current_allocated_bytes = Memory::Stats::totalCurrentlyAllocated(); diff --git a/test/common/config/config_provider_impl_test.cc b/test/common/config/config_provider_impl_test.cc index 086ab59d98d14..f25f77a0b1dd8 100644 --- a/test/common/config/config_provider_impl_test.cc +++ b/test/common/config/config_provider_impl_test.cc @@ -52,6 +52,7 @@ class DummyConfigSubscription void start() override {} // Envoy::Config::SubscriptionCallbacks + // TODO(fredlas) deduplicate void onConfigUpdate(const ResourceVector& resources, const std::string& version_info) override { const auto& config = resources[0]; if (checkAndApplyConfig(config, "dummy_config", version_info)) { @@ -60,6 +61,10 @@ class DummyConfigSubscription ConfigSubscriptionInstanceBase::onConfigUpdate(); } + void onConfigUpdate(const Protobuf::RepeatedPtrField&, + const Protobuf::RepeatedPtrField&, const std::string&) override { + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } // Envoy::Config::SubscriptionCallbacks void onConfigUpdateFailed(const EnvoyException*) override {} diff --git a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index d0b178a123279..2b056dbf5666b 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -108,7 +108,7 @@ TEST_F(GrpcMuxImplTest, MultipleTypeUrlStreams) { expectSendMessage("foo", {"x", "y"}, ""); expectSendMessage("bar", {}, ""); grpc_mux_->start(); - EXPECT_EQ(1, stats_.gauge("control_plane.connected_state").value()); + EXPECT_TRUE(stats_.boolIndicator("control_plane.connected_state").value()); expectSendMessage("bar", {"z"}, ""); auto bar_z_sub = grpc_mux_->subscribe("bar", {"z"}, callbacks_); 
expectSendMessage("bar", {"zz", "z"}, ""); @@ -146,7 +146,7 @@ TEST_F(GrpcMuxImplTest, ResetStream) { ASSERT_TRUE(timer != nullptr); // initialized from dispatcher mock. EXPECT_CALL(*timer, enableTimer(_)); grpc_mux_->onRemoteClose(Grpc::Status::GrpcStatus::Canceled, ""); - EXPECT_EQ(0, stats_.gauge("control_plane.connected_state").value()); + EXPECT_FALSE(stats_.boolIndicator("control_plane.connected_state").value()); EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); expectSendMessage("foo", {"x", "y"}, ""); expectSendMessage("bar", {}, ""); @@ -269,9 +269,7 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { envoy::api::v2::ClusterLoadAssignment load_assignment; load_assignment.set_cluster_name("x"); response->add_resources()->PackFrom(load_assignment); - EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")) - .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, - const std::string&) { EXPECT_TRUE(resources.empty()); })); + EXPECT_CALL(bar_callbacks, onConfigUpdate(_, "1")).Times(0); EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) .WillOnce( Invoke([&load_assignment](const Protobuf::RepeatedPtrField& resources, @@ -329,6 +327,53 @@ TEST_F(GrpcMuxImplTest, WatchDemux) { expectSendMessage(type_url, {}, "2"); } +// Validate behavior when we have multiple watchers that send empty updates. 
+TEST_F(GrpcMuxImplTest, MultipleWatcherWithEmptyUpdates) { + setup(); + InSequence s; + const std::string& type_url = Config::TypeUrl::get().ClusterLoadAssignment; + NiceMock foo_callbacks; + auto foo_sub = grpc_mux_->subscribe(type_url, {"x", "y"}, foo_callbacks); + + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {"x", "y"}, ""); + grpc_mux_->start(); + + std::unique_ptr response( + new envoy::api::v2::DiscoveryResponse()); + response->set_type_url(type_url); + response->set_version_info("1"); + + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")).Times(0); + expectSendMessage(type_url, {"x", "y"}, "1"); + grpc_mux_->onReceiveMessage(std::move(response)); + + expectSendMessage(type_url, {}, "1"); +} + +// Validate behavior when we have Single Watcher that sends Empty updates. +TEST_F(GrpcMuxImplTest, SingleWatcherWithEmptyUpdates) { + setup(); + const std::string& type_url = Config::TypeUrl::get().Cluster; + NiceMock foo_callbacks; + auto foo_sub = grpc_mux_->subscribe(type_url, {}, foo_callbacks); + + EXPECT_CALL(*async_client_, start(_, _)).WillOnce(Return(&async_stream_)); + expectSendMessage(type_url, {}, ""); + grpc_mux_->start(); + + std::unique_ptr response( + new envoy::api::v2::DiscoveryResponse()); + response->set_type_url(type_url); + response->set_version_info("1"); + // Validate that onConfigUpdate is called with empty resources. + EXPECT_CALL(foo_callbacks, onConfigUpdate(_, "1")) + .WillOnce(Invoke([](const Protobuf::RepeatedPtrField& resources, + const std::string&) { EXPECT_TRUE(resources.empty()); })); + expectSendMessage(type_url, {}, "1"); + grpc_mux_->onReceiveMessage(std::move(response)); +} + // Exactly one test requires a mock time system to provoke behavior that cannot // easily be achieved with a SimulatedTimeSystem. 
class GrpcMuxImplTestWithMockTimeSystem : public GrpcMuxImplTestBase { diff --git a/test/common/config/subscription_factory_test.cc b/test/common/config/subscription_factory_test.cc index 9c457c5f078dd..81babecf65c48 100644 --- a/test/common/config/subscription_factory_test.cc +++ b/test/common/config/subscription_factory_test.cc @@ -172,9 +172,9 @@ TEST_F(SubscriptionFactoryTest, GrpcClusterMultiton) { EXPECT_CALL(*cluster.info_, addedViaApi()).WillRepeatedly(Return(false)); EXPECT_CALL(*cluster.info_, type()).WillRepeatedly(Return(envoy::api::v2::Cluster::STATIC)); - EXPECT_THROW_WITH_REGEX( - subscriptionFromConfigSource(config), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified:"); + EXPECT_THROW_WITH_REGEX(subscriptionFromConfigSource(config), EnvoyException, + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must have a " + "single gRPC service specified:"); } TEST_F(SubscriptionFactoryTest, FilesystemSubscription) { diff --git a/test/common/config/subscription_test_harness.h b/test/common/config/subscription_test_harness.h index 0df305a00f581..1b50774a256e0 100644 --- a/test/common/config/subscription_test_harness.h +++ b/test/common/config/subscription_test_harness.h @@ -58,8 +58,8 @@ class SubscriptionTestHarness { EXPECT_EQ(version, stats_.version_.value()); } - virtual void verifyControlPlaneStats(uint32_t connected_state) { - EXPECT_EQ(connected_state, stats_store_.gauge("control_plane.connected_state").value()); + virtual void verifyControlPlaneStats(bool connected_state) { + EXPECT_EQ(connected_state, stats_store_.boolIndicator("control_plane.connected_state").value()); } Stats::IsolatedStoreImpl stats_store_; diff --git a/test/common/config/utility_test.cc b/test/common/config/utility_test.cc index c48eacd759077..9c4ab302f695b 100644 --- a/test/common/config/utility_test.cc +++ b/test/common/config/utility_test.cc @@ -268,7 +268,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { 
EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must have a single gRPC service specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must have a single gRPC service " + "specified:"); } { @@ -279,7 +280,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } { @@ -290,7 +292,8 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } { @@ -301,7 +304,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), EnvoyException, - "envoy::api::v2::core::ConfigSource, if not of type gRPC, must not have a gRPC service " + "envoy::api::v2::core::ConfigSource, if not a gRPC type, must not have a gRPC service " "specified:"); } @@ -311,7 +314,7 @@ TEST(UtilityTest, FactoryForGrpcApiConfigSource) { api_config_source.add_cluster_names("foo"); EXPECT_THROW_WITH_REGEX( Utility::factoryForGrpcApiConfigSource(async_client_manager, api_config_source, scope), - EnvoyException, "envoy::api::v2::core::ConfigSource type must be GRPC:"); + EnvoyException, "envoy::api::v2::core::ConfigSource type must be gRPC:"); } { @@ -388,7 +391,8 @@ TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, GrpcClusterTestAcrossTy 
EXPECT_THROW_WITH_REGEX( Utility::checkApiConfigSourceSubscriptionBackingCluster(cluster_map, *api_config_source), EnvoyException, - "envoy::api::v2::core::ConfigSource::GRPC must not have a cluster name specified:"); + "envoy::api::v2::core::ConfigSource::.DELTA_.GRPC must not have a cluster name " + "specified:"); } TEST(CheckApiConfigSourceSubscriptionBackingClusterTest, RestClusterTestAcrossTypes) { diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5720162173452288 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5720162173452288 new file mode 100644 index 0000000000000..cdd0aad714ba7 --- /dev/null +++ b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5720162173452288 @@ -0,0 +1 @@ +actions { new_stream { request_headers { headers { key: ":path" } headers { key: ":method" } headers { key: "transfer-encodinG\0 " } } } } diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index ca23aa5060c50..f0c4b6ccfbac9 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -37,23 +37,7 @@ namespace Http { constexpr bool DebugMode = false; Http::TestHeaderMapImpl fromSanitizedHeaders(const test::fuzz::Headers& headers) { - // When we are injecting headers, we don't allow the key to ever be empty, - // since calling code is not supposed to do this. Also disallowed - // transfer-encoding. - test::fuzz::Headers sanitized_headers; - for (const auto& header : headers.headers()) { - const std::string key = StringUtil::toLower(header.key()); - - if (key == "transfer-encoding") { - continue; - } - - auto* sane_header = sanitized_headers.add_headers(); - sane_header->set_key(key.empty() ? 
"non-empty" : key); - sane_header->set_value(header.value()); - } - - return Fuzz::fromHeaders(sanitized_headers); + return Fuzz::fromHeaders(headers, {"transfer-encoding"}); } // Convert from test proto Http1ServerSettings to Http1Settings. @@ -385,8 +369,8 @@ void codecFuzz(const test::common::http::CodecImplFuzzTestCase& input, HttpVersi max_request_headers_kb); } else { const Http1Settings server_http1settings{fromHttp1Settings(input.h1_settings().server())}; - server = absl::make_unique(server_connection, server_callbacks, - server_http1settings); + server = absl::make_unique( + server_connection, server_callbacks, server_http1settings, max_request_headers_kb); } ReorderBuffer client_write_buf{*server}; diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 6a2f9c20ab11d..716ceff9a8ca4 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -31,7 +31,8 @@ namespace Http1 { class Http1ServerConnectionImplTest : public testing::Test { public: void initialize() { - codec_ = std::make_unique(connection_, callbacks_, codec_settings_); + codec_ = std::make_unique(connection_, callbacks_, codec_settings_, + max_request_headers_kb_); } NiceMock connection_; @@ -42,6 +43,9 @@ class Http1ServerConnectionImplTest : public testing::Test { void expectHeadersTest(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer, TestHeaderMapImpl& expected_headers); void expect400(Protocol p, bool allow_absolute_url, Buffer::OwnedImpl& buffer); + +protected: + uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; }; void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_url, @@ -53,7 +57,8 @@ void Http1ServerConnectionImplTest::expect400(Protocol p, bool allow_absolute_ur if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique(connection_, callbacks_, codec_settings_); + codec_ = 
std::make_unique(connection_, callbacks_, codec_settings_, + max_request_headers_kb_); } Http::MockStreamDecoder decoder; @@ -72,7 +77,8 @@ void Http1ServerConnectionImplTest::expectHeadersTest(Protocol p, bool allow_abs // Make a new 'codec' with the right settings if (allow_absolute_url) { codec_settings_.allow_absolute_url_ = allow_absolute_url; - codec_ = std::make_unique(connection_, callbacks_, codec_settings_); + codec_ = std::make_unique(connection_, callbacks_, codec_settings_, + max_request_headers_kb_); } Http::MockStreamDecoder decoder; @@ -1006,9 +1012,8 @@ TEST_F(Http1ClientConnectionImplTest, HighwatermarkMultipleResponses) { static_cast(codec_.get()) ->onUnderlyingConnectionBelowWriteBufferLowWatermark(); } - -// For issue #1421 regression test that Envoy's HTTP parser applies header limits early. -TEST_F(Http1ServerConnectionImplTest, TestCodecHeaderLimits) { +TEST_F(Http1ServerConnectionImplTest, TestLargeRequestHeadersRejected) { + // Default limit of 60 KiB initialize(); std::string exception_reason; @@ -1022,14 +1027,102 @@ TEST_F(Http1ServerConnectionImplTest, TestCodecHeaderLimits) { Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); codec_->dispatch(buffer); - std::string long_string = "foo: " + std::string(1024, 'q') + "\r\n"; - for (int i = 0; i < 79; ++i) { - buffer = Buffer::OwnedImpl(long_string); + std::string long_string = "big: " + std::string(60 * 1024, 'q') + "\r\n"; + buffer = Buffer::OwnedImpl(long_string); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); +} + +TEST_F(Http1ServerConnectionImplTest, TestLargeRequestHeadersSplitRejected) { + // Default limit of 60 KiB + initialize(); + + std::string exception_reason; + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + 
Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); + codec_->dispatch(buffer); + + std::string long_string = std::string(1024, 'q'); + for (int i = 0; i < 59; i++) { + buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); codec_->dispatch(buffer); } + // the 60th 1kb header should induce overflow + buffer = Buffer::OwnedImpl(fmt::format("big: {}\r\n", long_string)); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); +} + +TEST_F(Http1ServerConnectionImplTest, TestLargeRequestHeadersAccepted) { + max_request_headers_kb_ = 65; + initialize(); + + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); + codec_->dispatch(buffer); + std::string long_string = "big: " + std::string(64 * 1024, 'q') + "\r\n"; + buffer = Buffer::OwnedImpl(long_string); + codec_->dispatch(buffer); +} + +TEST_F(Http1ServerConnectionImplTest, TestLargeRequestHeadersAcceptedMaxConfigurable) { + max_request_headers_kb_ = 96; + initialize(); + + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n"); + codec_->dispatch(buffer); + std::string long_string = "big: " + std::string(95 * 1024, 'q') + "\r\n"; buffer = Buffer::OwnedImpl(long_string); - EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, - "http/1.1 protocol error: HPE_HEADER_OVERFLOW"); + codec_->dispatch(buffer); +} + +TEST_F(Http1ClientConnectionImplTest, TestLargeResponseHeadersRejected) { + initialize(); + + NiceMock response_decoder; + Http::StreamEncoder& 
request_encoder = codec_->newStream(response_decoder); + TestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); + codec_->dispatch(buffer); + std::string long_header = "big: " + std::string(80 * 1024, 'q') + "\r\n"; + buffer = Buffer::OwnedImpl(long_header); + EXPECT_THROW_WITH_MESSAGE(codec_->dispatch(buffer), EnvoyException, "headers size exceeds limit"); +} + +TEST_F(Http1ClientConnectionImplTest, TestLargeResponseHeadersAccepted) { + initialize(); + + NiceMock response_decoder; + Http::StreamEncoder& request_encoder = codec_->newStream(response_decoder); + TestHeaderMapImpl headers{{":method", "GET"}, {":path", "/"}, {":authority", "host"}}; + request_encoder.encodeHeaders(headers, true); + + Buffer::OwnedImpl buffer("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n"); + codec_->dispatch(buffer); + std::string long_header = "big: " + std::string(79 * 1024, 'q') + "\r\n"; + buffer = Buffer::OwnedImpl(long_header); + codec_->dispatch(buffer); } } // namespace Http1 diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index cbddc096fcae6..ed06a4734b1bd 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -295,7 +295,7 @@ TEST_F(Http1ConnPoolImplTest, MultipleRequestAndResponse) { TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { cluster_->resetResourceManager(1, 1, 1024, 1); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + EXPECT_FALSE(cluster_->circuit_breakers_stats_.rq_pending_open_.value()); NiceMock outer_decoder; ConnPoolCallbacks callbacks; @@ -309,7 +309,7 @@ TEST_F(Http1ConnPoolImplTest, MaxPendingRequests) { Http::ConnectionPool::Cancellable* handle2 = conn_pool_.newStream(outer_decoder2, callbacks2); EXPECT_EQ(nullptr, handle2); - EXPECT_EQ(1U, 
cluster_->circuit_breakers_stats_.rq_pending_open_.value()); + EXPECT_TRUE(cluster_->circuit_breakers_stats_.rq_pending_open_.value()); handle->cancel(); @@ -436,7 +436,7 @@ TEST_F(Http1ConnPoolImplTest, DisconnectWhileBound) { TEST_F(Http1ConnPoolImplTest, MaxConnections) { InSequence s; - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.cx_open_.value()); + EXPECT_FALSE(cluster_->circuit_breakers_stats_.cx_open_.value()); // Request 1 should kick off a new connection. NiceMock outer_decoder1; @@ -451,7 +451,7 @@ TEST_F(Http1ConnPoolImplTest, MaxConnections) { ConnPoolCallbacks callbacks2; handle = conn_pool_.newStream(outer_decoder2, callbacks2); EXPECT_EQ(1U, cluster_->stats_.upstream_cx_overflow_.value()); - EXPECT_EQ(1U, cluster_->circuit_breakers_stats_.cx_open_.value()); + EXPECT_TRUE(cluster_->circuit_breakers_stats_.cx_open_.value()); EXPECT_NE(nullptr, handle); diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 49fc32a6938ee..4cc7579ae629a 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -66,7 +66,7 @@ class Http2CodecImplTest : public testing::TestWithParam setupDefaultConnectionMocks(); EXPECT_CALL(server_callbacks_, newStream(_, _)) - .WillOnce(Invoke([&](StreamEncoder& encoder, bool) -> StreamDecoder& { + .WillRepeatedly(Invoke([&](StreamEncoder& encoder, bool) -> StreamDecoder& { response_encoder_ = &encoder; encoder.getStream().addCallbacks(server_stream_callbacks_); return request_decoder_; @@ -865,7 +865,7 @@ TEST(Http2CodecUtility, reconstituteCrumbledCookies) { } } -TEST_P(Http2CodecImplTest, TestLargeHeadersInvokeResetStream) { +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersInvokeResetStream) { initialize(); TestHeaderMapImpl request_headers; @@ -876,7 +876,7 @@ TEST_P(Http2CodecImplTest, TestLargeHeadersInvokeResetStream) { request_encoder_->encodeHeaders(request_headers, false); } -TEST_P(Http2CodecImplTest, 
TestLargeHeadersAcceptedIfConfigured) { +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersAccepted) { max_request_headers_kb_ = 64; initialize(); @@ -890,7 +890,7 @@ TEST_P(Http2CodecImplTest, TestLargeHeadersAcceptedIfConfigured) { request_encoder_->encodeHeaders(request_headers, false); } -TEST_P(Http2CodecImplTest, TestLargeHeadersAtLimitAccepted) { +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersAtLimitAccepted) { uint32_t codec_limit_kb = 64; max_request_headers_kb_ = codec_limit_kb; initialize(); @@ -913,6 +913,74 @@ TEST_P(Http2CodecImplTest, TestLargeHeadersAtLimitAccepted) { request_encoder_->encodeHeaders(request_headers, true); } +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersOverDefaultCodecLibraryLimit) { + max_request_headers_kb_ = 66; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(65 * 1024, 'q'); + request_headers.addCopy("big", long_string); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersExceedPerHeaderLimit) { + // The name-value pair max is set by NGHTTP2_HD_MAX_NV in lib/nghttp2_hd.h to 64KB, and + // creates a per-request header limit for us in h2. Note that the nghttp2 + // calculated byte size will differ from envoy due to H2 compression and frames. 
+ + max_request_headers_kb_ = 81; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(80 * 1024, 'q'); + request_headers.addCopy("big", long_string); + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(0); + EXPECT_CALL(client_callbacks_, onGoAway()); + server_->shutdownNotice(); + server_->goAway(); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, TestManyLargeRequestHeadersUnderPerHeaderLimit) { + max_request_headers_kb_ = 81; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(1024, 'q'); + for (int i = 0; i < 80; i++) { + request_headers.addCopy(fmt::format("{}", i), long_string); + } + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + +TEST_P(Http2CodecImplTest, TestLargeRequestHeadersAtMaxConfigurable) { + // Raising the limit past this triggers some unexpected nghttp2 error. + // Further debugging required to increase past ~96 KiB. 
+ max_request_headers_kb_ = 96; + initialize(); + + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + std::string long_string = std::string(1024, 'q'); + for (int i = 0; i < 95; i++) { + request_headers.addCopy(fmt::format("{}", i), long_string); + } + + EXPECT_CALL(request_decoder_, decodeHeaders_(_, _)).Times(1); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_)).Times(0); + request_encoder_->encodeHeaders(request_headers, true); +} + TEST_P(Http2CodecImplTest, TestCodecHeaderCompression) { initialize(); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 75327b04e9684..95917de1f1bad 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -440,7 +440,7 @@ TEST_F(Http2ConnPoolImplTest, LocalReset) { EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_tx_reset_.value()); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_FALSE(cluster_->circuit_breakers_stats_.rq_open_.value()); } TEST_F(Http2ConnPoolImplTest, RemoteReset) { @@ -581,7 +581,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { InSequence s; - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_FALSE(cluster_->circuit_breakers_stats_.rq_open_.value()); expectClientCreate(); ActiveTestRequest r1(*this, 0, false); @@ -591,7 +591,7 @@ TEST_F(Http2ConnPoolImplTest, ConnectTimeout) { EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); - EXPECT_EQ(0U, cluster_->circuit_breakers_stats_.rq_open_.value()); + EXPECT_FALSE(cluster_->circuit_breakers_stats_.rq_open_.value()); expectClientCreate(); ActiveTestRequest r2(*this, 1, false); diff --git a/test/common/router/retry_state_impl_test.cc b/test/common/router/retry_state_impl_test.cc index 
b126f70cca3f1..309b6592dca69 100644 --- a/test/common/router/retry_state_impl_test.cc +++ b/test/common/router/retry_state_impl_test.cc @@ -459,7 +459,7 @@ TEST_F(RouterRetryStateImplTest, MaxRetriesHeader) { EXPECT_CALL(callback_ready_, ready()); retry_timer_->callback_(); - EXPECT_EQ(1UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value()); + EXPECT_TRUE(cluster_.circuit_breakers_stats_.rq_retry_open_.value()); EXPECT_EQ(RetryStatus::NoRetryLimitExceeded, state_->shouldRetryReset(connect_failure_, callback_)); @@ -499,7 +499,7 @@ TEST_F(RouterRetryStateImplTest, Backoff) { EXPECT_EQ(3UL, cluster_.stats().upstream_rq_retry_.value()); EXPECT_EQ(1UL, cluster_.stats().upstream_rq_retry_success_.value()); - EXPECT_EQ(0UL, cluster_.circuit_breakers_stats_.rq_retry_open_.value()); + EXPECT_FALSE(cluster_.circuit_breakers_stats_.rq_retry_open_.value()); } TEST_F(RouterRetryStateImplTest, HostSelectionAttempts) { diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 83cad58665f41..cc46c889a6923 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -31,16 +31,19 @@ #include "gtest/gtest.h" using testing::_; +using testing::Args; using testing::AssertionFailure; using testing::AssertionResult; using testing::AssertionSuccess; using testing::AtLeast; +using testing::Eq; using testing::InSequence; using testing::Invoke; using testing::Matcher; using testing::MockFunction; using testing::NiceMock; using testing::Ref; +using testing::ResultOf; using testing::Return; using testing::ReturnPointee; using testing::ReturnRef; @@ -209,6 +212,14 @@ class RouterTestBase : public testing::Test { ON_CALL(callbacks_, connection()).WillByDefault(Return(&connection_)); } + void enableHedgeOnPerTryTimeout() { + callbacks_.route_->route_entry_.hedge_policy_.initial_requests_ = 1; + callbacks_.route_->route_entry_.hedge_policy_.hedge_on_per_try_timeout_ = true; + 
callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_ = envoy::type::FractionalPercent{}; + callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_.set_numerator(0); + callbacks_.route_->route_entry_.hedge_policy_.additional_request_chance_.set_denominator(envoy::type::FractionalPercent::HUNDRED); + } + Event::SimulatedTimeSystem test_time_; std::string upstream_zone_{"to_az"}; envoy::api::v2::core::Locality upstream_locality_; @@ -812,6 +823,7 @@ TEST_F(RouterTest, NoRetriesOverflow) { Http::TestHeaderMapImpl headers{{"x-envoy-retry-on", "5xx"}, {"x-envoy-internal", "true"}}; HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); // 5xx response. @@ -1162,6 +1174,7 @@ TEST_F(RouterTest, UpstreamPerTryTimeout) { EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); EXPECT_CALL(callbacks_, encodeData(_, true)); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + per_try_timeout_->callback_(); EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_ @@ -1225,6 +1238,170 @@ TEST_F(RouterTest, UpstreamPerTryTimeoutExcludesNewStream) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +// Tests that a retry is sent after the first request hits the per try timeout, but then +// headers received in response to the first request are still used (and the 2nd request +// canceled). 
+TEST_F(RouterTest, HedgedPerTryTimeoutFirstRequestSucceeds) { + enableHedgeOnPerTryTimeout(); + + NiceMock encoder1; + Http::StreamDecoder* response_decoder1 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder1 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); + return nullptr; + })); + expectResponseTimerCreate(); + expectPerTryTimerCreate(); + + Http::TestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + + NiceMock encoder2; + Http::StreamDecoder* response_decoder2 = nullptr; + router_.retry_state_->expectHedgedPerTryTimeoutRetry(); + per_try_timeout_->callback_(); + + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder2 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_); + return nullptr; + })); + expectPerTryTimerCreate(); + router_.retry_state_->callback_(); + + // We should not have updated any stats yet because no requests have been + // canceled + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + + // Now write a 200 back. We expect the 2nd stream to be reset and stats to be + // incremented properly. 
+ Http::HeaderMapPtr response_headers(new Http::TestHeaderMapImpl{{":status", "200"}}); + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + EXPECT_CALL(encoder2.stream_, resetStream(_)); + + EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool end_stream) + -> void { + EXPECT_EQ(headers.Status()->value(), "200"); + EXPECT_TRUE(end_stream); + })); + response_decoder1->decodeHeaders(std::move(response_headers), true); + EXPECT_TRUE(verifyHostUpstreamStats(1, 0)); + EXPECT_EQ(1, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(1, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); +} + +// Three requests sent: 1) 5xx error, 2) per try timeout, 3) gets good response +// headers. +TEST_F(RouterTest, HedgedPerTryTimeoutThirdRequestSucceeds) { + enableHedgeOnPerTryTimeout(); + + NiceMock encoder1; + Http::StreamDecoder* response_decoder1 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder1 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); + return nullptr; + })); + expectResponseTimerCreate(); + expectPerTryTimerCreate(); + + Http::TestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + + Http::HeaderMapPtr response_headers1(new Http::TestHeaderMapImpl{{":status", "500"}}); + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(500)); + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + EXPECT_CALL(callbacks_, 
encodeHeaders_(_, _)).Times(0); + router_.retry_state_->expectHeadersRetry(); + response_decoder1->decodeHeaders(std::move(response_headers1), true); + + NiceMock encoder2; + Http::StreamDecoder* response_decoder2 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder2 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_); + return nullptr; + })); + expectPerTryTimerCreate(); + router_.retry_state_->callback_(); + + EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + EXPECT_EQ(0, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(0, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); + + // Now trigger a per try timeout on the 2nd request, expect a 3rd + router_.retry_state_->expectHedgedPerTryTimeoutRetry(); + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + NiceMock encoder3; + Http::StreamDecoder* response_decoder3 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder3 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder3, cm_.conn_pool_.host_); + return nullptr; + })); + + EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0); + per_try_timeout_->callback_(); + expectPerTryTimerCreate(); + router_.retry_state_->callback_(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); + EXPECT_EQ(0, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(0, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); + + // Now write a 200 
back. We expect the 2nd stream to be reset and stats to be + // incremented properly. + Http::HeaderMapPtr response_headers2(new Http::TestHeaderMapImpl{{":status", "200"}}); + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(200)); + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + EXPECT_CALL(encoder2.stream_, resetStream(_)); + EXPECT_CALL(encoder3.stream_, resetStream(_)).Times(0); + + EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool end_stream) + -> void { + EXPECT_EQ(headers.Status()->value(), "200"); + EXPECT_TRUE(end_stream); + })); + EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); + response_decoder3->decodeHeaders(std::move(response_headers2), true); + EXPECT_TRUE(verifyHostUpstreamStats(1, 1)); + EXPECT_EQ(1, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(1, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); +} + TEST_F(RouterTest, RetryRequestNotComplete) { NiceMock encoder1; Http::StreamDecoder* response_decoder = nullptr; @@ -1252,6 +1429,75 @@ TEST_F(RouterTest, RetryRequestNotComplete) { EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); } +// Two requests are sent (slow request + hedged retry) and then global timeout +// is hit. Verify everything gets cleaned up. 
+TEST_F(RouterTest, HedgedPerTryTimeoutGlobalTimeout) { + enableHedgeOnPerTryTimeout(); + + NiceMock encoder1; + Http::StreamDecoder* response_decoder1 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder1 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); + return nullptr; + })); + expectResponseTimerCreate(); + expectPerTryTimerCreate(); + + Http::TestHeaderMapImpl headers{{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}}; + HttpTestUtility::addDefaultHeaders(headers); + router_.decodeHeaders(headers, true); + + EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(0); + EXPECT_CALL(callbacks_, encodeHeaders_(_, _)).Times(0); + router_.retry_state_->expectHedgedPerTryTimeoutRetry(); + per_try_timeout_->callback_(); + + NiceMock encoder2; + Http::StreamDecoder* response_decoder2 = nullptr; + EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) + -> Http::ConnectionPool::Cancellable* { + response_decoder2 = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); + callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_); + return nullptr; + })); + expectPerTryTimerCreate(); + router_.retry_state_->callback_(); + + EXPECT_TRUE(verifyHostUpstreamStats(0, 0)); + EXPECT_EQ(0, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(0, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); + + // Now trigger global timeout, expect everything to be reset + EXPECT_CALL(encoder1.stream_, resetStream(_)).Times(1); + EXPECT_CALL(encoder2.stream_, resetStream(_)).Times(1); + 
EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); + + EXPECT_CALL(callbacks_, encodeHeaders_(_, _)) + .WillOnce(Invoke([&](Http::HeaderMap& headers, bool) + -> void { + EXPECT_EQ(headers.Status()->value(), "504"); + })); + response_timeout_->callback_(); + EXPECT_TRUE(verifyHostUpstreamStats(0, 2)); + EXPECT_EQ(0, cm_.conn_pool_.host_->stats_store_.counter("rq_hedge_abandoned").value()); + EXPECT_EQ(2, cm_.conn_pool_.host_->stats_store_.counter("rq_timeout").value()); + EXPECT_EQ(0, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_hedge_abandoned") + .value()); + EXPECT_EQ(2, cm_.thread_local_cluster_.cluster_.info_->stats_store_ + .counter("upstream_rq_timeout") + .value()); +} + TEST_F(RouterTest, RetryNoneHealthy) { NiceMock encoder1; Http::StreamDecoder* response_decoder = nullptr; @@ -1338,6 +1584,7 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); callbacks.onPoolReady(encoder1, cm_.conn_pool_.host_); return nullptr; })); @@ -1350,7 +1597,6 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); - EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); router_.retry_state_->expectResetRetry(); EXPECT_CALL(cm_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(504)); per_try_timeout_->callback_(); @@ -1358,6 +1604,7 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { // We expect this reset to kick off a new request. 
NiceMock encoder2; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); EXPECT_CALL(cm_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { @@ -1366,9 +1613,9 @@ TEST_F(RouterTest, RetryUpstreamPerTryTimeout) { return nullptr; })); expectPerTryTimerCreate(); + router_.retry_state_->callback_(); - EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); // Normal response. EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); Http::HeaderMapPtr response_headers(new Http::TestHeaderMapImpl{{":status", "200"}}); @@ -1407,13 +1654,12 @@ TEST_F(RouterTest, RetryUpstreamConnectionFailure) { .WillOnce(Invoke([&](Http::StreamDecoder& decoder, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { response_decoder = &decoder; + EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); callbacks.onPoolReady(encoder2, cm_.conn_pool_.host_); return nullptr; })); router_.retry_state_->callback_(); - EXPECT_CALL(*router_.retry_state_, onHostAttempted(_)); - // Normal response. 
EXPECT_CALL(*router_.retry_state_, shouldRetryHeaders(_, _)).WillOnce(Return(RetryStatus::No)); Http::HeaderMapPtr response_headers(new Http::TestHeaderMapImpl{{":status", "200"}}); @@ -2412,6 +2658,38 @@ TEST_F(RouterTest, UpstreamTimingTimeout) { EXPECT_EQ(stream_info.firstUpstreamRxByteReceived().value(), std::chrono::milliseconds(56)); } +TEST(RouterFilterUtilityTest, FinalHedgingParams) { + { // no chance of additional request + NiceMock route; + route.hedge_policy_.initial_requests_ = 10; + EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_)); + FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, 0); + EXPECT_EQ(10, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 10); + EXPECT_EQ(10, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 100); + EXPECT_EQ(10, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 1000); + EXPECT_EQ(10, hedgingParams.initial_requests_); + } + { // 50% chance additional request + NiceMock route; + route.hedge_policy_.initial_requests_ = 10; + route.hedge_policy_.additional_request_chance_.set_numerator(50); + EXPECT_CALL(route, hedgePolicy).WillRepeatedly(ReturnRef(route.hedge_policy_)); + FilterUtility::HedgingParams hedgingParams = FilterUtility::finalHedgingParams(route, 0); + EXPECT_EQ(11, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 49); + EXPECT_EQ(11, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 50); + EXPECT_EQ(10, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 99); + EXPECT_EQ(10, hedgingParams.initial_requests_); + hedgingParams = FilterUtility::finalHedgingParams(route, 100); + EXPECT_EQ(11, hedgingParams.initial_requests_); + } +} + TEST(RouterFilterUtilityTest, FinalTimeout) { { NiceMock route; 
diff --git a/test/common/runtime/runtime_impl_test.cc b/test/common/runtime/runtime_impl_test.cc index 6cb66c3d17856..6e26b4893b106 100644 --- a/test/common/runtime/runtime_impl_test.cc +++ b/test/common/runtime/runtime_impl_test.cc @@ -250,22 +250,22 @@ void testNewOverrides(Loader& loader, Stats::Store& store) { // New string loader.mergeValues({{"foo", "bar"}}); EXPECT_EQ("bar", loader.snapshot().get("foo")); - EXPECT_EQ(1, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_TRUE(store.boolIndicator("runtime.admin_overrides_active").value()); // Remove new string loader.mergeValues({{"foo", ""}}); EXPECT_EQ("", loader.snapshot().get("foo")); - EXPECT_EQ(0, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_FALSE(store.boolIndicator("runtime.admin_overrides_active").value()); // New integer loader.mergeValues({{"baz", "42"}}); EXPECT_EQ(42, loader.snapshot().getInteger("baz", 0)); - EXPECT_EQ(1, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_TRUE(store.boolIndicator("runtime.admin_overrides_active").value()); // Remove new integer loader.mergeValues({{"baz", ""}}); EXPECT_EQ(0, loader.snapshot().getInteger("baz", 0)); - EXPECT_EQ(0, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_FALSE(store.boolIndicator("runtime.admin_overrides_active").value()); } TEST_F(DiskBackedLoaderImplTest, mergeValues) { @@ -276,32 +276,32 @@ TEST_F(DiskBackedLoaderImplTest, mergeValues) { // Override string loader->mergeValues({{"file2", "new world"}}); EXPECT_EQ("new world", loader->snapshot().get("file2")); - EXPECT_EQ(1, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_TRUE(store.boolIndicator("runtime.admin_overrides_active").value()); // Remove overridden string loader->mergeValues({{"file2", ""}}); EXPECT_EQ("world", loader->snapshot().get("file2")); - EXPECT_EQ(0, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_FALSE(store.boolIndicator("runtime.admin_overrides_active").value()); // 
Override integer loader->mergeValues({{"file3", "42"}}); EXPECT_EQ(42, loader->snapshot().getInteger("file3", 1)); - EXPECT_EQ(1, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_TRUE(store.boolIndicator("runtime.admin_overrides_active").value()); // Remove overridden integer loader->mergeValues({{"file3", ""}}); EXPECT_EQ(2, loader->snapshot().getInteger("file3", 1)); - EXPECT_EQ(0, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_FALSE(store.boolIndicator("runtime.admin_overrides_active").value()); // Override override string loader->mergeValues({{"file1", "hello overridden override"}}); EXPECT_EQ("hello overridden override", loader->snapshot().get("file1")); - EXPECT_EQ(1, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_TRUE(store.boolIndicator("runtime.admin_overrides_active").value()); // Remove overridden override string loader->mergeValues({{"file1", ""}}); EXPECT_EQ("hello override", loader->snapshot().get("file1")); - EXPECT_EQ(0, store.gauge("runtime.admin_overrides_active").value()); + EXPECT_FALSE(store.boolIndicator("runtime.admin_overrides_active").value()); } TEST(LoaderImplTest, All) { diff --git a/test/common/stats/isolated_store_impl_test.cc b/test/common/stats/isolated_store_impl_test.cc index b2aaa80693fdd..a6401a063aa4a 100644 --- a/test/common/stats/isolated_store_impl_test.cc +++ b/test/common/stats/isolated_store_impl_test.cc @@ -22,7 +22,7 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ("c1", c1.tagExtractedName()); EXPECT_EQ("scope1.c2", c2.tagExtractedName()); EXPECT_EQ(0, c1.tags().size()); - EXPECT_EQ(0, c1.tags().size()); + EXPECT_EQ(0, c2.tags().size()); Gauge& g1 = store.gauge("g1"); Gauge& g2 = scope1->gauge("g2"); @@ -31,7 +31,16 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ("g1", g1.tagExtractedName()); EXPECT_EQ("scope1.g2", g2.tagExtractedName()); EXPECT_EQ(0, g1.tags().size()); - EXPECT_EQ(0, g1.tags().size()); + EXPECT_EQ(0, g2.tags().size()); + + BoolIndicator& b1 = 
store.boolIndicator("b1"); + BoolIndicator& b2 = scope1->boolIndicator("b2"); + EXPECT_EQ("b1", b1.name()); + EXPECT_EQ("scope1.b2", b2.name()); + EXPECT_EQ("b1", b1.tagExtractedName()); + EXPECT_EQ("scope1.b2", b2.tagExtractedName()); + EXPECT_EQ(0, b1.tags().size()); + EXPECT_EQ(0, b2.tags().size()); Histogram& h1 = store.histogram("h1"); Histogram& h2 = scope1->histogram("h2"); @@ -54,6 +63,7 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(4UL, store.counters().size()); EXPECT_EQ(2UL, store.gauges().size()); + EXPECT_EQ(2UL, store.boolIndicators().size()); } TEST(StatsIsolatedStoreImplTest, LongStatName) { @@ -70,20 +80,23 @@ TEST(StatsIsolatedStoreImplTest, LongStatName) { * Test stats macros. @see stats_macros.h */ // clang-format off -#define ALL_TEST_STATS(COUNTER, GAUGE, HISTOGRAM) \ - COUNTER (test_counter) \ - GAUGE (test_gauge) \ - HISTOGRAM(test_histogram) +#define ALL_TEST_STATS(COUNTER, GAUGE, BOOL_INDICATOR, HISTOGRAM) \ + COUNTER (test_counter) \ + GAUGE (test_gauge) \ + BOOL_INDICATOR(test_bool_indicator) \ + HISTOGRAM (test_histogram) // clang-format on struct TestStats { - ALL_TEST_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) + ALL_TEST_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_BOOL_INDICATOR_STRUCT, + GENERATE_HISTOGRAM_STRUCT) }; TEST(StatsMacros, All) { IsolatedStoreImpl stats_store; TestStats test_stats{ALL_TEST_STATS(POOL_COUNTER_PREFIX(stats_store, "test."), POOL_GAUGE_PREFIX(stats_store, "test."), + POOL_BOOL_INDICATOR_PREFIX(stats_store, "test."), POOL_HISTOGRAM_PREFIX(stats_store, "test."))}; Counter& counter = test_stats.test_counter_; @@ -92,6 +105,9 @@ TEST(StatsMacros, All) { Gauge& gauge = test_stats.test_gauge_; EXPECT_EQ("test.test_gauge", gauge.name()); + BoolIndicator& boolIndicator = test_stats.test_bool_indicator_; + EXPECT_EQ("test.test_bool_indicator", boolIndicator.name()); + Histogram& histogram = test_stats.test_histogram_; EXPECT_EQ("test.test_histogram", 
histogram.name()); } diff --git a/test/common/stats/source_impl_test.cc b/test/common/stats/source_impl_test.cc index bf5b15851d308..76f86905c326b 100644 --- a/test/common/stats/source_impl_test.cc +++ b/test/common/stats/source_impl_test.cc @@ -17,10 +17,12 @@ TEST(SourceImplTest, Caching) { NiceMock store; std::vector stored_counters; std::vector stored_gauges; + std::vector stored_bools; std::vector stored_histograms; ON_CALL(store, counters()).WillByDefault(ReturnPointee(&stored_counters)); ON_CALL(store, gauges()).WillByDefault(ReturnPointee(&stored_gauges)); + ON_CALL(store, boolIndicators()).WillByDefault(ReturnPointee(&stored_bools)); ON_CALL(store, histograms()).WillByDefault(ReturnPointee(&stored_histograms)); SourceImpl source(store); @@ -36,6 +38,11 @@ TEST(SourceImplTest, Caching) { stored_gauges.push_back(std::make_shared()); EXPECT_NE(source.cachedGauges(), stored_gauges); + stored_bools.push_back(std::make_shared()); + EXPECT_EQ(source.cachedBoolIndicators(), stored_bools); + stored_bools.push_back(std::make_shared()); + EXPECT_NE(source.cachedBoolIndicators(), stored_bools); + stored_histograms.push_back(std::make_shared()); EXPECT_EQ(source.cachedHistograms(), stored_histograms); stored_histograms.push_back(std::make_shared()); @@ -45,6 +52,7 @@ TEST(SourceImplTest, Caching) { source.clearCache(); EXPECT_EQ(source.cachedCounters(), stored_counters); EXPECT_EQ(source.cachedGauges(), stored_gauges); + EXPECT_EQ(source.cachedBoolIndicators(), stored_bools); EXPECT_EQ(source.cachedHistograms(), stored_histograms); } diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index dddfea3e6146e..1b47ff6128ce0 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -181,7 +181,7 @@ class HistogramTest : public testing::Test { TEST_F(StatsThreadLocalStoreTest, NoTls) { InSequence s; - EXPECT_CALL(*alloc_, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, 
alloc(_)).Times(3); Counter& c1 = store_->counter("c1"); EXPECT_EQ(&c1, &store_->counter("c1")); @@ -189,6 +189,9 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { Gauge& g1 = store_->gauge("g1"); EXPECT_EQ(&g1, &store_->gauge("g1")); + BoolIndicator& b1 = store_->boolIndicator("b1"); + EXPECT_EQ(&b1, &store_->boolIndicator("b1")); + Histogram& h1 = store_->histogram("h1"); EXPECT_EQ(&h1, &store_->histogram("h1")); @@ -203,9 +206,12 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { EXPECT_EQ(1UL, store_->gauges().size()); EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1 EXPECT_EQ(2L, store_->gauges().front().use_count()); + EXPECT_EQ(1UL, store_->boolIndicators().size()); + EXPECT_EQ(&b1, store_->boolIndicators().front().get()); // front() ok when size()==1 + EXPECT_EQ(2L, store_->boolIndicators().front().use_count()); // Includes overflow stat. - EXPECT_CALL(*alloc_, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(4); store_->shutdownThreading(); } @@ -214,7 +220,7 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - EXPECT_CALL(*alloc_, alloc(_)).Times(2); + EXPECT_CALL(*alloc_, alloc(_)).Times(3); Counter& c1 = store_->counter("c1"); EXPECT_EQ(&c1, &store_->counter("c1")); @@ -222,6 +228,9 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { Gauge& g1 = store_->gauge("g1"); EXPECT_EQ(&g1, &store_->gauge("g1")); + BoolIndicator& b1 = store_->boolIndicator("b1"); + EXPECT_EQ(&b1, &store_->boolIndicator("b1")); + Histogram& h1 = store_->histogram("h1"); EXPECT_EQ(&h1, &store_->histogram("h1")); @@ -231,6 +240,9 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { EXPECT_EQ(1UL, store_->gauges().size()); EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1 EXPECT_EQ(3L, store_->gauges().front().use_count()); + EXPECT_EQ(1UL, store_->boolIndicators().size()); + EXPECT_EQ(&b1, store_->boolIndicators().front().get()); // front() ok when size()==1 + EXPECT_EQ(3L, 
store_->boolIndicators().front().use_count()); store_->shutdownThreading(); tls_.shutdownThread(); @@ -241,9 +253,12 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { EXPECT_EQ(1UL, store_->gauges().size()); EXPECT_EQ(&g1, store_->gauges().front().get()); // front() ok when size()==1 EXPECT_EQ(2L, store_->gauges().front().use_count()); + EXPECT_EQ(1UL, store_->boolIndicators().size()); + EXPECT_EQ(&b1, store_->boolIndicators().front().get()); // front() ok when size()==1 + EXPECT_EQ(2L, store_->boolIndicators().front().use_count()); // Includes overflow stat. - EXPECT_CALL(*alloc_, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(4); } TEST_F(StatsThreadLocalStoreTest, BasicScope) { @@ -251,7 +266,7 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { store_->initializeThreading(main_thread_dispatcher_, tls_); ScopePtr scope1 = store_->createScope("scope1."); - EXPECT_CALL(*alloc_, alloc(_)).Times(4); + EXPECT_CALL(*alloc_, alloc(_)).Times(6); Counter& c1 = store_->counter("c1"); Counter& c2 = scope1->counter("c2"); EXPECT_EQ("c1", c1.name()); @@ -262,6 +277,11 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); + BoolIndicator& b1 = store_->boolIndicator("b1"); + BoolIndicator& b2 = scope1->boolIndicator("b2"); + EXPECT_EQ("b1", b1.name()); + EXPECT_EQ("scope1.b2", b2.name()); + Histogram& h1 = store_->histogram("h1"); Histogram& h2 = scope1->histogram("h2"); EXPECT_EQ("h1", h1.name()); @@ -277,7 +297,7 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*alloc_, free(_)).Times(5); + EXPECT_CALL(*alloc_, free(_)).Times(7); } // Validate that we sanitize away bad characters in the stats prefix. 
@@ -352,11 +372,15 @@ TEST_F(StatsThreadLocalStoreTest, NestedScopes) { Gauge& g1 = scope2->gauge("some_gauge"); EXPECT_EQ("scope1.foo.some_gauge", g1.name()); + EXPECT_CALL(*alloc_, alloc(_)); + BoolIndicator& b1 = scope2->boolIndicator("some_bool"); + EXPECT_EQ("scope1.foo.some_bool", b1.name()); + store_->shutdownThreading(); tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*alloc_, free(_)).Times(4); + EXPECT_CALL(*alloc_, free(_)).Times(5); } TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { @@ -396,8 +420,21 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { EXPECT_EQ(1UL, g2.value()); EXPECT_EQ(1UL, store_->gauges().size()); + // Bools should work just like gauges. + EXPECT_CALL(*alloc_, alloc(_)).Times(2); + BoolIndicator& b1 = scope1->boolIndicator("b"); + BoolIndicator& b2 = scope2->boolIndicator("b"); + EXPECT_NE(&b1, &b2); + b1.set(true); + EXPECT_EQ(1, b1.value()); + EXPECT_EQ(1, b2.value()); + b2.set(false); + EXPECT_EQ(0, b1.value()); + EXPECT_EQ(0, b2.value()); + EXPECT_EQ(1UL, store_->boolIndicators().size()); + // Deleting scope 1 will call free but will be reference counted. It still leaves scope 2 valid. - EXPECT_CALL(*alloc_, free(_)).Times(2); + EXPECT_CALL(*alloc_, free(_)).Times(7); scope1.reset(); c2.inc(); EXPECT_EQ(3UL, c2.value()); @@ -405,12 +442,47 @@ TEST_F(StatsThreadLocalStoreTest, OverlappingScopes) { g2.set(10); EXPECT_EQ(10UL, g2.value()); EXPECT_EQ(1UL, store_->gauges().size()); + b2.set(false); + EXPECT_EQ(0, b2.value()); + EXPECT_EQ(1UL, store_->boolIndicators().size()); store_->shutdownThreading(); tls_.shutdownThread(); +} +// Demonstrates that counters, gauges, and indicators are all mixed together in +// the shared memory, and not separated by type; only the name matters. +// This test is only here to reassure us that PR #5813, in the context of the current +// state of the Envoy codebase it is being submitted into, will not break hot restart! 
+// It is not meant to enforce this behavior as a desirable feature that must be kept. +TEST_F(StatsThreadLocalStoreTest, SameNameDifferentType) { + InSequence s; + store_->initializeThreading(main_thread_dispatcher_, tls_); + EXPECT_CALL(*alloc_, alloc(_)).Times(4); + + Counter& c1 = store_->counter("samename"); + EXPECT_EQ(&c1, &store_->counter("samename")); + Gauge& g1 = store_->gauge("samename"); + EXPECT_EQ(&g1, &store_->gauge("samename")); + c1.add(5); + EXPECT_EQ(5UL, c1.value()); + g1.add(3); + EXPECT_EQ(8UL, c1.value()); + + Gauge& g2 = store_->gauge("samename2"); + EXPECT_EQ(&g2, &store_->gauge("samename2")); + BoolIndicator& b1 = store_->boolIndicator("samename2"); + EXPECT_EQ(&b1, &store_->boolIndicator("samename2")); + g2.add(1); + EXPECT_EQ(1UL, g2.value()); + EXPECT_TRUE(b1.value()); + b1.set(false); + EXPECT_EQ(0UL, g2.value()); + + store_->shutdownThreading(); + tls_.shutdownThread(); // Includes overflow stat. - EXPECT_CALL(*alloc_, free(_)).Times(3); + EXPECT_CALL(*alloc_, free(_)).Times(5); } TEST_F(StatsThreadLocalStoreTest, AllocFailed) { @@ -521,6 +593,21 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { Gauge& noop_gauge_2 = store_->gauge("noop_gauge_2"); EXPECT_EQ(&noop_gauge, &noop_gauge_2); + // BoolIndicator + BoolIndicator& noop_bool = store_->boolIndicator("noop_bool"); + EXPECT_EQ(noop_bool.name(), ""); + EXPECT_EQ(0, noop_bool.value()); + noop_bool.set(true); + EXPECT_EQ(0, noop_bool.value()); + noop_bool.set(true); + EXPECT_EQ(0, noop_bool.value()); + noop_bool.set(false); + EXPECT_EQ(0, noop_bool.value()); + noop_bool.set(true); + EXPECT_EQ(0, noop_bool.value()); + BoolIndicator& noop_bool_2 = store_->boolIndicator("noop_bool_2"); + EXPECT_EQ(&noop_bool, &noop_bool_2); + // Histogram Histogram& noop_histogram = store_->histogram("noop_histogram"); EXPECT_EQ(noop_histogram.name(), ""); @@ -538,8 +625,9 @@ TEST_F(StatsMatcherTLSTest, TestNoOpStatImpls) { TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { InSequence s; - // Expected 
to alloc lowercase_counter, lowercase_gauge, valid_counter, valid_gauge - EXPECT_CALL(*alloc_, alloc(_)).Times(4); + // Expected to alloc lowercase_counter, lowercase_gauge, lowercase_bool, + // valid_counter, valid_gauge, valid_bool + EXPECT_CALL(*alloc_, alloc(_)).Times(6); // Will block all stats containing any capital alphanumeric letter. stats_config_.mutable_stats_matcher()->mutable_exclusion_list()->add_patterns()->set_regex( @@ -551,6 +639,8 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { EXPECT_EQ(lowercase_counter.name(), "lowercase_counter"); Gauge& lowercase_gauge = store_->gauge("lowercase_gauge"); EXPECT_EQ(lowercase_gauge.name(), "lowercase_gauge"); + BoolIndicator& lowercase_bool = store_->boolIndicator("lowercase_bool"); + EXPECT_EQ(lowercase_bool.name(), "lowercase_bool"); Histogram& lowercase_histogram = store_->histogram("lowercase_histogram"); EXPECT_EQ(lowercase_histogram.name(), "lowercase_histogram"); @@ -569,6 +659,11 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { uppercase_gauge.inc(); EXPECT_EQ(uppercase_gauge.value(), 0); + BoolIndicator& uppercase_bool = store_->boolIndicator("uppercase_BOOL"); + EXPECT_EQ(uppercase_bool.name(), ""); + uppercase_bool.set(true); + EXPECT_FALSE(uppercase_bool.value()); + // Histograms are harder to query and test, so we resort to testing that name() returns the empty // string. Histogram& uppercase_histogram = store_->histogram("upperCASE_histogram"); @@ -589,11 +684,11 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { EXPECT_EQ(invalid_counter.value(), 0); // But the old exclusion rule still holds. - Counter& invalid_counter_2 = store_->counter("also_INVALID_counter"); + Counter& invalid_counter_2 = store_->counter("also_INVLD_counter"); invalid_counter_2.inc(); EXPECT_EQ(invalid_counter_2.value(), 0); - // And we expect the same behavior from gauges and histograms. + // And we expect the same behavior from gauges, histograms, and bools. 
Gauge& valid_gauge = store_->gauge("valid_gauge"); valid_gauge.set(2); EXPECT_EQ(valid_gauge.value(), 2); @@ -602,10 +697,22 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { invalid_gauge_1.inc(); EXPECT_EQ(invalid_gauge_1.value(), 0); - Gauge& invalid_gauge_2 = store_->gauge("also_INVALID_gauge"); + Gauge& invalid_gauge_2 = store_->gauge("also_INVLD_gauge"); invalid_gauge_2.inc(); EXPECT_EQ(invalid_gauge_2.value(), 0); + BoolIndicator& valid_bool = store_->boolIndicator("valid_bool"); + valid_bool.set(true); + EXPECT_EQ(1, valid_bool.value()); + + BoolIndicator& invalid_bool_1 = store_->boolIndicator("invalid_bool"); + invalid_bool_1.set(true); + EXPECT_EQ(0, invalid_gauge_1.value()); + + BoolIndicator& invalid_bool_2 = store_->boolIndicator("also_INVLD_bool"); + invalid_bool_2.set(true); + EXPECT_EQ(0, invalid_bool_2.value()); + Histogram& valid_histogram = store_->histogram("valid_histogram"); EXPECT_EQ(valid_histogram.name(), "valid_histogram"); @@ -615,9 +722,9 @@ TEST_F(StatsMatcherTLSTest, TestExclusionRegex) { Histogram& invalid_histogram_2 = store_->histogram("also_INVALID_histogram"); EXPECT_EQ(invalid_histogram_2.name(), ""); - // Expected to free lowercase_counter, lowercase_gauge, valid_counter, - // valid_gauge, overflow.stats - EXPECT_CALL(*alloc_, free(_)).Times(5); + // Expected to free lowercase_counter, lowercase_gauge, lowercase_bool, + // valid_counter, valid_gauge, valid_bool, overflow.stats + EXPECT_CALL(*alloc_, free(_)).Times(7); store_->shutdownThreading(); } @@ -641,12 +748,15 @@ class HeapStatsThreadLocalStoreTest : public StatsThreadLocalStoreTest { TEST_F(HeapStatsThreadLocalStoreTest, RemoveRejectedStats) { Counter& counter = store_->counter("c1"); Gauge& gauge = store_->gauge("g1"); + BoolIndicator& boolIndicator = store_->boolIndicator("b1"); Histogram& histogram = store_->histogram("h1"); ASSERT_EQ(2, store_->counters().size()); // "stats.overflow" and "c1". 
EXPECT_TRUE(&counter == store_->counters()[0].get() || &counter == store_->counters()[1].get()); // counters() order is non-deterministic. ASSERT_EQ(1, store_->gauges().size()); EXPECT_EQ("g1", store_->gauges()[0]->name()); + ASSERT_EQ(1, store_->boolIndicators().size()); + EXPECT_EQ("b1", store_->boolIndicators()[0]->name()); ASSERT_EQ(1, store_->histograms().size()); EXPECT_EQ("h1", store_->histograms()[0]->name()); @@ -659,11 +769,13 @@ TEST_F(HeapStatsThreadLocalStoreTest, RemoveRejectedStats) { // They can no longer be found. EXPECT_EQ(0, store_->counters().size()); EXPECT_EQ(0, store_->gauges().size()); + EXPECT_EQ(0, store_->boolIndicators().size()); EXPECT_EQ(0, store_->histograms().size()); // However, referencing the previously allocated stats will not crash. counter.inc(); gauge.inc(); + boolIndicator.set(true); EXPECT_CALL(sink_, onHistogramComplete(Ref(histogram), 42)); histogram.recordValue(42); } @@ -733,23 +845,27 @@ TEST_F(StatsThreadLocalStoreTest, ShuttingDown) { InSequence s; store_->initializeThreading(main_thread_dispatcher_, tls_); - EXPECT_CALL(*alloc_, alloc(_)).Times(4); + EXPECT_CALL(*alloc_, alloc(_)).Times(6); store_->counter("c1"); store_->gauge("g1"); + store_->boolIndicator("b1"); store_->shutdownThreading(); store_->counter("c2"); store_->gauge("g2"); + store_->boolIndicator("b2"); - // c1, g1 should have a thread local ref, but c2, g2 should not. + // c1, g1, b1 should have a thread local ref, but c2, g2, b2 should not. EXPECT_EQ(3L, TestUtility::findCounter(*store_, "c1").use_count()); EXPECT_EQ(3L, TestUtility::findGauge(*store_, "g1").use_count()); + EXPECT_EQ(3L, TestUtility::findBoolIndicator(*store_, "b1").use_count()); EXPECT_EQ(2L, TestUtility::findCounter(*store_, "c2").use_count()); EXPECT_EQ(2L, TestUtility::findGauge(*store_, "g2").use_count()); + EXPECT_EQ(2L, TestUtility::findBoolIndicator(*store_, "b2").use_count()); tls_.shutdownThread(); // Includes overflow stat. 
- EXPECT_CALL(*alloc_, free(_)).Times(5); + EXPECT_CALL(*alloc_, free(_)).Times(7); } TEST_F(StatsThreadLocalStoreTest, MergeDuringShutDown) { @@ -971,6 +1087,13 @@ TEST_F(TruncatingAllocTest, GaugeNotTruncated) { }); } +TEST_F(TruncatingAllocTest, BoolNotTruncated) { + EXPECT_NO_LOGS({ + BoolIndicator& boolIndicator = store_->boolIndicator("simple"); + EXPECT_EQ(&boolIndicator, &store_->boolIndicator("simple")); + }); +} + TEST_F(TruncatingAllocTest, CounterTruncated) { Counter* counter = nullptr; EXPECT_LOG_CONTAINS("warning", "is too long with", { @@ -989,6 +1112,15 @@ TEST_F(TruncatingAllocTest, GaugeTruncated) { EXPECT_NO_LOGS(EXPECT_EQ(gauge, &store_->gauge(long_name_))); } +TEST_F(TruncatingAllocTest, BoolTruncated) { + BoolIndicator* boolIndicator = nullptr; + EXPECT_LOG_CONTAINS("warning", "is too long with", { + BoolIndicator& b = store_->boolIndicator(long_name_); + boolIndicator = &b; + }); + EXPECT_NO_LOGS(EXPECT_EQ(boolIndicator, &store_->boolIndicator(long_name_))); +} + TEST_F(TruncatingAllocTest, HistogramWithLongNameNotTruncated) { EXPECT_NO_LOGS({ Histogram& histogram = store_->histogram(long_name_); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index f40a35b0d640b..2653380f0070c 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -6,6 +6,7 @@ load( "envoy_cc_test", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) envoy_package() @@ -40,6 +41,7 @@ envoy_cc_test( "//source/common/network:transport_socket_options_lib", "//source/common/network:utility_lib", "//source/common/stats:stats_lib", + "//source/common/upstream:cluster_factory_lib", "//source/common/upstream:cluster_manager_lib", "//source/extensions/transport_sockets/raw_buffer:config", "//source/extensions/transport_sockets/tls:context_lib", @@ -378,3 +380,35 @@ envoy_cc_test_library( "//source/common/upstream:upstream_lib", ], ) + +envoy_cc_test( + name = "cluster_factory_impl_test", + srcs = 
["cluster_factory_impl_test.cc"], + deps = [ + ":utility_lib", + "//include/envoy/api:api_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/upstream:cluster_factory_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/config:metadata_lib", + "//source/common/event:dispatcher_lib", + "//source/common/json:config_schemas_lib", + "//source/common/json:json_loader_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:cluster_factory_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/transport_sockets/raw_buffer:config", + "//source/server:transport_socket_config_lib", + "//test/integration/clusters:custom_static_cluster", + "//test/mocks:common_lib", + "//test/mocks/local_info:local_info_mocks", + "//test/mocks/network:network_mocks", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/mocks/ssl:ssl_mocks", + "//test/mocks/upstream:upstream_mocks", + "//test/test_common:registry_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/upstream/cds_api_impl_test.cc b/test/common/upstream/cds_api_impl_test.cc index e9fa17c85acc1..d18aee1f7042d 100644 --- a/test/common/upstream/cds_api_impl_test.cc +++ b/test/common/upstream/cds_api_impl_test.cc @@ -18,6 +18,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AnyNumber; using testing::InSequence; using testing::Invoke; using testing::Return; @@ -48,14 +49,12 @@ class CdsApiImplTest : public testing::Test { Config::Utility::translateCdsConfig(*config, cds_config); cds_config.mutable_api_config_source()->set_api_type( envoy::api::v2::core::ApiConfigSource::REST); - Upstream::ClusterManager::ClusterInfoMap cluster_map; - Upstream::MockClusterMockPrioritySet cluster; - cluster_map.emplace("foo_cluster", cluster); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(cluster_map)); - EXPECT_CALL(cluster, info()); - 
EXPECT_CALL(*cluster.info_, addedViaApi()); - EXPECT_CALL(cluster, info()); - EXPECT_CALL(*cluster.info_, type()); + cluster_map_.emplace("foo_cluster", mock_cluster_); + EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); + EXPECT_CALL(mock_cluster_, info()).Times(AnyNumber()); + EXPECT_CALL(*mock_cluster_.info_, addedViaApi()); + EXPECT_CALL(mock_cluster_, info()).Times(AnyNumber()); + EXPECT_CALL(*mock_cluster_.info_, type()); cds_ = CdsApiImpl::create(cds_config, cm_, dispatcher_, random_, local_info_, store_, *api_); resetCdsInitializedCb(); @@ -167,6 +166,8 @@ class CdsApiImplTest : public testing::Test { }; NiceMock cm_; + Upstream::ClusterManager::ClusterInfoMap cluster_map_; + Upstream::MockClusterMockPrioritySet mock_cluster_; NiceMock dispatcher_; NiceMock random_; NiceMock local_info_; @@ -188,8 +189,9 @@ TEST_F(CdsApiImplTest, ValidateFail) { Protobuf::RepeatedPtrField clusters; clusters.Add(); - EXPECT_THROW(dynamic_cast(cds_.get())->onConfigUpdate(clusters, ""), - ProtoValidationException); + EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); + EXPECT_CALL(initialized_, ready()); + EXPECT_THROW(dynamic_cast(cds_.get())->onConfigUpdate(clusters, ""), EnvoyException); EXPECT_CALL(request_, cancel()); } @@ -206,8 +208,12 @@ TEST_F(CdsApiImplTest, ValidateDuplicateClusters) { auto* cluster_2 = clusters.Add(); cluster_2->set_name("duplicate_cluster"); + EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); + EXPECT_CALL(initialized_, ready()); EXPECT_THROW_WITH_MESSAGE(dynamic_cast(cds_.get())->onConfigUpdate(clusters, ""), - EnvoyException, "duplicate cluster duplicate_cluster found"); + EnvoyException, + "Error adding/updating cluster(s) duplicate_cluster: duplicate cluster " + "duplicate_cluster found"); EXPECT_CALL(request_, cancel()); } @@ -362,6 +368,7 @@ version_info: '1' TEST_F(CdsApiImplTest, CdsPauseOnWarming) { interval_timer_ = new Event::MockTimer(&dispatcher_); + EXPECT_CALL(cm_, 
clusters()).WillRepeatedly(Return(ClusterManager::ClusterInfoMap{})); InSequence s; setup(); @@ -385,7 +392,6 @@ version_info: '0' // Two clusters updated, both warmed up. EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().ClusterLoadAssignment)).Times(1); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); cm_.expectAddWithWarming("cluster1", "0"); cm_.expectWarmingClusterCount(); EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().Cluster)).Times(1); @@ -421,7 +427,6 @@ version_info: '1' )EOF"; EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().ClusterLoadAssignment)).Times(1); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); cm_.expectAddWithWarming("cluster1", "1"); cm_.expectWarmingClusterCount(); EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().Cluster)).Times(1); @@ -451,7 +456,6 @@ version_info: '2' )EOF"; EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().ClusterLoadAssignment)).Times(1); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); cm_.expectAddWithWarming("cluster4", "2"); cm_.expectWarmingClusterCount(); EXPECT_CALL(initialized_, ready()); @@ -485,7 +489,6 @@ version_info: '3' // Two clusters updated, first one warmed up before processing of the second one starts. 
EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().ClusterLoadAssignment)).Times(1); - EXPECT_CALL(cm_, clusters()).WillOnce(Return(ClusterManager::ClusterInfoMap{})); cm_.expectAddWithWarming("cluster5", "3", true); cm_.expectWarmingClusterCount(); EXPECT_CALL(cm_.ads_mux_, pause(Config::TypeUrl::get().Cluster)).Times(1); @@ -527,6 +530,7 @@ version_info: '0' path: eds path )EOF"; + EXPECT_CALL(cm_, clusters()).WillRepeatedly(Return(cluster_map_)); EXPECT_CALL(initialized_, ready()); EXPECT_CALL(*interval_timer_, enableTimer(_)); callbacks_->onSuccess(parseResponseMessageFromYaml(response_yaml)); @@ -535,6 +539,7 @@ version_info: '0' interval_timer_->callback_(); EXPECT_CALL(*interval_timer_, enableTimer(_)); + callbacks_->onFailure(Http::AsyncClient::FailureReason::Reset); EXPECT_EQ("", cds_->versionInfo()); diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc new file mode 100644 index 0000000000000..6ed080956e141 --- /dev/null +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -0,0 +1,222 @@ +#include +#include +#include +#include +#include + +#include "envoy/api/api.h" +#include "envoy/http/codec.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/network/utility.h" +#include "common/singleton/manager_impl.h" +#include "common/upstream/cluster_factory_impl.h" + +#include "server/transport_socket_config_impl.h" + +#include "test/common/upstream/utility.h" +#include "test/integration/clusters/cluster_factory_config.pb.validate.h" +#include "test/integration/clusters/custom_static_cluster.h" +#include "test/mocks/common.h" +#include "test/mocks/local_info/mocks.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/server/mocks.h" +#include "test/mocks/ssl/mocks.h" + +using testing::_; +using testing::ContainerEq; +using testing::Invoke; +using testing::NiceMock; +using testing::ReturnRef; + +namespace Envoy { +namespace Upstream { +namespace { + +// Test Cluster 
Factory without custom configuration +class TestStaticClusterFactory : public ClusterFactoryImplBase { +public: + TestStaticClusterFactory() : ClusterFactoryImplBase("envoy.clusters.test_static") {} + + ClusterImplBaseSharedPtr + createClusterImpl(const envoy::api::v2::Cluster& cluster, ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override { + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi(), 1, + "127.0.0.1", 80); + } +}; + +class ClusterFactoryTestBase { +protected: + ClusterFactoryTestBase() : api_(Api::createApiForTest(stats_)) { + outlier_event_logger_.reset(new Outlier::MockEventLogger()); + dns_resolver_.reset(new Network::MockDnsResolver()); + } + + NiceMock admin_; + Ssl::MockContextManager ssl_context_manager_; + NiceMock cm_; + const NiceMock local_info_; + NiceMock dispatcher_; + NiceMock runtime_; + NiceMock random_; + Stats::IsolatedStoreImpl stats_; + Singleton::ManagerImpl singleton_manager_{Thread::threadFactoryForTest().currentThreadId()}; + NiceMock tls_; + Api::ApiPtr api_; + Network::DnsResolverSharedPtr dns_resolver_; + AccessLog::MockAccessLogManager log_manager_; + Outlier::EventLoggerSharedPtr outlier_event_logger_; +}; + +class TestStaticClusterImplTest : public testing::Test, public ClusterFactoryTestBase {}; + +TEST_F(TestStaticClusterImplTest, CreateWithoutConfig) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: 10.0.0.1 + port_value: 443 + cluster_type: + name: envoy.clusters.test_static + )EOF"; + + TestStaticClusterFactory factory; + Registry::InjectFactory registered_factory(factory); + + const envoy::api::v2::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + auto cluster = ClusterFactoryImplBase::create( + cluster_config, cm_, stats_, tls_, dns_resolver_, 
ssl_context_manager_, runtime_, random_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + std::move(outlier_event_logger_), false, *api_); + cluster->initialize([] {}); + + EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[1]->healthyHosts().size()); + EXPECT_EQ("", cluster->prioritySet().hostSetsPerPriority()[1]->hosts()[0]->hostname()); + // the hosts field override by values hardcoded in the factory + EXPECT_EQ("127.0.0.1", cluster->prioritySet() + .hostSetsPerPriority()[1] + ->hosts()[0] + ->address() + ->ip() + ->addressAsString()); + EXPECT_EQ(80, + cluster->prioritySet().hostSetsPerPriority()[1]->hosts()[0]->address()->ip()->port()); + EXPECT_FALSE(cluster->info()->addedViaApi()); +} + +TEST_F(TestStaticClusterImplTest, CreateWithStructConfig) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: 10.0.0.1 + port_value: 443 + cluster_type: + name: envoy.clusters.custom_static + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + priority: 10 + address: 127.0.0.1 + port_value: 80 + )EOF"; + + const envoy::api::v2::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + auto cluster = ClusterFactoryImplBase::create( + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + std::move(outlier_event_logger_), false, *api_); + cluster->initialize([] {}); + + EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[10]->healthyHosts().size()); + EXPECT_EQ("", cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->hostname()); + EXPECT_EQ("127.0.0.1", cluster->prioritySet() + .hostSetsPerPriority()[10] + ->hosts()[0] + ->address() + ->ip() + ->addressAsString()); + EXPECT_EQ(80, + cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->address()->ip()->port()); + 
EXPECT_FALSE(cluster->info()->addedViaApi()); +} + +TEST_F(TestStaticClusterImplTest, CreateWithTypedConfig) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: 10.0.0.1 + port_value: 443 + cluster_type: + name: envoy.clusters.custom_static + typed_config: + "@type": type.googleapis.com/test.integration.clusters.CustomStaticConfig + priority: 10 + address: 127.0.0.1 + port_value: 80 + )EOF"; + + const envoy::api::v2::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + auto cluster = ClusterFactoryImplBase::create( + cluster_config, cm_, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + std::move(outlier_event_logger_), false, *api_); + cluster->initialize([] {}); + + EXPECT_EQ(1UL, cluster->prioritySet().hostSetsPerPriority()[10]->healthyHosts().size()); + EXPECT_EQ("", cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->hostname()); + EXPECT_EQ("127.0.0.1", cluster->prioritySet() + .hostSetsPerPriority()[10] + ->hosts()[0] + ->address() + ->ip() + ->addressAsString()); + EXPECT_EQ(80, + cluster->prioritySet().hostSetsPerPriority()[10]->hosts()[0]->address()->ip()->port()); + EXPECT_FALSE(cluster->info()->addedViaApi()); +} + +TEST_F(TestStaticClusterImplTest, UnsupportedClusterType) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: 10.0.0.1 + port_value: 443 + cluster_type: + name: envoy.clusters.bad_cluster_name + typed_config: + "@type": type.googleapis.com/test.integration.clusters.CustomStaticConfig + priority: 10 + )EOF"; + // the factory is not registered, expect to throw + EXPECT_THROW_WITH_MESSAGE( + { + const envoy::api::v2::Cluster cluster_config = parseClusterFromV2Yaml(yaml); + auto cluster = ClusterFactoryImplBase::create( + cluster_config, cm_, stats_, 
tls_, dns_resolver_, ssl_context_manager_, runtime_, + random_, dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + std::move(outlier_event_logger_), false, *api_); + cluster->initialize([] {}); + }, + EnvoyException, + "Didn't find a registered cluster factory implementation for name: " + "'envoy.clusters.bad_cluster_name'"); +} + +} // namespace +} // namespace Upstream +} // namespace Envoy \ No newline at end of file diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 5fee48736d8d4..30c398f85b10e 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -13,6 +13,7 @@ #include "common/network/transport_socket_options_impl.h" #include "common/network/utility.h" #include "common/singleton/manager_impl.h" +#include "common/upstream/cluster_factory_impl.h" #include "common/upstream/cluster_manager_impl.h" #include "extensions/transport_sockets/tls/context_manager_impl.h" @@ -61,10 +62,10 @@ class TestClusterManagerFactory : public ClusterManagerFactory { .WillByDefault(Invoke([&](const envoy::api::v2::Cluster& cluster, ClusterManager& cm, Outlier::EventLoggerSharedPtr outlier_event_logger, bool added_via_api) -> ClusterSharedPtr { - return ClusterImplBase::create(cluster, cm, stats_, tls_, dns_resolver_, - ssl_context_manager_, runtime_, random_, dispatcher_, - log_manager_, local_info_, admin_, singleton_manager_, - outlier_event_logger, added_via_api, *api_); + return ClusterFactoryImplBase::create( + cluster, cm, stats_, tls_, dns_resolver_, ssl_context_manager_, runtime_, random_, + dispatcher_, log_manager_, local_info_, admin_, singleton_manager_, + outlier_event_logger, added_via_api, *api_); })); } @@ -916,6 +917,7 @@ version_info: version3 static_clusters: - cluster: name: "cds_cluster" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -927,6 +929,7 @@ version_info: version3 nanos: 
234000000 - cluster: name: "fake_cluster" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -938,6 +941,7 @@ version_info: version3 nanos: 234000000 - cluster: name: "fake_cluster2" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -951,6 +955,7 @@ version_info: version3 - version_info: "version1" cluster: name: "cluster3" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -963,6 +968,7 @@ version_info: version3 - version_info: "version2" cluster: name: "cluster4" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -975,6 +981,7 @@ version_info: version3 - version_info: "version3" cluster: name: "cluster5" + type: "STATIC" connect_timeout: 0.25s hosts: - socket_address: @@ -1088,6 +1095,7 @@ TEST_F(ClusterManagerImplTest, RemoveWarmingCluster) { - version_info: "version1" cluster: name: "fake_cluster" + type: STATIC connect_timeout: 0.25s hosts: - socket_address: diff --git a/test/common/upstream/resource_manager_impl_test.cc b/test/common/upstream/resource_manager_impl_test.cc index ae1c7d356d628..259023a00e577 100644 --- a/test/common/upstream/resource_manager_impl_test.cc +++ b/test/common/upstream/resource_manager_impl_test.cc @@ -19,14 +19,14 @@ namespace { TEST(ResourceManagerImplTest, RuntimeResourceManager) { NiceMock runtime; - NiceMock gauge; + NiceMock bool_indicator; NiceMock store; - ON_CALL(store, gauge(_)).WillByDefault(ReturnRef(gauge)); + ON_CALL(store, boolIndicator(_)).WillByDefault(ReturnRef(bool_indicator)); ResourceManagerImpl resource_manager( runtime, "circuit_breakers.runtime_resource_manager_test.default.", 0, 0, 0, 1, - ClusterCircuitBreakersStats{ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_GAUGE(store))}); + ClusterCircuitBreakersStats{ALL_CLUSTER_CIRCUIT_BREAKERS_STATS(POOL_BOOL_INDICATOR(store))}); EXPECT_CALL( runtime.snapshot_, diff --git a/test/config/utility.cc b/test/config/utility.cc index 84e76a6579dfa..27c0e4cc184f3 100644 --- a/test/config/utility.cc +++ 
b/test/config/utility.cc @@ -148,6 +148,7 @@ void ConfigHelper::finalize(const std::vector& ports) { uint32_t port_idx = 0; bool eds_hosts = false; + bool custom_cluster = false; auto* static_resources = bootstrap_.mutable_static_resources(); const auto tap_path = TestEnvironment::getOptionalEnvVar("TAP_PATH"); if (tap_path) { @@ -176,6 +177,8 @@ void ConfigHelper::finalize(const std::vector& ports) { auto* cluster = static_resources->mutable_clusters(i); if (cluster->type() == envoy::api::v2::Cluster::EDS) { eds_hosts = true; + } else if (cluster->has_cluster_type()) { + custom_cluster = true; } else { for (int j = 0; j < cluster->hosts_size(); ++j) { if (cluster->mutable_hosts(j)->has_socket_address()) { @@ -213,7 +216,7 @@ void ConfigHelper::finalize(const std::vector& ports) { *cluster->mutable_transport_socket(), tls_config); } } - ASSERT(port_idx == ports.size() || eds_hosts); + ASSERT(port_idx == ports.size() || eds_hosts || custom_cluster); if (!connect_timeout_set_) { #ifdef __APPLE__ diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index f615c39b1c7b8..1e5136b96e249 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -211,6 +211,28 @@ class AdminRequestTest : public MainCommonTest { } } + // Wait until Envoy is inside the main server run loop proper. Before entering, Envoy runs any + // pending post callbacks, so it's not reliable to use adminRequest() or post() to do this. + // Generally, tests should not depend on this for correctness, but as a result of + // https://github.com/libevent/libevent/issues/779 we need to for TSAN. This is because the entry + // to event_base_loop() is where the signal base race occurs, but once we're in that loop in + // blocking mode, we're safe to take signals. + // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is fixed. 
+ void waitForEnvoyRun() { + absl::Notification done; + main_common_->dispatcherForTest().post([this, &done] { + struct Sacrifice : Event::DeferredDeletable { + Sacrifice(absl::Notification& notify) : notify_(notify) {} + ~Sacrifice() { notify_.Notify(); } + absl::Notification& notify_; + }; + auto sacrifice = std::make_unique(done); + // Wait for a deferred delete cleanup, this only happens in the main server run loop. + main_common_->dispatcherForTest().deferredDelete(std::move(sacrifice)); + }); + done.WaitForNotification(); + } + // Having triggered Envoy to quit (via signal or /quitquitquit), this blocks until Envoy exits. bool waitForEnvoyToExit() { finished_.WaitForNotification(); @@ -245,6 +267,9 @@ TEST_P(AdminRequestTest, AdminRequestGetStatsAndQuit) { TEST_P(AdminRequestTest, AdminRequestGetStatsAndKill) { startEnvoy(); started_.WaitForNotification(); + // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is + // fixed, started_ will then become our real synchronization point. + waitForEnvoyRun(); EXPECT_THAT(adminRequest("/stats", "GET"), HasSubstr("access_log_file.reopen_failed")); kill(getpid(), SIGTERM); EXPECT_TRUE(waitForEnvoyToExit()); @@ -255,6 +280,9 @@ TEST_P(AdminRequestTest, AdminRequestGetStatsAndKill) { TEST_P(AdminRequestTest, AdminRequestGetStatsAndCtrlC) { startEnvoy(); started_.WaitForNotification(); + // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is + // fixed, started_ will then become our real synchronization point. 
+ waitForEnvoyRun(); EXPECT_THAT(adminRequest("/stats", "GET"), HasSubstr("access_log_file.reopen_failed")); kill(getpid(), SIGINT); EXPECT_TRUE(waitForEnvoyToExit()); @@ -263,6 +291,9 @@ TEST_P(AdminRequestTest, AdminRequestGetStatsAndCtrlC) { TEST_P(AdminRequestTest, AdminRequestContentionDisabled) { startEnvoy(); started_.WaitForNotification(); + // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is + // fixed, started_ will then become our real synchronization point. + waitForEnvoyRun(); EXPECT_THAT(adminRequest("/contention", "GET"), HasSubstr("not enabled")); kill(getpid(), SIGTERM); EXPECT_TRUE(waitForEnvoyToExit()); @@ -272,6 +303,9 @@ TEST_P(AdminRequestTest, AdminRequestContentionEnabled) { addArg("--enable-mutex-tracing"); startEnvoy(); started_.WaitForNotification(); + // TODO(htuch): Remove when https://github.com/libevent/libevent/issues/779 is + // fixed, started_ will then become our real synchronization point. + waitForEnvoyRun(); // Induce contention to guarantee a non-zero num_contentions count. 
Thread::TestUtil::ContentionGenerator contention_generator; diff --git a/test/extensions/filters/network/common/redis/BUILD b/test/extensions/filters/network/common/redis/BUILD new file mode 100644 index 0000000000000..8b0f46b1600f6 --- /dev/null +++ b/test/extensions/filters/network/common/redis/BUILD @@ -0,0 +1,60 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_mock", + "envoy_cc_test", + "envoy_cc_test_library", + "envoy_package", +) + +envoy_package() + +envoy_cc_mock( + name = "redis_mocks", + srcs = ["mocks.cc"], + hdrs = ["mocks.h"], + deps = [ + "//source/common/common:assert_lib", + "//source/extensions/filters/network/common/redis:client_lib", + "//source/extensions/filters/network/common/redis:codec_lib", + ], +) + +envoy_cc_test_library( + name = "test_utils_lib", + hdrs = ["test_utils.h"], + deps = [ + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/config/filter/network/redis_proxy/v2:redis_proxy_cc", + ], +) + +envoy_cc_test( + name = "codec_impl_test", + srcs = ["codec_impl_test.cc"], + deps = [ + ":redis_mocks", + "//source/common/buffer:buffer_lib", + "//source/common/common:assert_lib", + "//source/extensions/filters/network/common/redis:codec_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_cc_test( + name = "client_impl_test", + srcs = ["client_impl_test.cc"], + deps = [ + ":redis_mocks", + ":test_utils_lib", + "//source/common/event:dispatcher_lib", + "//source/common/network:utility_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/filters/network/common/redis:client_lib", + "//test/mocks/network:network_mocks", + "//test/mocks/thread_local:thread_local_mocks", + "//test/mocks/upstream:upstream_mocks", + ], +) diff --git a/test/extensions/filters/network/common/redis/client_impl_test.cc b/test/extensions/filters/network/common/redis/client_impl_test.cc new file mode 100644 index 0000000000000..72a0952f45fca 
--- /dev/null +++ b/test/extensions/filters/network/common/redis/client_impl_test.cc @@ -0,0 +1,386 @@ +#include + +#include "common/buffer/buffer_impl.h" +#include "common/common/assert.h" +#include "common/network/utility.h" +#include "common/upstream/upstream_impl.h" + +#include "extensions/filters/network/common/redis/client_impl.h" + +#include "test/extensions/filters/network/common/redis/mocks.h" +#include "test/extensions/filters/network/common/redis/test_utils.h" +#include "test/mocks/network/mocks.h" +#include "test/mocks/thread_local/mocks.h" +#include "test/mocks/upstream/mocks.h" +#include "test/test_common/printers.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::DoAll; +using testing::Eq; +using testing::InSequence; +using testing::Invoke; +using testing::Ref; +using testing::Return; +using testing::ReturnNew; +using testing::ReturnRef; +using testing::SaveArg; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +class RedisClientImplTest : public testing::Test, public Common::Redis::DecoderFactory { +public: + // Commmon::Redis::DecoderFactory + Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override { + callbacks_ = &callbacks; + return Common::Redis::DecoderPtr{decoder_}; + } + + ~RedisClientImplTest() { + client_.reset(); + + // Make sure all gauges are 0. 
+ for (const Stats::GaugeSharedPtr& gauge : host_->cluster_.stats_store_.gauges()) { + EXPECT_EQ(0U, gauge->value()); + } + for (const Stats::GaugeSharedPtr& gauge : host_->stats_store_.gauges()) { + EXPECT_EQ(0U, gauge->value()); + } + } + + void setup() { + config_ = std::make_unique(createConnPoolSettings()); + finishSetup(); + } + + void setup(std::unique_ptr&& config) { + config_ = std::move(config); + finishSetup(); + } + + void finishSetup() { + upstream_connection_ = new NiceMock(); + Upstream::MockHost::MockCreateConnectionData conn_info; + conn_info.connection_ = upstream_connection_; + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(*host_, createConnection_(_, _)).WillOnce(Return(conn_info)); + EXPECT_CALL(*upstream_connection_, addReadFilter(_)) + .WillOnce(SaveArg<0>(&upstream_read_filter_)); + EXPECT_CALL(*upstream_connection_, connect()); + EXPECT_CALL(*upstream_connection_, noDelay(true)); + + client_ = ClientImpl::create(host_, dispatcher_, Common::Redis::EncoderPtr{encoder_}, *this, + *config_); + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_total_.value()); + EXPECT_EQ(1UL, host_->stats_.cx_total_.value()); + + // NOP currently. 
+ upstream_connection_->runHighWatermarkCallbacks(); + upstream_connection_->runLowWatermarkCallbacks(); + } + + void onConnected() { + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + upstream_connection_->raiseEvent(Network::ConnectionEvent::Connected); + } + + const std::string cluster_name_{"foo"}; + std::shared_ptr host_{new NiceMock()}; + Event::MockDispatcher dispatcher_; + Event::MockTimer* connect_or_op_timer_{new Event::MockTimer(&dispatcher_)}; + MockEncoder* encoder_{new MockEncoder()}; + MockDecoder* decoder_{new MockDecoder()}; + Common::Redis::DecoderCallbacks* callbacks_{}; + NiceMock* upstream_connection_{}; + Network::ReadFilterSharedPtr upstream_read_filter_; + std::unique_ptr config_; + ClientPtr client_; +}; + +TEST_F(RedisClientImplTest, Basic) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); + EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); + EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + 
Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); +} + +TEST_F(RedisClientImplTest, Cancel) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Common::Redis::RespValue request2; + MockPoolCallbacks callbacks2; + EXPECT_CALL(*encoder_, encode(Ref(request2), _)); + PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); + EXPECT_NE(nullptr, handle2); + + handle1->cancel(); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + InSequence s; + + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks1, onResponse_(_)).Times(0); + EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response1)); + + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); + callbacks_->onRespValue(std::move(response2)); + })); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_CALL(*upstream_connection_, 
close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + client_->close(); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); +} + +TEST_F(RedisClientImplTest, FailAll) { + InSequence s; + + setup(); + + NiceMock connection_callbacks; + client_->addConnectionCallbacks(connection_callbacks); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SERVER_FAILURE)); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); + upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_remote_with_active_rq_.value()); +} + +TEST_F(RedisClientImplTest, FailAllWithCancel) { + InSequence s; + + setup(); + + NiceMock connection_callbacks; + client_->addConnectionCallbacks(connection_callbacks); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + handle1->cancel(); + + EXPECT_CALL(callbacks1, onFailure()).Times(0); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); + upstream_connection_->raiseEvent(Network::ConnectionEvent::LocalClose); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); + EXPECT_EQ(1UL, 
host_->cluster_.stats_.upstream_cx_destroy_local_with_active_rq_.value()); + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); +} + +TEST_F(RedisClientImplTest, ProtocolError) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + Buffer::OwnedImpl fake_data; + EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { + throw Common::Redis::ProtocolError("error"); + })); + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::REQUEST_FAILED)); + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + upstream_read_filter_->onData(fake_data, false); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_protocol_error_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_error_.value()); +} + +TEST_F(RedisClientImplTest, ConnectFail) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SERVER_FAILURE)); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); +} + +class ConfigOutlierDisabled : public Config { + bool disableOutlierEvents() const override { return true; } + std::chrono::milliseconds opTimeout() const override { return 
std::chrono::milliseconds(25); } + bool enableHashtagging() const override { return false; } +}; + +TEST_F(RedisClientImplTest, OutlierDisabled) { + InSequence s; + + setup(std::make_unique()); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + EXPECT_CALL(host_->outlier_detector_, putResult(_)).Times(0); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); + EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); +} + +TEST_F(RedisClientImplTest, ConnectTimeout) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::TIMEOUT)); + EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + connect_or_op_timer_->callback_(); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_timeout_.value()); + EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); +} + +TEST_F(RedisClientImplTest, OpTimeout) { + InSequence s; + + setup(); + + Common::Redis::RespValue request1; + MockPoolCallbacks callbacks1; + EXPECT_CALL(*encoder_, encode(Ref(request1), _)); + PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); + EXPECT_NE(nullptr, handle1); + + onConnected(); + + EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::TIMEOUT)); + EXPECT_CALL(*upstream_connection_, 
close(Network::ConnectionCloseType::NoFlush)); + EXPECT_CALL(callbacks1, onFailure()); + EXPECT_CALL(*connect_or_op_timer_, disableTimer()); + connect_or_op_timer_->callback_(); + + EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_timeout_.value()); + EXPECT_EQ(1UL, host_->stats_.rq_timeout_.value()); +} + +TEST(RedisClientFactoryImplTest, Basic) { + ClientFactoryImpl factory; + Upstream::MockHost::MockCreateConnectionData conn_info; + conn_info.connection_ = new NiceMock(); + std::shared_ptr host(new NiceMock()); + EXPECT_CALL(*host, createConnection_(_, _)).WillOnce(Return(conn_info)); + NiceMock dispatcher; + ConfigImpl config(createConnPoolSettings()); + ClientPtr client = factory.create(host, dispatcher, config); + client->close(); +} + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/redis_proxy/codec_impl_test.cc b/test/extensions/filters/network/common/redis/codec_impl_test.cc similarity index 90% rename from test/extensions/filters/network/redis_proxy/codec_impl_test.cc rename to test/extensions/filters/network/common/redis/codec_impl_test.cc index 9ecf214707635..60f8c72eb8f05 100644 --- a/test/extensions/filters/network/redis_proxy/codec_impl_test.cc +++ b/test/extensions/filters/network/common/redis/codec_impl_test.cc @@ -3,9 +3,9 @@ #include "common/buffer/buffer_impl.h" #include "common/common/assert.h" -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" -#include "test/extensions/filters/network/redis_proxy/mocks.h" +#include "test/extensions/filters/network/common/redis/mocks.h" #include "test/test_common/printers.h" #include "test/test_common/utility.h" @@ -14,7 +14,8 @@ namespace Envoy { namespace Extensions { namespace NetworkFilters { -namespace RedisProxy { +namespace Common { +namespace Redis { class 
RedisEncoderDecoderImplTest : public testing::Test, public DecoderCallbacks { public: @@ -65,6 +66,18 @@ TEST_F(RedisEncoderDecoderImplTest, SimpleString) { EXPECT_EQ(0UL, buffer_.length()); } +TEST_F(RedisEncoderDecoderImplTest, BulkString) { + RespValue value; + value.type(RespType::BulkString); + value.asString() = "bulk string"; + EXPECT_EQ("\"bulk string\"", value.toString()); + encoder_.encode(value, buffer_); + EXPECT_EQ("$11\r\nbulk string\r\n", buffer_.toString()); + decoder_.decode(buffer_); + EXPECT_EQ(value, *decoded_values_[0]); + EXPECT_EQ(0UL, buffer_.length()); +} + TEST_F(RedisEncoderDecoderImplTest, Integer) { RespValue value; value.type(RespType::Integer); @@ -188,7 +201,8 @@ TEST_F(RedisEncoderDecoderImplTest, InvalidBulkStringExpectLF) { EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); } -} // namespace RedisProxy +} // namespace Redis +} // namespace Common } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy diff --git a/test/extensions/filters/network/common/redis/mocks.cc b/test/extensions/filters/network/common/redis/mocks.cc new file mode 100644 index 0000000000000..9337421294aea --- /dev/null +++ b/test/extensions/filters/network/common/redis/mocks.cc @@ -0,0 +1,96 @@ +#include "mocks.h" + +#include + +#include "common/common/assert.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Invoke; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { + +void PrintTo(const RespValue& value, std::ostream* os) { *os << value.toString(); } + +void PrintTo(const RespValuePtr& value, std::ostream* os) { *os << value->toString(); } + +bool operator==(const RespValue& lhs, const RespValue& rhs) { + if (lhs.type() != rhs.type()) { + return false; + } + + switch (lhs.type()) { + case RespType::Array: { + if (lhs.asArray().size() != rhs.asArray().size()) { + return false; + } + + bool equal = true; + for (uint64_t i = 0; i < 
lhs.asArray().size(); i++) { + equal &= (lhs.asArray()[i] == rhs.asArray()[i]); + } + + return equal; + } + case RespType::SimpleString: + case RespType::BulkString: + case RespType::Error: { + return lhs.asString() == rhs.asString(); + } + case RespType::Null: { + return true; + } + case RespType::Integer: { + return lhs.asInteger() == rhs.asInteger(); + } + } + + NOT_REACHED_GCOVR_EXCL_LINE; +} + +MockEncoder::MockEncoder() { + ON_CALL(*this, encode(_, _)) + .WillByDefault( + Invoke([this](const Common::Redis::RespValue& value, Buffer::Instance& out) -> void { + real_encoder_.encode(value, out); + })); +} + +MockEncoder::~MockEncoder() {} + +MockDecoder::MockDecoder() {} +MockDecoder::~MockDecoder() {} + +namespace Client { + +MockClient::MockClient() { + ON_CALL(*this, addConnectionCallbacks(_)) + .WillByDefault(Invoke([this](Network::ConnectionCallbacks& callbacks) -> void { + callbacks_.push_back(&callbacks); + })); + ON_CALL(*this, close()).WillByDefault(Invoke([this]() -> void { + raiseEvent(Network::ConnectionEvent::LocalClose); + })); +} + +MockClient::~MockClient() {} + +MockPoolRequest::MockPoolRequest() {} +MockPoolRequest::~MockPoolRequest() {} + +MockPoolCallbacks::MockPoolCallbacks() {} +MockPoolCallbacks::~MockPoolCallbacks() {} + +} // namespace Client + +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/redis/mocks.h b/test/extensions/filters/network/common/redis/mocks.h new file mode 100644 index 0000000000000..1c6f573954361 --- /dev/null +++ b/test/extensions/filters/network/common/redis/mocks.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include +#include + +#include "extensions/filters/network/common/redis/client_impl.h" +#include "extensions/filters/network/common/redis/codec_impl.h" + +#include "test/test_common/printers.h" + +#include "gmock/gmock.h" + +namespace Envoy { +namespace Extensions { +namespace 
NetworkFilters { +namespace Common { +namespace Redis { + +/** + * Pretty print const RespValue& value + */ + +void PrintTo(const RespValue& value, std::ostream* os); +void PrintTo(const RespValuePtr& value, std::ostream* os); +bool operator==(const RespValue& lhs, const RespValue& rhs); + +class MockEncoder : public Common::Redis::Encoder { +public: + MockEncoder(); + ~MockEncoder(); + + MOCK_METHOD2(encode, void(const Common::Redis::RespValue& value, Buffer::Instance& out)); + +private: + Common::Redis::EncoderImpl real_encoder_; +}; + +class MockDecoder : public Common::Redis::Decoder { +public: + MockDecoder(); + ~MockDecoder(); + + MOCK_METHOD1(decode, void(Buffer::Instance& data)); +}; + +namespace Client { + +class MockClient : public Client { +public: + MockClient(); + ~MockClient(); + + void raiseEvent(Network::ConnectionEvent event) { + for (Network::ConnectionCallbacks* callbacks : callbacks_) { + callbacks->onEvent(event); + } + } + + void runHighWatermarkCallbacks() { + for (auto* callback : callbacks_) { + callback->onAboveWriteBufferHighWatermark(); + } + } + + void runLowWatermarkCallbacks() { + for (auto* callback : callbacks_) { + callback->onBelowWriteBufferLowWatermark(); + } + } + + MOCK_METHOD1(addConnectionCallbacks, void(Network::ConnectionCallbacks& callbacks)); + MOCK_METHOD0(close, void()); + MOCK_METHOD2(makeRequest, + PoolRequest*(const Common::Redis::RespValue& request, PoolCallbacks& callbacks)); + + std::list callbacks_; +}; + +class MockPoolRequest : public PoolRequest { +public: + MockPoolRequest(); + ~MockPoolRequest(); + + MOCK_METHOD0(cancel, void()); +}; + +class MockPoolCallbacks : public PoolCallbacks { +public: + MockPoolCallbacks(); + ~MockPoolCallbacks(); + + void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); } + + MOCK_METHOD1(onResponse_, void(Common::Redis::RespValuePtr& value)); + MOCK_METHOD0(onFailure, void()); +}; + +} // namespace Client + +} // namespace Redis +} // namespace 
Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/common/redis/test_utils.h b/test/extensions/filters/network/common/redis/test_utils.h new file mode 100644 index 0000000000000..e1c418897e48b --- /dev/null +++ b/test/extensions/filters/network/common/redis/test_utils.h @@ -0,0 +1,31 @@ +#pragma once + +#include +#include +#include + +#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.h" + +#include "common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Common { +namespace Redis { +namespace Client { + +inline envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings +createConnPoolSettings() { + envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings setting{}; + setting.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); + setting.set_enable_hashtagging(true); + return setting; +} + +} // namespace Client +} // namespace Redis +} // namespace Common +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index c8e502a5eed6c..d47ac26fd3bb5 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -152,7 +152,7 @@ TEST_F(HttpConnectionManagerConfigTest, UnixSocketInternalAddress) { EXPECT_FALSE(config.internalAddressConfig().isInternalAddress(externalIpAddress)); } -TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersSizeDefault) { +TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbDefault) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http route_config: @@ -166,7 +166,7 @@ TEST_F(HttpConnectionManagerConfigTest, 
MaxRequestHeadersSizeDefault) { EXPECT_EQ(60, config.maxRequestHeadersKb()); } -TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersSizeConfigured) { +TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbConfigured) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http max_request_headers_kb: 16 @@ -181,6 +181,21 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersSizeConfigured) { EXPECT_EQ(16, config.maxRequestHeadersKb()); } +TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeadersKbMaxConfigurable) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + max_request_headers_kb: 96 + route_config: + name: local_route + http_filters: + - name: envoy.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromV2Yaml(yaml_string), context_, + date_provider_, route_config_provider_manager_); + EXPECT_EQ(96, config.maxRequestHeadersKb()); +} + // Validated that an explicit zero stream idle timeout disables. TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { const std::string yaml_string = R"EOF( diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index e0a14d9cd3475..492404c41547e 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -13,19 +13,6 @@ load( envoy_package() -envoy_extension_cc_test( - name = "codec_impl_test", - srcs = ["codec_impl_test.cc"], - extension_name = "envoy.filters.network.redis_proxy", - deps = [ - ":redis_mocks", - "//source/common/buffer:buffer_lib", - "//source/common/common:assert_lib", - "//source/extensions/filters/network/redis_proxy:codec_lib", - "//test/test_common:utility_lib", - ], -) - envoy_extension_cc_test( name = "command_splitter_impl_test", srcs = ["command_splitter_impl_test.cc"], @@ -35,6 +22,7 @@ envoy_extension_cc_test( "//source/common/stats:isolated_store_lib", "//source/common/stats:stats_lib", 
"//source/extensions/filters/network/redis_proxy:command_splitter_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", "//test/mocks/stats:stats_mocks", "//test/test_common:simulated_time_system_lib", @@ -52,6 +40,8 @@ envoy_extension_cc_test( "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", "//source/extensions/filters/network/redis_proxy:conn_pool_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", + "//test/extensions/filters/network/common/redis:test_utils_lib", "//test/mocks/network:network_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:upstream_mocks", @@ -67,6 +57,7 @@ envoy_extension_cc_test( "//source/common/config:filter_json_lib", "//source/common/event:dispatcher_lib", "//source/extensions/filters/network/redis_proxy:proxy_filter_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", "//test/mocks:common_lib", "//test/mocks/network:network_mocks", "//test/mocks/upstream:upstream_mocks", @@ -80,7 +71,8 @@ envoy_cc_mock( hdrs = ["mocks.h"], deps = [ "//source/common/common:assert_lib", - "//source/extensions/filters/network/redis_proxy:codec_lib", + "//source/extensions/filters/network/common/redis:client_interface", + "//source/extensions/filters/network/common/redis:codec_lib", "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", ], diff --git a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc index a84bef53253f1..efbc1abc623ab 100644 --- a/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_lookup_speed_test.cc @@ -9,8 +9,8 @@ #include "common/common/fmt.h" #include "common/stats/isolated_store_impl.h" +#include 
"extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "extensions/filters/network/redis_proxy/supported_commands.h" #include "test/test_common/printers.h" #include "test/test_common/simulated_time_system.h" @@ -27,37 +27,39 @@ class NoOpSplitCallbacks : public CommandSplitter::SplitCallbacks { NoOpSplitCallbacks() {} ~NoOpSplitCallbacks() {} - void onResponse(RespValuePtr&&) override {} + void onResponse(Common::Redis::RespValuePtr&&) override {} }; class NullInstanceImpl : public ConnPool::Instance { - ConnPool::PoolRequest* makeRequest(const std::string&, const RespValue&, - ConnPool::PoolCallbacks&) override { + Common::Redis::Client::PoolRequest* makeRequest(const std::string&, + const Common::Redis::RespValue&, + Common::Redis::Client::PoolCallbacks&) override { return nullptr; } }; class CommandLookUpSpeedTest { public: - void makeBulkStringArray(RespValue& value, const std::vector& strings) { - std::vector values(strings.size()); + void makeBulkStringArray(Common::Redis::RespValue& value, + const std::vector& strings) { + std::vector values(strings.size()); for (uint64_t i = 0; i < strings.size(); i++) { - values[i].type(RespType::BulkString); + values[i].type(Common::Redis::RespType::BulkString); values[i].asString() = strings[i]; } - value.type(RespType::Array); + value.type(Common::Redis::RespType::Array); value.asArray().swap(values); } void makeRequests() { - RespValue request; - for (const std::string& command : SupportedCommands::simpleCommands()) { + Common::Redis::RespValue request; + for (const std::string& command : Common::Redis::SupportedCommands::simpleCommands()) { makeBulkStringArray(request, {command, "hello"}); splitter_.makeRequest(request, callbacks_); } - for (const std::string& command : SupportedCommands::evalCommands()) { + for (const std::string& command : Common::Redis::SupportedCommands::evalCommands()) { makeBulkStringArray(request, 
{command, "hello"}); splitter_.makeRequest(request, callbacks_); } diff --git a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc index 2fc502a26ebfa..697ec403a8f2b 100644 --- a/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/command_splitter_impl_test.cc @@ -6,9 +6,10 @@ #include "common/common/fmt.h" #include "common/stats/isolated_store_impl.h" +#include "extensions/filters/network/common/redis/supported_commands.h" #include "extensions/filters/network/redis_proxy/command_splitter_impl.h" -#include "extensions/filters/network/redis_proxy/supported_commands.h" +#include "test/extensions/filters/network/common/redis/mocks.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/common.h" #include "test/mocks/stats/mocks.h" @@ -37,14 +38,15 @@ namespace CommandSplitter { class RedisCommandSplitterImplTest : public testing::Test { public: - void makeBulkStringArray(RespValue& value, const std::vector& strings) { - std::vector values(strings.size()); + void makeBulkStringArray(Common::Redis::RespValue& value, + const std::vector& strings) { + std::vector values(strings.size()); for (uint64_t i = 0; i < strings.size(); i++) { - values[i].type(RespType::BulkString); + values[i].type(Common::Redis::RespType::BulkString); values[i].asString() = strings[i]; } - value.type(RespType::Array); + value.type(Common::Redis::RespType::Array); value.asArray().swap(values); } @@ -57,22 +59,22 @@ class RedisCommandSplitterImplTest : public testing::Test { }; TEST_F(RedisCommandSplitterImplTest, InvalidRequestNotArray) { - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - 
RespValue request; + Common::Redis::RespValue request; EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.invalid_request").value()); } TEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayTooSmall) { - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"incr"}); EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); @@ -80,24 +82,24 @@ TEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayTooSmall) { } TEST_F(RedisCommandSplitterImplTest, InvalidRequestArrayNotStrings) { - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = Response::get().InvalidRequest; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"incr", ""}); - request.asArray()[1].type(RespType::Null); + request.asArray()[1].type(Common::Redis::RespType::Null); EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.splitter.invalid_request").value()); } TEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) { - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = "unsupported command 'newcommand'"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"newcommand", "hello"}); EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); @@ -107,29 +109,29 @@ 
TEST_F(RedisCommandSplitterImplTest, UnsupportedCommand) { class RedisSingleServerRequestTest : public RedisCommandSplitterImplTest, public testing::WithParamInterface { public: - void makeRequest(const std::string& hash_key, const RespValue& request) { + void makeRequest(const std::string& hash_key, const Common::Redis::RespValue& request) { EXPECT_CALL(*conn_pool_, makeRequest(hash_key, Ref(request), _)) .WillOnce(DoAll(WithArg<2>(SaveArgAddress(&pool_callbacks_)), Return(&pool_request_))); handle_ = splitter_.makeRequest(request, callbacks_); } void fail() { - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = Response::get().UpstreamFailure; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); pool_callbacks_->onFailure(); } void respond() { - RespValuePtr response1(new RespValue()); - RespValue* response1_ptr = response1.get(); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + Common::Redis::RespValue* response1_ptr = response1.get(); EXPECT_CALL(callbacks_, onResponse_(PointeesEq(response1_ptr))); pool_callbacks_->onResponse(std::move(response1)); } - ConnPool::PoolCallbacks* pool_callbacks_; - ConnPool::MockPoolRequest pool_request_; + Common::Redis::Client::PoolCallbacks* pool_callbacks_; + Common::Redis::Client::MockPoolRequest pool_request_; }; TEST_P(RedisSingleServerRequestTest, Success) { @@ -139,7 +141,7 @@ TEST_P(RedisSingleServerRequestTest, Success) { std::string lower_command(GetParam()); table.toLowerCase(lower_command); - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); makeRequest("hello", request); EXPECT_NE(nullptr, handle_); @@ -159,7 +161,7 @@ TEST_P(RedisSingleServerRequestTest, Success) { TEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) { InSequence s; - RespValue request; + Common::Redis::RespValue request; 
makeBulkStringArray(request, {GetParam(), "hello", "123", "world"}); makeRequest("hello", request); EXPECT_NE(nullptr, handle_); @@ -183,7 +185,7 @@ TEST_P(RedisSingleServerRequestTest, SuccessMultipleArgs) { TEST_P(RedisSingleServerRequestTest, Fail) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); makeRequest("hello", request); EXPECT_NE(nullptr, handle_); @@ -206,7 +208,7 @@ TEST_P(RedisSingleServerRequestTest, Fail) { TEST_P(RedisSingleServerRequestTest, Cancel) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); makeRequest("hello", request); EXPECT_NE(nullptr, handle_); @@ -218,11 +220,11 @@ TEST_P(RedisSingleServerRequestTest, Cancel) { TEST_P(RedisSingleServerRequestTest, NoUpstream) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {GetParam(), "hello"}); EXPECT_CALL(*conn_pool_, makeRequest("hello", Ref(request), _)).WillOnce(Return(nullptr)); - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = Response::get().NoUpstreamHost; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); handle_ = splitter_.makeRequest(request, callbacks_); @@ -230,7 +232,7 @@ TEST_P(RedisSingleServerRequestTest, NoUpstream) { }; INSTANTIATE_TEST_SUITE_P(RedisSingleServerRequestTest, RedisSingleServerRequestTest, - testing::ValuesIn(SupportedCommands::simpleCommands())); + testing::ValuesIn(Common::Redis::SupportedCommands::simpleCommands())); INSTANTIATE_TEST_SUITE_P(RedisSimpleRequestCommandHandlerMixedCaseTests, RedisSingleServerRequestTest, testing::Values("INCR", "inCrBY")); @@ -238,11 +240,11 @@ INSTANTIATE_TEST_SUITE_P(RedisSimpleRequestCommandHandlerMixedCaseTests, TEST_F(RedisSingleServerRequestTest, PingSuccess) { InSequence s; - RespValue request; + 
Common::Redis::RespValue request; makeBulkStringArray(request, {"ping"}); - RespValue response; - response.type(RespType::SimpleString); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::SimpleString); response.asString() = "PONG"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); @@ -253,7 +255,7 @@ TEST_F(RedisSingleServerRequestTest, PingSuccess) { TEST_F(RedisSingleServerRequestTest, EvalSuccess) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); makeRequest("key", request); EXPECT_NE(nullptr, handle_); @@ -277,7 +279,7 @@ TEST_F(RedisSingleServerRequestTest, EvalSuccess) { TEST_F(RedisSingleServerRequestTest, EvalShaSuccess) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"EVALSHA", "return {ARGV[1]}", "1", "keykey", "arg"}); makeRequest("keykey", request); EXPECT_NE(nullptr, handle_); @@ -301,9 +303,9 @@ TEST_F(RedisSingleServerRequestTest, EvalShaSuccess) { TEST_F(RedisSingleServerRequestTest, EvalWrongNumberOfArgs) { InSequence s; - RespValue request; - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue request; + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = "wrong number of arguments for 'eval' command"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); @@ -319,11 +321,11 @@ TEST_F(RedisSingleServerRequestTest, EvalWrongNumberOfArgs) { TEST_F(RedisSingleServerRequestTest, EvalNoUpstream) { InSequence s; - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"eval", "return {ARGV[1]}", "1", "key", "arg"}); EXPECT_CALL(*conn_pool_, makeRequest("key", Ref(request), _)).WillOnce(Return(nullptr)); - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); 
response.asString() = Response::get().NoUpstreamHost; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); handle_ = splitter_.makeRequest(request, callbacks_); @@ -341,17 +343,17 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { request_strings.push_back(std::to_string(i)); } - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, request_strings); - std::vector tmp_expected_requests(num_gets); + std::vector tmp_expected_requests(num_gets); expected_requests_.swap(tmp_expected_requests); pool_callbacks_.resize(num_gets); - std::vector tmp_pool_requests(num_gets); + std::vector tmp_pool_requests(num_gets); pool_requests_.swap(tmp_pool_requests); for (uint32_t i = 0; i < num_gets; i++) { makeBulkStringArray(expected_requests_[i], {"get", std::to_string(i)}); - ConnPool::PoolRequest* request_to_use = nullptr; + Common::Redis::Client::PoolRequest* request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; @@ -363,9 +365,9 @@ class RedisMGETCommandHandlerTest : public RedisCommandSplitterImplTest { handle_ = splitter_.makeRequest(request, callbacks_); } - std::vector expected_requests_; - std::vector pool_callbacks_; - std::vector pool_requests_; + std::vector expected_requests_; + std::vector pool_callbacks_; + std::vector pool_requests_; }; TEST_F(RedisMGETCommandHandlerTest, Normal) { @@ -374,22 +376,22 @@ TEST_F(RedisMGETCommandHandlerTest, Normal) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::BulkString); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::BulkString); elements[0].asString() = "response"; - elements[1].type(RespType::BulkString); 
+ elements[1].type(Common::Redis::RespType::BulkString); elements[1].asString() = "5"; expected_response.asArray().swap(elements); - RespValuePtr response2(new RespValue()); - response2->type(RespType::BulkString); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::BulkString); response2->asString() = "5"; pool_callbacks_[1]->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); - response1->type(RespType::BulkString); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::BulkString); response1->asString() = "response"; time_system_.setMonotonicTime(std::chrono::milliseconds(10)); EXPECT_CALL(store_, deliverHistogramToSinks( @@ -407,18 +409,18 @@ TEST_F(RedisMGETCommandHandlerTest, NormalWithNull) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::BulkString); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::BulkString); elements[0].asString() = "response"; expected_response.asArray().swap(elements); - RespValuePtr response2(new RespValue()); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); pool_callbacks_[1]->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); - response1->type(RespType::BulkString); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::BulkString); response1->asString() = "response"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); pool_callbacks_[0]->onResponse(std::move(response1)); @@ -427,12 +429,12 @@ TEST_F(RedisMGETCommandHandlerTest, NormalWithNull) { TEST_F(RedisMGETCommandHandlerTest, NoUpstreamHostForAll) { // No 
InSequence to avoid making setup() more complicated. - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::Error); elements[0].asString() = Response::get().NoUpstreamHost; - elements[1].type(RespType::Error); + elements[1].type(Common::Redis::RespType::Error); elements[1].asString() = Response::get().NoUpstreamHost; expected_response.asArray().swap(elements); @@ -449,12 +451,12 @@ TEST_F(RedisMGETCommandHandlerTest, NoUpstreamHostForOne) { setup(2, {0}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::Error); elements[0].asString() = Response::get().NoUpstreamHost; - elements[1].type(RespType::Error); + elements[1].type(Common::Redis::RespType::Error); elements[1].asString() = Response::get().UpstreamFailure; expected_response.asArray().swap(elements); @@ -470,19 +472,19 @@ TEST_F(RedisMGETCommandHandlerTest, Failure) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::BulkString); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::BulkString); elements[0].asString() = "response"; - elements[1].type(RespType::Error); + elements[1].type(Common::Redis::RespType::Error); elements[1].asString() = Response::get().UpstreamFailure; expected_response.asArray().swap(elements); 
pool_callbacks_[1]->onFailure(); - RespValuePtr response1(new RespValue()); - response1->type(RespType::BulkString); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::BulkString); response1->asString() = "response"; time_system_.setMonotonicTime(std::chrono::milliseconds(5)); EXPECT_CALL(store_, deliverHistogramToSinks( @@ -499,19 +501,19 @@ TEST_F(RedisMGETCommandHandlerTest, InvalidUpstreamResponse) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Array); - std::vector elements(2); - elements[0].type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Array); + std::vector elements(2); + elements[0].type(Common::Redis::RespType::Error); elements[0].asString() = Response::get().UpstreamProtocolError; - elements[1].type(RespType::Error); + elements[1].type(Common::Redis::RespType::Error); elements[1].asString() = Response::get().UpstreamFailure; expected_response.asArray().swap(elements); pool_callbacks_[1]->onFailure(); - RespValuePtr response1(new RespValue()); - response1->type(RespType::Integer); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Integer); response1->asInteger() = 5; time_system_.setMonotonicTime(std::chrono::milliseconds(10)); EXPECT_CALL(store_, deliverHistogramToSinks( @@ -544,17 +546,17 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { request_strings.push_back(std::to_string(i)); } - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, request_strings); - std::vector tmp_expected_requests(num_sets); + std::vector tmp_expected_requests(num_sets); expected_requests_.swap(tmp_expected_requests); pool_callbacks_.resize(num_sets); - std::vector tmp_pool_requests(num_sets); + std::vector tmp_pool_requests(num_sets); 
pool_requests_.swap(tmp_pool_requests); for (uint32_t i = 0; i < num_sets; i++) { makeBulkStringArray(expected_requests_[i], {"set", std::to_string(i), std::to_string(i)}); - ConnPool::PoolRequest* request_to_use = nullptr; + Common::Redis::Client::PoolRequest* request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; @@ -566,9 +568,9 @@ class RedisMSETCommandHandlerTest : public RedisCommandSplitterImplTest { handle_ = splitter_.makeRequest(request, callbacks_); } - std::vector expected_requests_; - std::vector pool_callbacks_; - std::vector pool_requests_; + std::vector expected_requests_; + std::vector pool_callbacks_; + std::vector pool_requests_; }; TEST_F(RedisMSETCommandHandlerTest, Normal) { @@ -577,17 +579,17 @@ TEST_F(RedisMSETCommandHandlerTest, Normal) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::SimpleString); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::SimpleString); expected_response.asString() = Response::get().OK; - RespValuePtr response2(new RespValue()); - response2->type(RespType::SimpleString); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::SimpleString); response2->asString() = Response::get().OK; pool_callbacks_[1]->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); - response1->type(RespType::SimpleString); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::SimpleString); response1->asString() = Response::get().OK; time_system_.setMonotonicTime(std::chrono::milliseconds(10)); @@ -603,8 +605,8 @@ TEST_F(RedisMSETCommandHandlerTest, Normal) { TEST_F(RedisMSETCommandHandlerTest, NoUpstreamHostForAll) { // No InSequence to avoid making setup() more complicated. 
- RespValue expected_response; - expected_response.type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Error); expected_response.asString() = "finished with 2 error(s)"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); @@ -620,12 +622,12 @@ TEST_F(RedisMSETCommandHandlerTest, NoUpstreamHostForOne) { setup(2, {0}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Error); expected_response.asString() = "finished with 1 error(s)"; - RespValuePtr response2(new RespValue()); - response2->type(RespType::SimpleString); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::SimpleString); response2->asString() = Response::get().OK; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); pool_callbacks_[1]->onResponse(std::move(response2)); @@ -647,11 +649,11 @@ TEST_F(RedisMSETCommandHandlerTest, Cancel) { TEST_F(RedisMSETCommandHandlerTest, WrongNumberOfArgs) { InSequence s; - RespValue response; - response.type(RespType::Error); + Common::Redis::RespValue response; + response.type(Common::Redis::RespType::Error); response.asString() = "wrong number of arguments for 'mset' command"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&response))); - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, {"mset", "foo", "bar", "fizz"}); EXPECT_EQ(nullptr, splitter_.makeRequest(request, callbacks_)); EXPECT_EQ(1UL, store_.counter("redis.foo.command.mset.total").value()); @@ -667,17 +669,17 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, request_strings.push_back(std::to_string(i)); } - RespValue request; + Common::Redis::RespValue request; makeBulkStringArray(request, request_strings); - std::vector 
tmp_expected_requests(num_commands); + std::vector tmp_expected_requests(num_commands); expected_requests_.swap(tmp_expected_requests); pool_callbacks_.resize(num_commands); - std::vector tmp_pool_requests(num_commands); + std::vector tmp_pool_requests(num_commands); pool_requests_.swap(tmp_pool_requests); for (uint32_t i = 0; i < num_commands; i++) { makeBulkStringArray(expected_requests_[i], {GetParam(), std::to_string(i)}); - ConnPool::PoolRequest* request_to_use = nullptr; + Common::Redis::Client::PoolRequest* request_to_use = nullptr; if (std::find(null_handle_indexes.begin(), null_handle_indexes.end(), i) == null_handle_indexes.end()) { request_to_use = &pool_requests_[i]; @@ -689,9 +691,9 @@ class RedisSplitKeysSumResultHandlerTest : public RedisCommandSplitterImplTest, handle_ = splitter_.makeRequest(request, callbacks_); } - std::vector expected_requests_; - std::vector pool_callbacks_; - std::vector pool_requests_; + std::vector expected_requests_; + std::vector pool_callbacks_; + std::vector pool_requests_; }; TEST_P(RedisSplitKeysSumResultHandlerTest, Normal) { @@ -700,17 +702,17 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, Normal) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Integer); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Integer); expected_response.asInteger() = 2; - RespValuePtr response2(new RespValue()); - response2->type(RespType::Integer); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Integer); response2->asInteger() = 1; pool_callbacks_[1]->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); - response1->type(RespType::Integer); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Integer); response1->asInteger() = 1; 
time_system_.setMonotonicTime(std::chrono::milliseconds(10)); EXPECT_CALL( @@ -730,17 +732,17 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, NormalOneZero) { setup(2, {}); EXPECT_NE(nullptr, handle_); - RespValue expected_response; - expected_response.type(RespType::Integer); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Integer); expected_response.asInteger() = 1; - RespValuePtr response2(new RespValue()); - response2->type(RespType::Integer); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + response2->type(Common::Redis::RespType::Integer); response2->asInteger() = 0; pool_callbacks_[1]->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); - response1->type(RespType::Integer); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); + response1->type(Common::Redis::RespType::Integer); response1->asInteger() = 1; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); pool_callbacks_[0]->onResponse(std::move(response1)); @@ -752,8 +754,8 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, NormalOneZero) { TEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) { // No InSequence to avoid making setup() more complicated. - RespValue expected_response; - expected_response.type(RespType::Error); + Common::Redis::RespValue expected_response; + expected_response.type(Common::Redis::RespType::Error); expected_response.asString() = "finished with 2 error(s)"; EXPECT_CALL(callbacks_, onResponse_(PointeesEq(&expected_response))); @@ -763,8 +765,9 @@ TEST_P(RedisSplitKeysSumResultHandlerTest, NoUpstreamHostForAll) { EXPECT_EQ(1UL, store_.counter("redis.foo.command." 
+ GetParam() + ".error").value()); }; -INSTANTIATE_TEST_SUITE_P(RedisSplitKeysSumResultHandlerTest, RedisSplitKeysSumResultHandlerTest, - testing::ValuesIn(SupportedCommands::hashMultipleSumResultCommands())); +INSTANTIATE_TEST_SUITE_P( + RedisSplitKeysSumResultHandlerTest, RedisSplitKeysSumResultHandlerTest, + testing::ValuesIn(Common::Redis::SupportedCommands::hashMultipleSumResultCommands())); } // namespace CommandSplitter } // namespace RedisProxy diff --git a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc index c1fe324728983..bd267cd1670d2 100644 --- a/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/conn_pool_impl_test.cc @@ -6,6 +6,8 @@ #include "extensions/filters/network/redis_proxy/conn_pool_impl.h" +#include "test/extensions/filters/network/common/redis/mocks.h" +#include "test/extensions/filters/network/common/redis/test_utils.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/thread_local/mocks.h" @@ -32,357 +34,7 @@ namespace NetworkFilters { namespace RedisProxy { namespace ConnPool { -envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings -createConnPoolSettings() { - envoy::config::filter::network::redis_proxy::v2::RedisProxy::ConnPoolSettings setting{}; - setting.mutable_op_timeout()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(20)); - setting.set_enable_hashtagging(true); - return setting; -} - -class RedisClientImplTest : public testing::Test, public DecoderFactory { -public: - // RedisProxy::DecoderFactory - DecoderPtr create(DecoderCallbacks& callbacks) override { - callbacks_ = &callbacks; - return DecoderPtr{decoder_}; - } - - ~RedisClientImplTest() { - client_.reset(); - - // Make sure all gauges are 0. 
- for (const Stats::GaugeSharedPtr& gauge : host_->cluster_.stats_store_.gauges()) { - EXPECT_EQ(0U, gauge->value()); - } - for (const Stats::GaugeSharedPtr& gauge : host_->stats_store_.gauges()) { - EXPECT_EQ(0U, gauge->value()); - } - } - - void setup() { - config_ = std::make_unique(createConnPoolSettings()); - finishSetup(); - } - - void setup(std::unique_ptr&& config) { - config_ = std::move(config); - finishSetup(); - } - - void finishSetup() { - upstream_connection_ = new NiceMock(); - Upstream::MockHost::MockCreateConnectionData conn_info; - conn_info.connection_ = upstream_connection_; - EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); - EXPECT_CALL(*host_, createConnection_(_, _)).WillOnce(Return(conn_info)); - EXPECT_CALL(*upstream_connection_, addReadFilter(_)) - .WillOnce(SaveArg<0>(&upstream_read_filter_)); - EXPECT_CALL(*upstream_connection_, connect()); - EXPECT_CALL(*upstream_connection_, noDelay(true)); - - client_ = ClientImpl::create(host_, dispatcher_, EncoderPtr{encoder_}, *this, *config_); - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_total_.value()); - EXPECT_EQ(1UL, host_->stats_.cx_total_.value()); - - // NOP currently. 
- upstream_connection_->runHighWatermarkCallbacks(); - upstream_connection_->runLowWatermarkCallbacks(); - } - - void onConnected() { - EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); - upstream_connection_->raiseEvent(Network::ConnectionEvent::Connected); - } - - const std::string cluster_name_{"foo"}; - std::shared_ptr host_{new NiceMock()}; - Event::MockDispatcher dispatcher_; - Event::MockTimer* connect_or_op_timer_{new Event::MockTimer(&dispatcher_)}; - MockEncoder* encoder_{new MockEncoder()}; - MockDecoder* decoder_{new MockDecoder()}; - DecoderCallbacks* callbacks_{}; - NiceMock* upstream_connection_{}; - Network::ReadFilterSharedPtr upstream_read_filter_; - std::unique_ptr config_; - ClientPtr client_; -}; - -TEST_F(RedisClientImplTest, Basic) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - - RespValue request2; - MockPoolCallbacks callbacks2; - EXPECT_CALL(*encoder_, encode(Ref(request2), _)); - PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); - EXPECT_NE(nullptr, handle2); - - EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); - EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); - EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); - EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); - - Buffer::OwnedImpl fake_data; - EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - InSequence s; - RespValuePtr response1(new RespValue()); - EXPECT_CALL(callbacks1, onResponse_(Ref(response1))); - EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); - callbacks_->onRespValue(std::move(response1)); - - RespValuePtr response2(new RespValue()); - EXPECT_CALL(callbacks2, 
onResponse_(Ref(response2))); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); - callbacks_->onRespValue(std::move(response2)); - })); - upstream_read_filter_->onData(fake_data, false); - - EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - client_->close(); -} - -TEST_F(RedisClientImplTest, Cancel) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - - RespValue request2; - MockPoolCallbacks callbacks2; - EXPECT_CALL(*encoder_, encode(Ref(request2), _)); - PoolRequest* handle2 = client_->makeRequest(request2, callbacks2); - EXPECT_NE(nullptr, handle2); - - handle1->cancel(); - - Buffer::OwnedImpl fake_data; - EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - InSequence s; - - RespValuePtr response1(new RespValue()); - EXPECT_CALL(callbacks1, onResponse_(_)).Times(0); - EXPECT_CALL(*connect_or_op_timer_, enableTimer(_)); - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); - callbacks_->onRespValue(std::move(response1)); - - RespValuePtr response2(new RespValue()); - EXPECT_CALL(callbacks2, onResponse_(Ref(response2))); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SUCCESS)); - callbacks_->onRespValue(std::move(response2)); - })); - upstream_read_filter_->onData(fake_data, false); - - EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - client_->close(); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); -} - 
-TEST_F(RedisClientImplTest, FailAll) { - InSequence s; - - setup(); - - NiceMock connection_callbacks; - client_->addConnectionCallbacks(connection_callbacks); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SERVER_FAILURE)); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); - upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_remote_with_active_rq_.value()); -} - -TEST_F(RedisClientImplTest, FailAllWithCancel) { - InSequence s; - - setup(); - - NiceMock connection_callbacks; - client_->addConnectionCallbacks(connection_callbacks); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - handle1->cancel(); - - EXPECT_CALL(callbacks1, onFailure()).Times(0); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::LocalClose)); - upstream_connection_->raiseEvent(Network::ConnectionEvent::LocalClose); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_local_with_active_rq_.value()); - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); -} - -TEST_F(RedisClientImplTest, ProtocolError) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - 
EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - - Buffer::OwnedImpl fake_data; - EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - throw ProtocolError("error"); - })); - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::REQUEST_FAILED)); - EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - upstream_read_filter_->onData(fake_data, false); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_protocol_error_.value()); - EXPECT_EQ(1UL, host_->stats_.rq_error_.value()); -} - -TEST_F(RedisClientImplTest, ConnectFail) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::SERVER_FAILURE)); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); -} - -class ConfigOutlierDisabled : public Config { - bool disableOutlierEvents() const override { return true; } - std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(25); } - bool enableHashtagging() const override { return false; } -}; - -TEST_F(RedisClientImplTest, OutlierDisabled) { - InSequence s; - - setup(std::make_unique()); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = 
client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - EXPECT_CALL(host_->outlier_detector_, putResult(_)).Times(0); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); - EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); -} - -TEST_F(RedisClientImplTest, ConnectTimeout) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::TIMEOUT)); - EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - connect_or_op_timer_->callback_(); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_timeout_.value()); - EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); -} - -TEST_F(RedisClientImplTest, OpTimeout) { - InSequence s; - - setup(); - - RespValue request1; - MockPoolCallbacks callbacks1; - EXPECT_CALL(*encoder_, encode(Ref(request1), _)); - PoolRequest* handle1 = client_->makeRequest(request1, callbacks1); - EXPECT_NE(nullptr, handle1); - - onConnected(); - - EXPECT_CALL(host_->outlier_detector_, putResult(Upstream::Outlier::Result::TIMEOUT)); - EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(callbacks1, onFailure()); - EXPECT_CALL(*connect_or_op_timer_, disableTimer()); - connect_or_op_timer_->callback_(); - - EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_timeout_.value()); - EXPECT_EQ(1UL, host_->stats_.rq_timeout_.value()); -} - -TEST(RedisClientFactoryImplTest, Basic) { - ClientFactoryImpl factory; - 
Upstream::MockHost::MockCreateConnectionData conn_info; - conn_info.connection_ = new NiceMock(); - std::shared_ptr host(new NiceMock()); - EXPECT_CALL(*host, createConnection_(_, _)).WillOnce(Return(conn_info)); - NiceMock dispatcher; - ConfigImpl config(createConnPoolSettings()); - ClientPtr client = factory.create(host, dispatcher, config); - client->close(); -} - -class RedisConnPoolImplTest : public testing::Test, public ClientFactory { +class RedisConnPoolImplTest : public testing::Test, public Common::Redis::Client::ClientFactory { public: void setup(bool cluster_exists = true) { EXPECT_CALL(cm_, addThreadLocalClusterUpdateCallbacks_(_)) @@ -391,38 +43,40 @@ class RedisConnPoolImplTest : public testing::Test, public ClientFactory { if (!cluster_exists) { EXPECT_CALL(cm_, get("fake_cluster")).WillOnce(Return(nullptr)); } - conn_pool_ = - std::make_unique(cluster_name_, cm_, *this, tls_, createConnPoolSettings()); + conn_pool_ = std::make_unique(cluster_name_, cm_, *this, tls_, + Common::Redis::Client::createConnPoolSettings()); } void makeSimpleRequest(bool create_client) { EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)); if (create_client) { - client_ = new NiceMock(); + client_ = new NiceMock(); EXPECT_CALL(*this, create_(_)).WillOnce(Return(client_)); } - RespValue value; - MockPoolCallbacks callbacks; - MockPoolRequest active_request; + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + Common::Redis::Client::MockPoolRequest active_request; EXPECT_CALL(*client_, makeRequest(Ref(value), Ref(callbacks))) .WillOnce(Return(&active_request)); - PoolRequest* request = conn_pool_->makeRequest("hash_key", value, callbacks); + Common::Redis::Client::PoolRequest* request = + conn_pool_->makeRequest("hash_key", value, callbacks); EXPECT_EQ(&active_request, request); } - // RedisProxy::ConnPool::ClientFactory - ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, const Config&) override { - return 
ClientPtr{create_(host)}; + // Common::Redis::Client::ClientFactory + Common::Redis::Client::ClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher&, + const Common::Redis::Client::Config&) override { + return Common::Redis::Client::ClientPtr{create_(host)}; } - MOCK_METHOD1(create_, Client*(Upstream::HostConstSharedPtr host)); + MOCK_METHOD1(create_, Common::Redis::Client::Client*(Upstream::HostConstSharedPtr host)); const std::string cluster_name_{"fake_cluster"}; NiceMock cm_; NiceMock tls_; InstancePtr conn_pool_; Upstream::ClusterUpdateCallbacks* update_callbacks_{}; - MockClient* client_{}; + Common::Redis::Client::MockClient* client_{}; }; TEST_F(RedisConnPoolImplTest, Basic) { @@ -430,10 +84,10 @@ TEST_F(RedisConnPoolImplTest, Basic) { setup(); - RespValue value; - MockPoolRequest active_request; - MockPoolCallbacks callbacks; - MockClient* client = new NiceMock(); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolRequest active_request; + Common::Redis::Client::MockPoolCallbacks callbacks; + Common::Redis::Client::MockClient* client = new NiceMock(); EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)) .WillOnce(Invoke([&](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { @@ -444,7 +98,8 @@ TEST_F(RedisConnPoolImplTest, Basic) { })); EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); EXPECT_CALL(*client, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request)); - PoolRequest* request = conn_pool_->makeRequest("hash_key", value, callbacks); + Common::Redis::Client::PoolRequest* request = + conn_pool_->makeRequest("hash_key", value, callbacks); EXPECT_EQ(&active_request, request); EXPECT_CALL(*client, close()); @@ -456,8 +111,8 @@ TEST_F(RedisConnPoolImplTest, Hashtagging) { setup(); - RespValue value; - MockPoolCallbacks callbacks; + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; auto expectHashKey = [](const std::string& s) { return 
[s](Upstream::LoadBalancerContext* context) -> Upstream::HostConstSharedPtr { @@ -489,9 +144,10 @@ TEST_F(RedisConnPoolImplTest, NoClusterAtConstruction) { setup(false); - RespValue value; - MockPoolCallbacks callbacks; - PoolRequest* request = conn_pool_->makeRequest("hash_key", value, callbacks); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + Common::Redis::Client::PoolRequest* request = + conn_pool_->makeRequest("hash_key", value, callbacks); EXPECT_EQ(nullptr, request); // Now add the cluster. Request to the cluster should succeed. @@ -533,27 +189,28 @@ TEST_F(RedisConnPoolImplTest, HostRemove) { setup(); - MockPoolCallbacks callbacks; - RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; + Common::Redis::RespValue value; std::shared_ptr host1(new Upstream::MockHost()); std::shared_ptr host2(new Upstream::MockHost()); - MockClient* client1 = new NiceMock(); - MockClient* client2 = new NiceMock(); + Common::Redis::Client::MockClient* client1 = new NiceMock(); + Common::Redis::Client::MockClient* client2 = new NiceMock(); EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(host1)); EXPECT_CALL(*this, create_(Eq(host1))).WillOnce(Return(client1)); - MockPoolRequest active_request1; + Common::Redis::Client::MockPoolRequest active_request1; EXPECT_CALL(*client1, makeRequest(Ref(value), Ref(callbacks))).WillOnce(Return(&active_request1)); - PoolRequest* request1 = conn_pool_->makeRequest("hash_key", value, callbacks); + Common::Redis::Client::PoolRequest* request1 = + conn_pool_->makeRequest("hash_key", value, callbacks); EXPECT_EQ(&active_request1, request1); EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(host2)); EXPECT_CALL(*this, create_(Eq(host2))).WillOnce(Return(client2)); - MockPoolRequest active_request2; + Common::Redis::Client::MockPoolRequest active_request2; EXPECT_CALL(*client2, makeRequest(Ref(value), 
Ref(callbacks))).WillOnce(Return(&active_request2)); - PoolRequest* request2 = conn_pool_->makeRequest("bar", value, callbacks); + Common::Redis::Client::PoolRequest* request2 = conn_pool_->makeRequest("bar", value, callbacks); EXPECT_EQ(&active_request2, request2); EXPECT_CALL(*client2, close()); @@ -576,10 +233,11 @@ TEST_F(RedisConnPoolImplTest, NoHost) { setup(); - RespValue value; - MockPoolCallbacks callbacks; + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolCallbacks callbacks; EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)).WillOnce(Return(nullptr)); - PoolRequest* request = conn_pool_->makeRequest("hash_key", value, callbacks); + Common::Redis::Client::PoolRequest* request = + conn_pool_->makeRequest("hash_key", value, callbacks); EXPECT_EQ(nullptr, request); tls_.shutdownThread(); @@ -590,10 +248,10 @@ TEST_F(RedisConnPoolImplTest, RemoteClose) { setup(); - RespValue value; - MockPoolRequest active_request; - MockPoolCallbacks callbacks; - MockClient* client = new NiceMock(); + Common::Redis::RespValue value; + Common::Redis::Client::MockPoolRequest active_request; + Common::Redis::Client::MockPoolCallbacks callbacks; + Common::Redis::Client::MockClient* client = new NiceMock(); EXPECT_CALL(cm_.thread_local_cluster_.lb_, chooseHost(_)); EXPECT_CALL(*this, create_(_)).WillOnce(Return(client)); diff --git a/test/extensions/filters/network/redis_proxy/mocks.cc b/test/extensions/filters/network/redis_proxy/mocks.cc index c3283870c80d1..7e0ce1eff0bde 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.cc +++ b/test/extensions/filters/network/redis_proxy/mocks.cc @@ -15,76 +15,8 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { -void PrintTo(const RespValue& value, std::ostream* os) { *os << value.toString(); } - -void PrintTo(const RespValuePtr& value, std::ostream* os) { *os << value->toString(); } - -bool operator==(const RespValue& lhs, const RespValue& rhs) { - if (lhs.type() != rhs.type()) { 
- return false; - } - - switch (lhs.type()) { - case RespType::Array: { - if (lhs.asArray().size() != rhs.asArray().size()) { - return false; - } - - bool equal = true; - for (uint64_t i = 0; i < lhs.asArray().size(); i++) { - equal &= (lhs.asArray()[i] == rhs.asArray()[i]); - } - - return equal; - } - case RespType::SimpleString: - case RespType::BulkString: - case RespType::Error: { - return lhs.asString() == rhs.asString(); - } - case RespType::Null: { - return true; - } - case RespType::Integer: { - return lhs.asInteger() == rhs.asInteger(); - } - } - - NOT_REACHED_GCOVR_EXCL_LINE; -} - -MockEncoder::MockEncoder() { - ON_CALL(*this, encode(_, _)) - .WillByDefault(Invoke([this](const RespValue& value, Buffer::Instance& out) -> void { - real_encoder_.encode(value, out); - })); -} - -MockEncoder::~MockEncoder() {} - -MockDecoder::MockDecoder() {} -MockDecoder::~MockDecoder() {} - namespace ConnPool { -MockClient::MockClient() { - ON_CALL(*this, addConnectionCallbacks(_)) - .WillByDefault(Invoke([this](Network::ConnectionCallbacks& callbacks) -> void { - callbacks_.push_back(&callbacks); - })); - ON_CALL(*this, close()).WillByDefault(Invoke([this]() -> void { - raiseEvent(Network::ConnectionEvent::LocalClose); - })); -} - -MockClient::~MockClient() {} - -MockPoolRequest::MockPoolRequest() {} -MockPoolRequest::~MockPoolRequest() {} - -MockPoolCallbacks::MockPoolCallbacks() {} -MockPoolCallbacks::~MockPoolCallbacks() {} - MockInstance::MockInstance() {} MockInstance::~MockInstance() {} diff --git a/test/extensions/filters/network/redis_proxy/mocks.h b/test/extensions/filters/network/redis_proxy/mocks.h index b0836ec595410..19c724ac74478 100644 --- a/test/extensions/filters/network/redis_proxy/mocks.h +++ b/test/extensions/filters/network/redis_proxy/mocks.h @@ -4,7 +4,8 @@ #include #include -#include "extensions/filters/network/redis_proxy/codec_impl.h" +#include "extensions/filters/network/common/redis/client.h" +#include 
"extensions/filters/network/common/redis/codec_impl.h" #include "extensions/filters/network/redis_proxy/command_splitter.h" #include "extensions/filters/network/redis_proxy/conn_pool.h" @@ -17,91 +18,17 @@ namespace Extensions { namespace NetworkFilters { namespace RedisProxy { -/** - * Pretty print const RespValue& value - */ - -void PrintTo(const RespValue& value, std::ostream* os); -void PrintTo(const RespValuePtr& value, std::ostream* os); -bool operator==(const RespValue& lhs, const RespValue& rhs); - -class MockEncoder : public Encoder { -public: - MockEncoder(); - ~MockEncoder(); - - MOCK_METHOD2(encode, void(const RespValue& value, Buffer::Instance& out)); - -private: - EncoderImpl real_encoder_; -}; - -class MockDecoder : public Decoder { -public: - MockDecoder(); - ~MockDecoder(); - - MOCK_METHOD1(decode, void(Buffer::Instance& data)); -}; - namespace ConnPool { -class MockClient : public Client { -public: - MockClient(); - ~MockClient(); - - void raiseEvent(Network::ConnectionEvent event) { - for (Network::ConnectionCallbacks* callbacks : callbacks_) { - callbacks->onEvent(event); - } - } - - void runHighWatermarkCallbacks() { - for (auto* callback : callbacks_) { - callback->onAboveWriteBufferHighWatermark(); - } - } - - void runLowWatermarkCallbacks() { - for (auto* callback : callbacks_) { - callback->onBelowWriteBufferLowWatermark(); - } - } - - MOCK_METHOD1(addConnectionCallbacks, void(Network::ConnectionCallbacks& callbacks)); - MOCK_METHOD0(close, void()); - MOCK_METHOD2(makeRequest, PoolRequest*(const RespValue& request, PoolCallbacks& callbacks)); - - std::list callbacks_; -}; - -class MockPoolRequest : public PoolRequest { -public: - MockPoolRequest(); - ~MockPoolRequest(); - - MOCK_METHOD0(cancel, void()); -}; - -class MockPoolCallbacks : public PoolCallbacks { -public: - MockPoolCallbacks(); - ~MockPoolCallbacks(); - - void onResponse(RespValuePtr&& value) override { onResponse_(value); } - - MOCK_METHOD1(onResponse_, void(RespValuePtr& 
value)); - MOCK_METHOD0(onFailure, void()); -}; - class MockInstance : public Instance { public: MockInstance(); ~MockInstance(); - MOCK_METHOD3(makeRequest, PoolRequest*(const std::string& hash_key, const RespValue& request, - PoolCallbacks& callbacks)); + MOCK_METHOD3(makeRequest, + Common::Redis::Client::PoolRequest*( + const std::string& hash_key, const Common::Redis::RespValue& request, + Common::Redis::Client::PoolCallbacks& callbacks)); }; } // namespace ConnPool @@ -121,9 +48,9 @@ class MockSplitCallbacks : public SplitCallbacks { MockSplitCallbacks(); ~MockSplitCallbacks(); - void onResponse(RespValuePtr&& value) override { onResponse_(value); } + void onResponse(Common::Redis::RespValuePtr&& value) override { onResponse_(value); } - MOCK_METHOD1(onResponse_, void(RespValuePtr& value)); + MOCK_METHOD1(onResponse_, void(Common::Redis::RespValuePtr& value)); }; class MockInstance : public Instance { @@ -131,11 +58,13 @@ class MockInstance : public Instance { MockInstance(); ~MockInstance(); - SplitRequestPtr makeRequest(const RespValue& request, SplitCallbacks& callbacks) override { + SplitRequestPtr makeRequest(const Common::Redis::RespValue& request, + SplitCallbacks& callbacks) override { return SplitRequestPtr{makeRequest_(request, callbacks)}; } - MOCK_METHOD2(makeRequest_, SplitRequest*(const RespValue& request, SplitCallbacks& callbacks)); + MOCK_METHOD2(makeRequest_, + SplitRequest*(const Common::Redis::RespValue& request, SplitCallbacks& callbacks)); }; } // namespace CommandSplitter diff --git a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc index 9861b10e88f0f..333a9687dc501 100644 --- a/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/network/redis_proxy/proxy_filter_test.cc @@ -5,6 +5,7 @@ #include "extensions/filters/network/redis_proxy/proxy_filter.h" +#include "test/extensions/filters/network/common/redis/mocks.h" 
#include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/common.h" #include "test/mocks/network/mocks.h" @@ -73,7 +74,7 @@ TEST_F(RedisProxyFilterConfigTest, BadRedisProxyConfig) { EXPECT_THROW(parseProtoFromJson(json_string), Json::Exception); } -class RedisProxyFilterTest : public testing::Test, public DecoderFactory { +class RedisProxyFilterTest : public testing::Test, public Common::Redis::DecoderFactory { public: RedisProxyFilterTest() { std::string json_string = R"EOF( @@ -87,7 +88,8 @@ class RedisProxyFilterTest : public testing::Test, public DecoderFactory { envoy::config::filter::network::redis_proxy::v2::RedisProxy proto_config = parseProtoFromJson(json_string); config_.reset(new ProxyFilterConfig(proto_config, store_, drain_decision_, runtime_)); - filter_ = std::make_unique(*this, EncoderPtr{encoder_}, splitter_, config_); + filter_ = std::make_unique(*this, Common::Redis::EncoderPtr{encoder_}, splitter_, + config_); filter_->initializeReadFilterCallbacks(filter_callbacks_); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); EXPECT_EQ(1UL, config_->stats_.downstream_cx_total_.value()); @@ -105,15 +107,15 @@ class RedisProxyFilterTest : public testing::Test, public DecoderFactory { } } - // RedisProxy::DecoderFactory - DecoderPtr create(DecoderCallbacks& callbacks) override { + // Common::Redis::DecoderFactory + Common::Redis::DecoderPtr create(Common::Redis::DecoderCallbacks& callbacks) override { decoder_callbacks_ = &callbacks; - return DecoderPtr{decoder_}; + return Common::Redis::DecoderPtr{decoder_}; } - MockEncoder* encoder_{new MockEncoder()}; - MockDecoder* decoder_{new MockDecoder()}; - DecoderCallbacks* decoder_callbacks_{}; + Common::Redis::MockEncoder* encoder_{new Common::Redis::MockEncoder()}; + Common::Redis::MockDecoder* decoder_{new Common::Redis::MockDecoder()}; + Common::Redis::DecoderCallbacks* decoder_callbacks_{}; CommandSplitter::MockInstance splitter_; Stats::IsolatedStoreImpl 
store_; NiceMock drain_decision_; @@ -132,12 +134,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseWithDrainClose) { CommandSplitter::MockSplitRequest* request_handle2 = new CommandSplitter::MockSplitRequest(); CommandSplitter::SplitCallbacks* request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - RespValuePtr request1(new RespValue()); + Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); - RespValuePtr request2(new RespValue()); + Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); @@ -147,11 +149,11 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseWithDrainClose) { EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value()); EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value()); - RespValuePtr response2(new RespValue()); - RespValue* response2_ptr = response2.get(); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); + Common::Redis::RespValue* response2_ptr = response2.get(); request_callbacks2->onResponse(std::move(response2)); - RespValuePtr response1(new RespValue()); + Common::Redis::RespValuePtr response1(new Common::Redis::RespValue()); EXPECT_CALL(*encoder_, encode(Ref(*response1), _)); EXPECT_CALL(*encoder_, encode(Ref(*response2_ptr), _)); EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); @@ -173,12 +175,12 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) CommandSplitter::MockSplitRequest* request_handle2 = new CommandSplitter::MockSplitRequest(); CommandSplitter::SplitCallbacks* 
request_callbacks2; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - RespValuePtr request1(new RespValue()); + Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); - RespValuePtr request2(new RespValue()); + Common::Redis::RespValuePtr request2(new Common::Redis::RespValue()); EXPECT_CALL(splitter_, makeRequest_(Ref(*request2), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks2)), Return(request_handle2))); decoder_callbacks_->onRespValue(std::move(request2)); @@ -188,7 +190,7 @@ TEST_F(RedisProxyFilterTest, OutOfOrderResponseDownstreamDisconnectBeforeFlush) EXPECT_EQ(2UL, config_->stats_.downstream_rq_total_.value()); EXPECT_EQ(2UL, config_->stats_.downstream_rq_active_.value()); - RespValuePtr response2(new RespValue()); + Common::Redis::RespValuePtr response2(new Common::Redis::RespValue()); request_callbacks2->onResponse(std::move(response2)); EXPECT_CALL(*request_handle1, cancel()); @@ -202,7 +204,7 @@ TEST_F(RedisProxyFilterTest, DownstreamDisconnectWithActive) { CommandSplitter::MockSplitRequest* request_handle1 = new CommandSplitter::MockSplitRequest(); CommandSplitter::SplitCallbacks* request_callbacks1; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - RespValuePtr request1(new RespValue()); + Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce(DoAll(WithArg<1>(SaveArgAddress(&request_callbacks1)), Return(request_handle1))); decoder_callbacks_->onRespValue(std::move(request1)); @@ -217,16 +219,16 @@ TEST_F(RedisProxyFilterTest, ImmediateResponse) { InSequence s; Buffer::OwnedImpl fake_data; - RespValuePtr request1(new RespValue()); + 
Common::Redis::RespValuePtr request1(new Common::Redis::RespValue()); EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { decoder_callbacks_->onRespValue(std::move(request1)); })); EXPECT_CALL(splitter_, makeRequest_(Ref(*request1), _)) .WillOnce( - Invoke([&](const RespValue&, + Invoke([&](const Common::Redis::RespValue&, CommandSplitter::SplitCallbacks& callbacks) -> CommandSplitter::SplitRequest* { - RespValuePtr error(new RespValue()); - error->type(RespType::Error); + Common::Redis::RespValuePtr error(new Common::Redis::RespValue()); + error->type(Common::Redis::RespType::Error); error->asString() = "no healthy upstream"; EXPECT_CALL(*encoder_, encode(Eq(ByRef(*error)), _)); EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); @@ -243,11 +245,11 @@ TEST_F(RedisProxyFilterTest, ProtocolError) { Buffer::OwnedImpl fake_data; EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { - throw ProtocolError("error"); + throw Common::Redis::ProtocolError("error"); })); - RespValue error; - error.type(RespType::Error); + Common::Redis::RespValue error; + error.type(Common::Redis::RespType::Error); error.asString() = "downstream protocol error"; EXPECT_CALL(*encoder_, encode(Eq(ByRef(error)), _)); EXPECT_CALL(filter_callbacks_.connection_, write(_, _)); diff --git a/test/extensions/health_checkers/redis/BUILD b/test/extensions/health_checkers/redis/BUILD index c2ccd68b2cb68..015f73f5e4f4f 100644 --- a/test/extensions/health_checkers/redis/BUILD +++ b/test/extensions/health_checkers/redis/BUILD @@ -19,6 +19,8 @@ envoy_extension_cc_test( "//source/extensions/health_checkers/redis", "//source/extensions/health_checkers/redis:utility", "//test/common/upstream:utility_lib", + "//test/extensions/filters/network/common/redis:redis_mocks", + "//test/extensions/filters/network/common/redis:test_utils_lib", "//test/extensions/filters/network/redis_proxy:redis_mocks", 
"//test/mocks/network:network_mocks", "//test/mocks/runtime:runtime_mocks", diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index be5df3a8bb642..68b9c99ff9e6c 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -4,6 +4,7 @@ #include "extensions/health_checkers/redis/utility.h" #include "test/common/upstream/utility.h" +#include "test/extensions/filters/network/common/redis/mocks.h" #include "test/extensions/filters/network/redis_proxy/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/runtime/mocks.h" @@ -27,7 +28,7 @@ namespace { class RedisHealthCheckerTest : public testing::Test, - public Extensions::NetworkFilters::RedisProxy::ConnPool::ClientFactory { + public Extensions::NetworkFilters::Common::Redis::Client::ClientFactory { public: RedisHealthCheckerTest() : cluster_(new NiceMock()), @@ -120,13 +121,13 @@ class RedisHealthCheckerTest random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); } - Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr + Extensions::NetworkFilters::Common::Redis::Client::ClientPtr create(Upstream::HostConstSharedPtr, Event::Dispatcher&, - const Extensions::NetworkFilters::RedisProxy::ConnPool::Config&) override { - return Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr{create_()}; + const Extensions::NetworkFilters::Common::Redis::Client::Config&) override { + return Extensions::NetworkFilters::Common::Redis::Client::ClientPtr{create_()}; } - MOCK_METHOD0(create_, Extensions::NetworkFilters::RedisProxy::ConnPool::Client*()); + MOCK_METHOD0(create_, Extensions::NetworkFilters::Common::Redis::Client::Client*()); void expectSessionCreate() { interval_timer_ = new Event::MockTimer(&dispatcher_); @@ -134,7 +135,7 @@ class RedisHealthCheckerTest } void expectClientCreate() { - client_ = new 
Extensions::NetworkFilters::RedisProxy::ConnPool::MockClient(); + client_ = new Extensions::NetworkFilters::Common::Redis::Client::MockClient(); EXPECT_CALL(*this, create_()).WillOnce(Return(client_)); EXPECT_CALL(*client_, addConnectionCallbacks(_)); } @@ -158,9 +159,9 @@ class RedisHealthCheckerTest Upstream::MockHealthCheckEventLogger* event_logger_{}; Event::MockTimer* timeout_timer_{}; Event::MockTimer* interval_timer_{}; - Extensions::NetworkFilters::RedisProxy::ConnPool::MockClient* client_{}; - Extensions::NetworkFilters::RedisProxy::ConnPool::MockPoolRequest pool_request_; - Extensions::NetworkFilters::RedisProxy::ConnPool::PoolCallbacks* pool_callbacks_{}; + Extensions::NetworkFilters::Common::Redis::Client::MockClient* client_{}; + Extensions::NetworkFilters::Common::Redis::Client::MockPoolRequest pool_request_; + Extensions::NetworkFilters::Common::Redis::Client::PoolCallbacks* pool_callbacks_{}; std::shared_ptr health_checker_; }; @@ -182,9 +183,9 @@ TEST_F(RedisHealthCheckerTest, PingAndVariousFailures) { // Success EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - Extensions::NetworkFilters::RedisProxy::RespValuePtr response( - new Extensions::NetworkFilters::RedisProxy::RespValue()); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::SimpleString); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::SimpleString); response->asString() = "PONG"; pool_callbacks_->onResponse(std::move(response)); @@ -195,7 +196,7 @@ TEST_F(RedisHealthCheckerTest, PingAndVariousFailures) { EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - response = std::make_unique(); + response = std::make_unique(); pool_callbacks_->onResponse(std::move(response)); expectPingRequestCreate(); @@ -250,9 +251,9 @@ 
TEST_F(RedisHealthCheckerTest, FailuresLogging) { // Success EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - Extensions::NetworkFilters::RedisProxy::RespValuePtr response( - new Extensions::NetworkFilters::RedisProxy::RespValue()); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::SimpleString); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::SimpleString); response->asString() = "PONG"; pool_callbacks_->onResponse(std::move(response)); @@ -264,7 +265,7 @@ TEST_F(RedisHealthCheckerTest, FailuresLogging) { EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - response = std::make_unique(); + response = std::make_unique(); pool_callbacks_->onResponse(std::move(response)); expectPingRequestCreate(); @@ -274,7 +275,7 @@ TEST_F(RedisHealthCheckerTest, FailuresLogging) { EXPECT_CALL(*event_logger_, logUnhealthy(_, _, _, false)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - response = std::make_unique(); + response = std::make_unique(); pool_callbacks_->onResponse(std::move(response)); expectPingRequestCreate(); @@ -321,9 +322,9 @@ TEST_F(RedisHealthCheckerTest, LogInitialFailure) { EXPECT_CALL(*event_logger_, logAddHealthy(_, _, false)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - Extensions::NetworkFilters::RedisProxy::RespValuePtr response( - new Extensions::NetworkFilters::RedisProxy::RespValue()); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::SimpleString); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::SimpleString); response->asString() = "PONG"; 
pool_callbacks_->onResponse(std::move(response)); @@ -358,9 +359,9 @@ TEST_F(RedisHealthCheckerTest, Exists) { // Success EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - Extensions::NetworkFilters::RedisProxy::RespValuePtr response( - new Extensions::NetworkFilters::RedisProxy::RespValue()); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::Integer); + NetworkFilters::Common::Redis::RespValuePtr response( + new NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::Integer); response->asInteger() = 0; pool_callbacks_->onResponse(std::move(response)); @@ -371,8 +372,8 @@ TEST_F(RedisHealthCheckerTest, Exists) { EXPECT_CALL(*event_logger_, logEjectUnhealthy(_, _, _)); EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - response = std::make_unique(); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::Integer); + response = std::make_unique(); + response->type(NetworkFilters::Common::Redis::RespType::Integer); response->asInteger() = 1; pool_callbacks_->onResponse(std::move(response)); @@ -382,7 +383,7 @@ TEST_F(RedisHealthCheckerTest, Exists) { // Failure, no value EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); - response = std::make_unique(); + response = std::make_unique(); pool_callbacks_->onResponse(std::move(response)); EXPECT_CALL(*client_, close()); @@ -409,9 +410,9 @@ TEST_F(RedisHealthCheckerTest, NoConnectionReuse) { EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); EXPECT_CALL(*client_, close()); - Extensions::NetworkFilters::RedisProxy::RespValuePtr response( - new Extensions::NetworkFilters::RedisProxy::RespValue()); - response->type(Extensions::NetworkFilters::RedisProxy::RespType::SimpleString); + NetworkFilters::Common::Redis::RespValuePtr response( + new 
NetworkFilters::Common::Redis::RespValue()); + response->type(NetworkFilters::Common::Redis::RespType::SimpleString); response->asString() = "PONG"; pool_callbacks_->onResponse(std::move(response)); @@ -424,7 +425,7 @@ TEST_F(RedisHealthCheckerTest, NoConnectionReuse) { EXPECT_CALL(*timeout_timer_, disableTimer()); EXPECT_CALL(*interval_timer_, enableTimer(_)); EXPECT_CALL(*client_, close()); - response = std::make_unique(); + response = std::make_unique(); pool_callbacks_->onResponse(std::move(response)); expectClientCreate(); diff --git a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc index b4e6a59b2168e..e43b4c0df2cc8 100644 --- a/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc +++ b/test/extensions/quic_listeners/quiche/platform/quic_platform_test.cc @@ -1,4 +1,5 @@ #include "test/extensions/transport_sockets/tls/ssl_test_utility.h" +#include "test/test_common/environment.h" #include "test/test_common/logging.h" #include "gmock/gmock.h" @@ -11,6 +12,7 @@ #include "quiche/quic/platform/api/quic_endian.h" #include "quiche/quic/platform/api/quic_estimate_memory_usage.h" #include "quiche/quic/platform/api/quic_exported_stats.h" +#include "quiche/quic/platform/api/quic_hostname_utils.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_map_util.h" #include "quiche/quic/platform/api/quic_mock_log.h" @@ -21,6 +23,8 @@ #include "quiche/quic/platform/api/quic_stack_trace.h" #include "quiche/quic/platform/api/quic_string.h" #include "quiche/quic/platform/api/quic_string_piece.h" +#include "quiche/quic/platform/api/quic_test_output.h" +#include "quiche/quic/platform/api/quic_thread.h" #include "quiche/quic/platform/api/quic_uint128.h" using testing::HasSubstr; @@ -67,6 +71,15 @@ TEST(QuicPlatformTest, QuicExportedStats) { QUIC_HISTOGRAM_COUNTS("my.count.histogram", 123, 0, 1000, 100, "doc"); } +TEST(QuicPlatformTest, 
QuicHostnameUtils) { + EXPECT_FALSE(quic::QuicHostnameUtils::IsValidSNI("!!")); + EXPECT_FALSE(quic::QuicHostnameUtils::IsValidSNI("envoyproxy")); + EXPECT_TRUE(quic::QuicHostnameUtils::IsValidSNI("www.envoyproxy.io")); + EXPECT_EQ("lyft.com", quic::QuicHostnameUtils::NormalizeHostname("lyft.com")); + EXPECT_EQ("google.com", quic::QuicHostnameUtils::NormalizeHostname("google.com...")); + EXPECT_EQ("quicwg.org", quic::QuicHostnameUtils::NormalizeHostname("QUICWG.ORG")); +} + TEST(QuicPlatformTest, QuicUnorderedMap) { quic::QuicUnorderedMap umap; umap.insert({"foo", 2}); @@ -182,6 +195,41 @@ TEST(QuicPlatformTest, QuicStringPiece) { EXPECT_EQ('b', sp[0]); } +TEST(QuicPlatformTest, QuicThread) { + class AdderThread : public quic::QuicThread { + public: + AdderThread(int* value, int increment) + : quic::QuicThread("adder_thread"), value_(value), increment_(increment) {} + + ~AdderThread() override = default; + + protected: + void Run() override { *value_ += increment_; } + + private: + int* value_; + int increment_; + }; + + int value = 0; + + // A QuicThread that is never started, which is ok. + { AdderThread t0(&value, 1); } + EXPECT_EQ(0, value); + + // A QuicThread that is started and joined as usual. + { + AdderThread t1(&value, 1); + t1.Start(); + t1.Join(); + } + EXPECT_EQ(1, value); + + // QuicThread will panic if it's started but not joined. + EXPECT_DEATH({ AdderThread(&value, 2).Start(); }, + "QuicThread should be joined before destruction"); +} + TEST(QuicPlatformTest, QuicUint128) { quic::QuicUint128 i = MakeQuicUint128(16777216, 315); EXPECT_EQ(315, QuicUint128Low64(i)); @@ -380,6 +428,22 @@ TEST(QuicPlatformTest, QuicCertUtils) { OPENSSL_free(static_cast(der)); } +TEST(QuicPlatformTest, QuicTestOutput) { + QuicLogThresholdSaver saver; + + Envoy::TestEnvironment::setEnvVar("QUIC_TEST_OUTPUT_DIR", "/tmp", /*overwrite=*/false); + + // Set log level to INFO to see the test output path in log. 
+ quic::GetLogger().set_level(quic::INFO); + + EXPECT_LOG_NOT_CONTAINS("warn", "", + quic::QuicRecordTestOutput("quic_test_output.1", "output 1 content\n")); + EXPECT_LOG_NOT_CONTAINS("error", "", + quic::QuicRecordTestOutput("quic_test_output.2", "output 2 content\n")); + EXPECT_LOG_CONTAINS("info", "Recorded test output into", + quic::QuicRecordTestOutput("quic_test_output.3", "output 3 content\n")); +} + } // namespace } // namespace Quiche } // namespace QuicListeners diff --git a/test/fuzz/utility.h b/test/fuzz/utility.h index 8ea13ded8c297..ff90f7e1d394a 100644 --- a/test/fuzz/utility.h +++ b/test/fuzz/utility.h @@ -11,12 +11,27 @@ namespace Envoy { namespace Fuzz { // Convert from test proto Headers to TestHeaderMapImpl. -inline Http::TestHeaderMapImpl fromHeaders(const test::fuzz::Headers& headers) { +inline Http::TestHeaderMapImpl +fromHeaders(const test::fuzz::Headers& headers, + const std::unordered_set& ignore_headers = {}) { Http::TestHeaderMapImpl header_map; for (const auto& header : headers.headers()) { + // HeaderMapImpl and places such as the route lookup should never see strings with embedded NULL + // values, the HTTP codecs should reject them. So, don't inject any such strings into the fuzz + // tests. + const auto clean = [](const std::string& s) { + const auto n = s.find('\0'); + if (n == std::string::npos) { + return s; + } + return s.substr(0, n); + }; // When we are injecting headers, we don't allow the key to ever be empty, // since calling code is not supposed to do this. - header_map.addCopy(header.key().empty() ? "not-empty" : header.key(), header.value()); + const std::string key = header.key().empty() ? 
"not-empty" : clean(header.key()); + if (ignore_headers.find(StringUtil::toLower(key)) != ignore_headers.end()) { + header_map.addCopy(key, clean(header.value())); + } } return header_map; } diff --git a/test/integration/BUILD b/test/integration/BUILD index 8635220d0bc8d..db52ef4af1be8 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -96,6 +96,40 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "custom_cluster_integration_test", + srcs = ["custom_cluster_integration_test.cc"], + deps = [ + ":http_integration_lib", + "//source/common/upstream:load_balancer_lib", + "//test/config:utility_lib", + "//test/integration/clusters:custom_static_cluster", + "//test/test_common:network_utility_lib", + "@envoy_api//envoy/api/v2:eds_cc", + ], +) + +envoy_cc_test( + name = "delta_cds_integration_test", + srcs = ["delta_cds_integration_test.cc"], + data = [ + "//test/config/integration/certs", + ], + deps = [ + ":http_integration_lib", + "//source/common/config:protobuf_link_hacks", + "//source/common/config:resources_lib", + "//source/common/protobuf:utility_lib", + "//test/common/grpc:grpc_client_integration_lib", + "//test/mocks/runtime:runtime_mocks", + "//test/mocks/server:server_mocks", + "//test/test_common:network_utility_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/api/v2:cds_cc", + "@envoy_api//envoy/api/v2:discovery_cc", + ], +) + exports_files(["test_utility.sh"]) envoy_sh_test( diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index 181027ed4ee99..736a3dd783811 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -541,6 +541,7 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { sendDiscoveryResponse(Config::TypeUrl::get().Cluster, {buildCluster("warming_cluster_1")}, "2"); test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", 
{"warming_cluster_1"})); @@ -552,20 +553,89 @@ TEST_P(AdsIntegrationTest, CdsPausedDuringWarming) { EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"warming_cluster_2", "warming_cluster_1"})); - // Finish warming the first cluster. + // Finish warming the clusters. sendDiscoveryResponse( Config::TypeUrl::get().ClusterLoadAssignment, - {buildClusterLoadAssignment("warming_cluster_1")}, "2"); + {buildClusterLoadAssignment("warming_cluster_1"), + buildClusterLoadAssignment("warming_cluster_2")}, + "2"); - // Envoy will finish warming of the second cluster too because of the missing load assignments. + // Validate that clusters are warmed. test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); - // CDS is resumed. Also, warming_cluster_1 was not removed as it was in the warming state. + // CDS is resumed and EDS response was acknowledged. EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "3", {})); EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "2", {"warming_cluster_2", "warming_cluster_1"})); } +// Verify cluster warming is finished only on named EDS response. +TEST_P(AdsIntegrationTest, ClusterWarmingOnNamedResponse) { + initialize(); + + // Send initial configuration, validate we can process a request. 
+ EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {})); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, "1"); + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", {"cluster_0"})); + + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {})); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "", {})); + sendDiscoveryResponse( + Config::TypeUrl::get().Listener, {buildListener("listener_0", "route_config_0")}, "1"); + + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", {"cluster_0"})); + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "", {"route_config_0"})); + sendDiscoveryResponse( + Config::TypeUrl::get().RouteConfiguration, {buildRouteConfig("route_config_0", "cluster_0")}, + "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Listener, "1", {})); + EXPECT_TRUE( + compareDiscoveryRequest(Config::TypeUrl::get().RouteConfiguration, "1", {"route_config_0"})); + + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + makeSingleRequest(); + + // Send the first warming cluster. + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("warming_cluster_1")}, "2"); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"warming_cluster_1"})); + + // Send the second warming cluster. 
+ sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("warming_cluster_2")}, "3"); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 2); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "1", + {"warming_cluster_2", "warming_cluster_1"})); + + // Finish warming the first cluster. + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, + {buildClusterLoadAssignment("warming_cluster_1")}, "2"); + + // Envoy will not finish warming of the second cluster because of the missing load assignments + // i.e. no named EDS response. + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + + // Finish warming the second cluster. + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, + {buildClusterLoadAssignment("warming_cluster_2")}, "3"); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); +} + +// Regression test for the use-after-free crash when processing RDS update (#3953). 
TEST_P(AdsIntegrationTest, RdsAfterLdsWithRdsChange) { initialize(); diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index 3d0df699b1f13..c70d51a6bf1b6 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -62,7 +62,8 @@ void AutonomousStream::sendResponse() { AutonomousHttpConnection::AutonomousHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, AutonomousUpstream& upstream) - : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem()), + : FakeHttpConnection(shared_connection, store, type, upstream.timeSystem(), + Http::DEFAULT_MAX_REQUEST_HEADERS_KB), upstream_(upstream) {} Http::StreamDecoder& AutonomousHttpConnection::newStream(Http::StreamEncoder& response_encoder, diff --git a/test/integration/clusters/BUILD b/test/integration/clusters/BUILD new file mode 100644 index 0000000000000..b091fb0a81e8f --- /dev/null +++ b/test/integration/clusters/BUILD @@ -0,0 +1,41 @@ +licenses(["notice"]) # Apache 2 + +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", + "envoy_proto_library", +) + +envoy_package() + +envoy_cc_test_library( + name = "custom_static_cluster", + srcs = [ + "custom_static_cluster.cc", + "custom_static_cluster.h", + ], + deps = [ + ":cluster_factory_config_proto_cc", + "//include/envoy/api:api_interface", + "//include/envoy/http:codec_interface", + "//include/envoy/upstream:cluster_factory_interface", + "//include/envoy/upstream:cluster_manager_interface", + "//source/common/event:dispatcher_lib", + "//source/common/network:utility_lib", + "//source/common/singleton:manager_impl_lib", + "//source/common/upstream:cluster_factory_lib", + "//source/common/upstream:upstream_includes", + "//source/common/upstream:upstream_lib", + "//source/extensions/transport_sockets/raw_buffer:config", + "//source/server:transport_socket_config_lib", + 
"//test/common/upstream:utility_lib", + "//test/test_common:registry_lib", + "//test/test_common:utility_lib", + ], +) + +envoy_proto_library( + name = "cluster_factory_config_proto", + srcs = [":cluster_factory_config.proto"], +) diff --git a/test/integration/clusters/cluster_factory_config.proto b/test/integration/clusters/cluster_factory_config.proto new file mode 100644 index 0000000000000..ccf74e1d2c78a --- /dev/null +++ b/test/integration/clusters/cluster_factory_config.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package test.integration.clusters; + +message CustomStaticConfig { + uint32 priority = 1; + string address = 2; + uint32 port_value = 3; +} \ No newline at end of file diff --git a/test/integration/clusters/custom_static_cluster.cc b/test/integration/clusters/custom_static_cluster.cc new file mode 100644 index 0000000000000..a880f5e862b36 --- /dev/null +++ b/test/integration/clusters/custom_static_cluster.cc @@ -0,0 +1,32 @@ +#include "custom_static_cluster.h" + +namespace Envoy { + +// ClusterImplBase +void CustomStaticCluster::startPreInit() { + printf("startPreInit"); + Upstream::HostSharedPtr host = makeHost(); + Upstream::HostVector hosts{host}; + auto hosts_ptr = std::make_shared(hosts); + + this->priority_set_.updateHosts( + priority_, + Upstream::HostSetImpl::partitionHosts(hosts_ptr, Upstream::HostsPerLocalityImpl::empty()), {}, + hosts, {}, absl::nullopt); + + onPreInitComplete(); +} + +inline Upstream::HostSharedPtr CustomStaticCluster::makeHost() { + Network::Address::InstanceConstSharedPtr address = + Network::Utility::parseInternetAddress(address_, port_, false); + return Upstream::HostSharedPtr{new Upstream::HostImpl( + this->info(), "", address, this->info()->metadata(), 1, + envoy::api::v2::core::Locality::default_instance(), + envoy::api::v2::endpoint::Endpoint::HealthCheckConfig::default_instance(), priority_, + envoy::api::v2::core::HealthStatus::UNKNOWN)}; +} + +REGISTER_FACTORY(CustomStaticClusterFactory, 
Upstream::ClusterFactory); + +} // namespace Envoy \ No newline at end of file diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h new file mode 100644 index 0000000000000..d3cf39edddd21 --- /dev/null +++ b/test/integration/clusters/custom_static_cluster.h @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +#include "envoy/api/api.h" +#include "envoy/http/codec.h" +#include "envoy/upstream/cluster_manager.h" + +#include "common/network/address_impl.h" +#include "common/upstream/cluster_factory_impl.h" + +#include "server/transport_socket_config_impl.h" + +#include "test/common/upstream/utility.h" +#include "test/integration/clusters/cluster_factory_config.pb.h" +#include "test/integration/clusters/cluster_factory_config.pb.validate.h" +#include "test/test_common/registry.h" + +namespace Envoy { + +class CustomStaticCluster : public Upstream::ClusterImplBase { +public: + CustomStaticCluster(const envoy::api::v2::Cluster& cluster, Runtime::Loader& runtime, + Server::Configuration::TransportSocketFactoryContext& factory_context, + Stats::ScopePtr&& stats_scope, bool added_via_api, uint32_t priority, + std::string address, uint32_t port) + : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + priority_(priority), address_(std::move(address)), port_(port) {} + + InitializePhase initializePhase() const override { return InitializePhase::Primary; } + +private: + // ClusterImplBase + void startPreInit() override; + + inline Upstream::HostSharedPtr makeHost(); + + const uint32_t priority_; + const std::string address_; + const uint32_t port_; +}; + +class CustomStaticClusterFactory : public Upstream::ConfigurableClusterFactoryBase< + test::integration::clusters::CustomStaticConfig> { +public: + CustomStaticClusterFactory() : ConfigurableClusterFactoryBase("envoy.clusters.custom_static") {} + +private: + Upstream::ClusterImplBaseSharedPtr 
createClusterWithConfig( + const envoy::api::v2::Cluster& cluster, + const test::integration::clusters::CustomStaticConfig& proto_config, + Upstream::ClusterFactoryContext& context, + Server::Configuration::TransportSocketFactoryContext& socket_factory_context, + Stats::ScopePtr&& stats_scope) override { + return std::make_unique(cluster, context.runtime(), socket_factory_context, + std::move(stats_scope), context.addedViaApi(), + proto_config.priority(), proto_config.address(), + proto_config.port_value()); + } +}; + +} // namespace Envoy \ No newline at end of file diff --git a/test/integration/custom_cluster_integration_test.cc b/test/integration/custom_cluster_integration_test.cc new file mode 100644 index 0000000000000..4d931d0a94733 --- /dev/null +++ b/test/integration/custom_cluster_integration_test.cc @@ -0,0 +1,74 @@ +#include "envoy/api/v2/eds.pb.h" + +#include "common/network/address_impl.h" +#include "common/upstream/load_balancer_impl.h" + +#include "test/config/utility.h" +#include "test/integration/clusters/cluster_factory_config.pb.h" +#include "test/integration/clusters/custom_static_cluster.h" +#include "test/integration/http_integration.h" + +namespace Envoy { +namespace { + +const int UpstreamIndex = 0; + +// Integration test for cluster extension using CustomStaticCluster. 
+class CustomClusterIntegrationTest : public testing::TestWithParam, + public HttpIntegrationTest { +public: + CustomClusterIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam(), realTime()) {} + + void initialize() override { + setUpstreamCount(1); + // change the configuration of the cluster_0 to a custom static cluster + config_helper_.addConfigModifier([this](envoy::config::bootstrap::v2::Bootstrap& bootstrap) { + auto* cluster_0 = bootstrap.mutable_static_resources()->mutable_clusters(0); + + cluster_0->clear_hosts(); + + envoy::api::v2::Cluster_CustomClusterType cluster_type; + cluster_type.set_name("envoy.clusters.custom_static"); + test::integration::clusters::CustomStaticConfig config; + config.set_priority(10); + config.set_address(Network::Test::getLoopbackAddressString(ipVersion())); + config.set_port_value(fake_upstreams_[UpstreamIndex]->localAddress()->ip()->port()); + cluster_type.mutable_typed_config()->PackFrom(config); + + cluster_0->mutable_cluster_type()->CopyFrom(cluster_type); + }); + HttpIntegrationTest::initialize(); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 1); + } + + Network::Address::IpVersion ipVersion() const { return version_; } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, CustomClusterIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); + +TEST_P(CustomClusterIntegrationTest, TestRouterHeaderOnly) { + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex); +} + +TEST_P(CustomClusterIntegrationTest, TestTwoRequests) { testTwoRequests(false); } + +TEST_P(CustomClusterIntegrationTest, TestCustomConfig) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. 
+ initialize(); + + // Verify the cluster is correctly setup with the custom priority + const auto& cluster_map = test_server_->server().clusterManager().clusters(); + EXPECT_EQ(1, cluster_map.size()); + EXPECT_EQ(1, cluster_map.count("cluster_0")); + const auto& cluster_ref = cluster_map.find("cluster_0")->second; + const auto& hostset_per_priority = cluster_ref.get().prioritySet().hostSetsPerPriority(); + EXPECT_EQ(11, hostset_per_priority.size()); + const Envoy::Upstream::HostSetPtr& host_set = hostset_per_priority[10]; + EXPECT_EQ(1, host_set->hosts().size()); + EXPECT_EQ(1, host_set->healthyHosts().size()); + EXPECT_EQ(10, host_set->priority()); +} +} // namespace +} // namespace Envoy diff --git a/test/integration/delta_cds_integration_test.cc b/test/integration/delta_cds_integration_test.cc new file mode 100644 index 0000000000000..42381b7f9c5af --- /dev/null +++ b/test/integration/delta_cds_integration_test.cc @@ -0,0 +1,324 @@ +#include "envoy/api/v2/cds.pb.h" +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/grpc/status.h" +#include "envoy/stats/scope.h" + +#include "common/config/protobuf_link_hacks.h" +#include "common/config/resources.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "test/common/grpc/grpc_client_integration.h" +#include "test/integration/http_integration.h" +#include "test/integration/utility.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "absl/synchronization/notification.h" +#include "gtest/gtest.h" + +using testing::AssertionFailure; +using testing::AssertionResult; +using testing::AssertionSuccess; +using testing::IsSubstring; + +namespace Envoy { +namespace { + +// TODO(fredlas) Move to test/config/utility.cc once there are other xDS tests that use gRPC. 
+const char Config[] = R"EOF( +admin: + access_log_path: /dev/null + address: + socket_address: + address: 127.0.0.1 + port_value: 0 +dynamic_resources: + cds_config: + api_config_source: + api_type: DELTA_GRPC + grpc_services: + envoy_grpc: + cluster_name: my_cds_cluster +static_resources: + clusters: + - name: my_cds_cluster + http2_protocol_options: {} + hosts: + socket_address: + address: 127.0.0.1 + port_value: 0 + listeners: + name: http + address: + socket_address: + address: 127.0.0.1 + port_value: 0 + filter_chains: + filters: + name: envoy.http_connection_manager + config: + stat_prefix: config_test + http_filters: + name: envoy.router + codec_type: HTTP2 + route_config: + name: route_config_0 + validate_clusters: false + virtual_hosts: + name: integration + routes: + - route: + cluster: cluster_1 + match: + prefix: "/cluster1" + - route: + cluster: cluster_2 + match: + prefix: "/cluster2" + domains: "*" +)EOF"; +const char ClusterName1[] = "cluster_1"; +const char ClusterName2[] = "cluster_2"; +const int UpstreamIndex1 = 1; +const int UpstreamIndex2 = 2; + +class DeltaCdsIntegrationTest : public HttpIntegrationTest, + public Grpc::GrpcClientIntegrationParamTest { +public: + DeltaCdsIntegrationTest() + : HttpIntegrationTest(Http::CodecClient::Type::HTTP2, ipVersion(), realTime(), Config) {} + + void TearDown() override { + cleanUpXdsConnection(); + test_server_.reset(); + fake_upstreams_.clear(); + } + + // TODO(fredlas) Move to test/config/utility.cc once there are other xDS tests that use gRPC. 
+ envoy::api::v2::Cluster buildCluster(const std::string& name, int upstream_index) { + return TestUtility::parseYaml( + fmt::format(R"EOF( + name: {} + connect_timeout: 5s + type: STATIC + load_assignment: + cluster_name: {} + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: {} + port_value: {} + lb_policy: ROUND_ROBIN + http2_protocol_options: {{}} + )EOF", + name, name, Network::Test::getLoopbackAddressString(ipVersion()), + fake_upstreams_[upstream_index]->localAddress()->ip()->port())); + } + + // Overridden to insert this stuff into the initialize() at the very beginning of + // HttpIntegrationTest::testRouterRequestAndResponseWithBody(). + void initialize() override { + // Controls how many fake_upstreams_.emplace_back(new FakeUpstream) will happen in + // BaseIntegrationTest::createUpstreams() (which is part of initialize()). + // Make sure this number matches the size of the 'clusters' repeated field in the bootstrap + // config that you use! + setUpstreamCount(1); // the CDS cluster + setUpstreamProtocol(FakeHttpConnection::Type::HTTP2); // CDS uses gRPC uses HTTP2. + + // BaseIntegrationTest::initialize() does many things: + // 1) It appends to fake_upstreams_ as many as you asked for via setUpstreamCount(). + // 2) It updates your bootstrap config with the ports your fake upstreams are actually listening + // on (since you're supposed to leave them as 0). + // 3) It creates and starts an IntegrationTestServer - the thing that wraps the almost-actual + // Envoy used in the tests. + // 4) Bringing up the server usually entails waiting to ensure that any listeners specified in + // the bootstrap config have come up, and registering them in a port map (see lookupPort()). + // However, this test needs to defer all of that to later. + defer_listener_finalization_ = true; + HttpIntegrationTest::initialize(); + + // Create the regular (i.e. not an xDS server) upstream. 
We create it manually here after + // initialize() because finalize() expects all fake_upstreams_ to correspond to a static + // cluster in the bootstrap config - which we don't want since we're testing dynamic CDS! + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, + timeSystem(), enable_half_close_)); + fake_upstreams_[UpstreamIndex1]->set_allow_unexpected_disconnects(false); + + // Now that the upstream has been created, process Envoy's request to discover it. + // (First, we have to let Envoy establish its connection to the CDS server.) + acceptXdsConnection(); + + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse( + {buildCluster(ClusterName1, UpstreamIndex1)}, {}, "55"); + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse describing cluster_1 that we sent. + // 2 because the statically specified CDS server itself counts as a cluster. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // Wait for our statically specified listener to become ready, and register its port in the + // test framework's downstream listener port map. + test_server_->waitUntilListenersReady(); + registerTestServerPorts({"http"}); + } + + void acceptXdsConnection() { + AssertionResult result = // xds_connection_ is filled with the new FakeHttpConnection. + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, xds_connection_); + RELEASE_ASSERT(result, result.message()); + result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + RELEASE_ASSERT(result, result.message()); + xds_stream_->startGrpcStream(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + } +}; + +INSTANTIATE_TEST_CASE_P(IpVersionsClientType, DeltaCdsIntegrationTest, + GRPC_CLIENT_INTEGRATION_PARAMS); + +// 1) Envoy starts up with no static clusters (other than the CDS-over-gRPC server). 
+// 2) Envoy is told of a cluster via CDS. +// 3) We send Envoy a request, which we verify is properly proxied to and served by that cluster. +// 4) Envoy is told that cluster is gone. +// 5) We send Envoy a request, which should 503. +// 6) Envoy is told that the cluster is back. +// 7) We send Envoy a request, which we verify is properly proxied to and served by that cluster. +TEST_P(DeltaCdsIntegrationTest, CdsClusterUpDownUp) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + + // Tell Envoy that cluster_1 is gone. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse({}, {ClusterName1}, "42"); + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse that says cluster_1 is gone. + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + + // Now that cluster_1 is gone, the listener (with its routing to cluster_1) should 503. + BufferingStreamDecoderPtr response = IntegrationUtil::makeSingleRequest( + lookupPort("http"), "GET", "/cluster1", "", downstream_protocol_, version_, "foo.com"); + ASSERT_TRUE(response->complete()); + EXPECT_STREQ("503", response->headers().Status()->value().c_str()); + + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + + // Tell Envoy that cluster_1 is back. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse({buildCluster(ClusterName1, UpstreamIndex1)}, + {}, "413"); + + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse describing cluster_1 that we sent. Again, 2 includes CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); + + // Does *not* call our initialize(). 
+ testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + + cleanupUpstreamAndDownstream(); +} + +// Tests adding a cluster, adding another, then removing the first. +TEST_P(DeltaCdsIntegrationTest, TwoClusters) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + + // Add another fake upstream, to be cluster_2. + fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, + timeSystem(), enable_half_close_)); + fake_upstreams_[UpstreamIndex2]->set_allow_unexpected_disconnects(false); + + // Tell Envoy that cluster_2 is here. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse({buildCluster(ClusterName2, UpstreamIndex2)}, + {}, "42"); + // The '3' includes the fake CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + + // A request for cluster_2 should be fine. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + + // Tell Envoy that cluster_1 is gone. + EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse({}, {ClusterName1}, "42"); + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse that says cluster_1 is gone. + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + + // Even with cluster_1 gone, a request for cluster_2 should be fine. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + + // Tell Envoy that cluster_1 is back. 
+ EXPECT_TRUE(compareDeltaDiscoveryRequest(Config::TypeUrl::get().Cluster, {}, {})); + sendDeltaDiscoveryResponse({buildCluster(ClusterName1, UpstreamIndex1)}, + {}, "413"); + + // We can continue the test once we're sure that Envoy's ClusterManager has made use of + // the DiscoveryResponse describing cluster_1 that we sent. Again, 3 includes CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + + // Does *not* call our initialize(). + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + + cleanupUpstreamAndDownstream(); +} + +// Tests that when Envoy's xDS gRPC stream dis/reconnects, Envoy can inform the server of the +// resources it already has: the reconnected stream need not start with a state-of-the-world update. +TEST_P(DeltaCdsIntegrationTest, VersionsRememberedAfterReconnect) { + // Calls our initialize(), which includes establishing a listener, route, and cluster. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + + // Close the connection carrying Envoy's xDS gRPC stream... + AssertionResult result = xds_connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = xds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + xds_connection_.reset(); + // ...and reconnect it. + acceptXdsConnection(); + + // Upon reconnecting, the Envoy should tell us its current resource versions. + envoy::api::v2::DeltaDiscoveryRequest request; + result = xds_stream_->waitForGrpcMessage(*dispatcher_, request); + RELEASE_ASSERT(result, result.message()); + const auto& initial_resource_versions = request.initial_resource_versions(); + EXPECT_EQ("55", initial_resource_versions.at(std::string(ClusterName1))); + EXPECT_EQ(1, initial_resource_versions.size()); + + // Add another fake upstream, to be cluster_2. 
+ fake_upstreams_.emplace_back(new FakeUpstream(0, FakeHttpConnection::Type::HTTP2, version_, + timeSystem(), enable_half_close_)); + fake_upstreams_[UpstreamIndex2]->set_allow_unexpected_disconnects(false); + // Tell Envoy that cluster_2 is here. This update does *not* need to include cluster_1, + // which Envoy should already know about despite the disconnect. + sendDeltaDiscoveryResponse({buildCluster(ClusterName2, UpstreamIndex2)}, + {}, "42"); + // The '3' includes the fake CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + + // A request for cluster_1 should be fine. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex1, "/cluster1"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); + // A request for cluster_2 should be fine. + testRouterHeaderOnlyRequestAndResponse(nullptr, UpstreamIndex2, "/cluster2"); + cleanupUpstreamAndDownstream(); + codec_client_->waitForDisconnect(); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/delta_xds_integration_test_base.cc b/test/integration/delta_xds_integration_test_base.cc new file mode 100644 index 0000000000000..806668c847e73 --- /dev/null +++ b/test/integration/delta_xds_integration_test_base.cc @@ -0,0 +1,91 @@ +#include "test/integration/delta_xds_integration_test_base.h" + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/grpc/status.h" +#include "envoy/stats/scope.h" + +#include "common/config/resources.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "test/integration/http_integration.h" +#include "test/integration/utility.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::AssertionFailure; +using testing::AssertionResult; +using testing::AssertionSuccess; +using testing::IsSubstring; + +namespace Envoy 
{ + +void DeltaXdsIntegrationTestBase::createXdsConnection(FakeUpstream& upstream) { + xds_upstream_ = &upstream; + AssertionResult result = xds_upstream_->waitForHttpConnection(*dispatcher_, xds_connection_); + RELEASE_ASSERT(result, result.message()); +} + +void DeltaXdsIntegrationTestBase::cleanUpXdsConnection() { + // Don't ASSERT fail if an xDS reconnect ends up unparented. + if (xds_upstream_) { + xds_upstream_->set_allow_unexpected_disconnects(true); + } + AssertionResult result = xds_connection_->close(); + RELEASE_ASSERT(result, result.message()); + result = xds_connection_->waitForDisconnect(); + RELEASE_ASSERT(result, result.message()); + xds_connection_.reset(); +} + +AssertionResult DeltaXdsIntegrationTestBase::compareDiscoveryRequest( + const std::string& expected_type_url, + const std::vector& expected_resource_subscriptions, + const std::vector& expected_resource_unsubscriptions, + const Protobuf::int32 expected_error_code, const std::string& expected_error_message) { + envoy::api::v2::DeltaDiscoveryRequest request; + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, request)); + + EXPECT_TRUE(request.has_node()); + EXPECT_FALSE(request.node().id().empty()); + EXPECT_FALSE(request.node().cluster().empty()); + + // TODO(PiotrSikora): Remove this hack once fixed internally. 
+ if (!(expected_type_url == request.type_url())) { + return AssertionFailure() << fmt::format("type_url {} does not match expected {}", + request.type_url(), expected_type_url); + } + if (!(expected_error_code == request.error_detail().code())) { + return AssertionFailure() << fmt::format("error_code {} does not match expected {}", + request.error_detail().code(), expected_error_code); + } + EXPECT_TRUE(IsSubstring("", "", expected_error_message, request.error_detail().message())); + + const std::vector resource_subscriptions(request.resource_names_subscribe().cbegin(), + request.resource_names_subscribe().cend()); + if (expected_resource_subscriptions != resource_subscriptions) { + return AssertionFailure() << fmt::format( + "newly subscribed resources {} do not match expected {} in {}", + fmt::join(resource_subscriptions.begin(), resource_subscriptions.end(), ","), + fmt::join(expected_resource_subscriptions.begin(), + expected_resource_subscriptions.end(), ","), + request.DebugString()); + } + const std::vector resource_unsubscriptions( + request.resource_names_unsubscribe().cbegin(), request.resource_names_unsubscribe().cend()); + if (expected_resource_unsubscriptions != resource_unsubscriptions) { + return AssertionFailure() << fmt::format( + "newly UNsubscribed resources {} do not match expected {} in {}", + fmt::join(resource_unsubscriptions.begin(), resource_unsubscriptions.end(), ","), + fmt::join(expected_resource_unsubscriptions.begin(), + expected_resource_unsubscriptions.end(), ","), + request.DebugString()); + } + return AssertionSuccess(); +} + +} // namespace Envoy diff --git a/test/integration/delta_xds_integration_test_base.h b/test/integration/delta_xds_integration_test_base.h new file mode 100644 index 0000000000000..a7b8c80270b1f --- /dev/null +++ b/test/integration/delta_xds_integration_test_base.h @@ -0,0 +1,47 @@ +#pragma once + +#include "envoy/api/v2/discovery.pb.h" +#include "envoy/grpc/status.h" +#include "envoy/stats/scope.h" + 
+#include "common/config/resources.h" +#include "common/protobuf/protobuf.h" +#include "common/protobuf/utility.h" + +#include "test/integration/http_integration.h" +#include "test/integration/utility.h" +#include "test/mocks/server/mocks.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +using testing::AssertionFailure; +using testing::AssertionResult; +using testing::AssertionSuccess; +using testing::IsSubstring; + +namespace Envoy { + +class DeltaXdsIntegrationTestBase : public HttpIntegrationTest { +public: + DeltaXdsIntegrationTestBase(Http::CodecClient::Type downstream_protocol, + Network::Address::IpVersion version) + : HttpIntegrationTest(downstream_protocol, version, realTime()) {} + DeltaXdsIntegrationTestBase(Http::CodecClient::Type downstream_protocol, + Network::Address::IpVersion version, const std::string& config) + : HttpIntegrationTest(downstream_protocol, version, realTime(), config) {} + + void createXdsConnection(FakeUpstream& upstream); + + void cleanUpXdsConnection(); + +protected: + FakeUpstream* xds_upstream_{}; + FakeHttpConnectionPtr xds_connection_; + FakeStreamPtr xds_stream_; + testing::NiceMock factory_context_; +}; + +} // namespace Envoy diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 334db20f41d2e..c41315175e06c 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -208,18 +208,18 @@ void FakeStream::finishGrpcStream(Grpc::Status::GrpcStatus status) { FakeHttpConnection::FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, - Event::TestTimeSystem& time_system) + Event::TestTimeSystem& time_system, + uint32_t max_request_headers_kb) : FakeConnectionBase(shared_connection, time_system) { if (type == Type::HTTP1) { - codec_ = std::make_unique(shared_connection_.connection(), - *this, 
Http::Http1Settings()); + codec_ = std::make_unique( + shared_connection_.connection(), *this, Http::Http1Settings(), max_request_headers_kb); } else { auto settings = Http::Http2Settings(); settings.allow_connect_ = true; settings.allow_metadata_ = true; codec_ = std::make_unique( - shared_connection_.connection(), *this, store, settings, - Http::DEFAULT_MAX_REQUEST_HEADERS_KB); + shared_connection_.connection(), *this, store, settings, max_request_headers_kb); ASSERT(type == Type::HTTP2); } @@ -432,7 +432,8 @@ void FakeUpstream::threadRoutine() { AssertionResult FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, - milliseconds timeout) { + milliseconds timeout, + uint32_t max_request_headers_kb) { Event::TestTimeSystem& time_system = timeSystem(); auto end_time = time_system.monotonicTime() + timeout; { @@ -452,7 +453,7 @@ AssertionResult FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_di return AssertionFailure() << "Got a new connection event, but didn't create a connection."; } connection = std::make_unique(consumeConnection(), stats_store_, http_type_, - time_system); + time_system, max_request_headers_kb); } VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); @@ -482,7 +483,7 @@ FakeUpstream::waitForHttpConnection(Event::Dispatcher& client_dispatcher, } else { connection = std::make_unique( upstream.consumeConnection(), upstream.stats_store_, upstream.http_type_, - upstream.timeSystem()); + upstream.timeSystem(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB); lock.release(); VERIFY_ASSERTION(connection->initialize()); VERIFY_ASSERTION(connection->readDisable(false)); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 92d55417f2420..7c720f044ce09 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -401,7 +401,7 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, 
public FakeCo enum class Type { HTTP1, HTTP2 }; FakeHttpConnection(SharedConnectionWrapper& shared_connection, Stats::Store& store, Type type, - Event::TestTimeSystem& time_system); + Event::TestTimeSystem& time_system, uint32_t max_request_headers_kb); // By default waitForNewStream assumes the next event is a new stream and // returns AssertionFailure if an unexpected event occurs. If a caller truly @@ -529,7 +529,8 @@ class FakeUpstream : Logger::Loggable, ABSL_MUST_USE_RESULT testing::AssertionResult waitForHttpConnection(Event::Dispatcher& client_dispatcher, FakeHttpConnectionPtr& connection, - std::chrono::milliseconds timeout = TestUtility::DefaultTimeout); + std::chrono::milliseconds timeout = TestUtility::DefaultTimeout, + uint32_t max_request_headers_kb = Http::DEFAULT_MAX_REQUEST_HEADERS_KB); ABSL_MUST_USE_RESULT testing::AssertionResult diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index d48813426534d..dd8fe59451747 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -24,8 +24,8 @@ namespace Envoy { namespace { +// TODO(jmarantz): switch this to simulated-time after debugging flakes. 
class HdsIntegrationTest : public testing::TestWithParam, - public Event::TestUsingSimulatedTime, public HttpIntegrationTest { public: HdsIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} diff --git a/test/integration/http2_integration_test.cc b/test/integration/http2_integration_test.cc index 2804776e5d09f..c8275e762b8c2 100644 --- a/test/integration/http2_integration_test.cc +++ b/test/integration/http2_integration_test.cc @@ -430,10 +430,6 @@ TEST_P(Http2IntegrationTest, GrpcRouterNotFound) { TEST_P(Http2IntegrationTest, GrpcRetry) { testGrpcRetry(); } -TEST_P(Http2IntegrationTest, LargeHeadersInvokeResetStream) { testLargeRequestHeaders(62, 60); } - -TEST_P(Http2IntegrationTest, LargeHeadersAcceptedIfConfigured) { testLargeRequestHeaders(62, 63); } - TEST_P(Http2IntegrationTest, BadMagic) { initialize(); Buffer::OwnedImpl buffer("hello"); diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 8f9dea3bfdea7..d8d7eb2951ada 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -284,8 +284,9 @@ HttpIntegrationTest::waitForNextUpstreamRequest(const std::vector& ups if (!fake_upstream_connection_) { AssertionResult result = AssertionFailure(); for (auto upstream_index : upstream_indices) { - result = fake_upstreams_[upstream_index]->waitForHttpConnection(*dispatcher_, - fake_upstream_connection_); + result = fake_upstreams_[upstream_index]->waitForHttpConnection( + *dispatcher_, fake_upstream_connection_, TestUtility::DefaultTimeout, + max_request_headers_kb_); if (result) { upstream_with_request = upstream_index; break; @@ -344,7 +345,7 @@ void HttpIntegrationTest::testRouterRequestAndResponseWithBody( IntegrationStreamDecoderPtr HttpIntegrationTest::makeHeaderOnlyRequest(ConnectionCreationFunction* create_connection, - int upstream_index) { + int upstream_index, const std::string& path) { // This is called multiple times per test in 
ads_integration_test. Only call // initialize() the first time. if (!initialized()) { @@ -353,7 +354,7 @@ HttpIntegrationTest::makeHeaderOnlyRequest(ConnectionCreationFunction* create_co codec_client_ = makeHttpConnection( create_connection ? ((*create_connection)()) : makeClientConnection((lookupPort("http")))); Http::TestHeaderMapImpl request_headers{{":method", "GET"}, - {":path", "/test/long/url"}, + {":path", path}, {":scheme", "http"}, {":authority", "host"}, {"x-lyft-user-id", "123"}}; @@ -362,8 +363,8 @@ HttpIntegrationTest::makeHeaderOnlyRequest(ConnectionCreationFunction* create_co } void HttpIntegrationTest::testRouterHeaderOnlyRequestAndResponse( - ConnectionCreationFunction* create_connection, int upstream_index) { - auto response = makeHeaderOnlyRequest(create_connection, upstream_index); + ConnectionCreationFunction* create_connection, int upstream_index, const std::string& path) { + auto response = makeHeaderOnlyRequest(create_connection, upstream_index, path); checkSimpleRequestSuccess(0U, 0U, response.get()); } @@ -827,6 +828,7 @@ void HttpIntegrationTest::testLargeRequestHeaders(uint32_t size, uint32_t max_si config_helper_.addConfigModifier( [&](envoy::config::filter::network::http_connection_manager::v2::HttpConnectionManager& hcm) -> void { hcm.mutable_max_request_headers_kb()->set_value(max_size); }); + max_request_headers_kb_ = max_size; Http::TestHeaderMapImpl big_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index c6ddd1476833d..3fb328ac2be56 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -139,7 +139,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { typedef std::function ConnectionCreationFunction; // Sends a simple header-only HTTP request, and waits for a response. 
IntegrationStreamDecoderPtr makeHeaderOnlyRequest(ConnectionCreationFunction* create_connection, - int upstream_index); + int upstream_index, + const std::string& path = "/test/long/url"); void testRouterNotFound(); void testRouterNotFoundWithBody(); @@ -147,7 +148,8 @@ class HttpIntegrationTest : public BaseIntegrationTest { bool big_header, ConnectionCreationFunction* creator = nullptr); void testRouterHeaderOnlyRequestAndResponse(ConnectionCreationFunction* creator = nullptr, - int upstream_index = 0); + int upstream_index = 0, + const std::string& path = "/test/long/url"); void testRequestAndResponseShutdownWithActiveConnection(); // Disconnect tests @@ -196,5 +198,6 @@ class HttpIntegrationTest : public BaseIntegrationTest { {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; // The codec type for the client-to-Envoy connection Http::CodecClient::Type downstream_protocol_{Http::CodecClient::Type::HTTP1}; + uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; }; } // namespace Envoy diff --git a/test/integration/http_protocol_integration.h b/test/integration/http_protocol_integration.h index 59cdf8316680f..c9429a4bd3327 100644 --- a/test/integration/http_protocol_integration.h +++ b/test/integration/http_protocol_integration.h @@ -18,7 +18,7 @@ struct HttpProtocolTestParams { // // typedef HttpProtocolIntegrationTest MyTest // -// INSTANTIATE_TEST_SUITE_P(Protocols, BufferIntegrationTest, +// INSTANTIATE_TEST_SUITE_P(Protocols, MyTest, // testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), // HttpProtocolIntegrationTest::protocolTestParamsToString); // diff --git a/test/integration/integration.cc b/test/integration/integration.cc index e9499226ff344..2c0948acf7b63 100644 --- a/test/integration/integration.cc +++ b/test/integration/integration.cc @@ -525,4 +525,51 @@ AssertionResult BaseIntegrationTest::compareDiscoveryRequest( } return AssertionSuccess(); } + +AssertionResult 
BaseIntegrationTest::compareDeltaDiscoveryRequest( + const std::string& expected_type_url, + const std::vector& expected_resource_subscriptions, + const std::vector& expected_resource_unsubscriptions, + const Protobuf::int32 expected_error_code, const std::string& expected_error_message) { + envoy::api::v2::DeltaDiscoveryRequest request; + VERIFY_ASSERTION(xds_stream_->waitForGrpcMessage(*dispatcher_, request)); + + EXPECT_TRUE(request.has_node()); + EXPECT_FALSE(request.node().id().empty()); + EXPECT_FALSE(request.node().cluster().empty()); + + // TODO(PiotrSikora): Remove this hack once fixed internally. + if (!(expected_type_url == request.type_url())) { + return AssertionFailure() << fmt::format("type_url {} does not match expected {}", + request.type_url(), expected_type_url); + } + if (!(expected_error_code == request.error_detail().code())) { + return AssertionFailure() << fmt::format("error_code {} does not match expected {}", + request.error_detail().code(), expected_error_code); + } + EXPECT_TRUE(IsSubstring("", "", expected_error_message, request.error_detail().message())); + + const std::vector resource_subscriptions(request.resource_names_subscribe().cbegin(), + request.resource_names_subscribe().cend()); + if (expected_resource_subscriptions != resource_subscriptions) { + return AssertionFailure() << fmt::format( + "newly subscribed resources {} do not match expected {} in {}", + fmt::join(resource_subscriptions.begin(), resource_subscriptions.end(), ","), + fmt::join(expected_resource_subscriptions.begin(), + expected_resource_subscriptions.end(), ","), + request.DebugString()); + } + const std::vector resource_unsubscriptions( + request.resource_names_unsubscribe().cbegin(), request.resource_names_unsubscribe().cend()); + if (expected_resource_unsubscriptions != resource_unsubscriptions) { + return AssertionFailure() << fmt::format( + "newly UNsubscribed resources {} do not match expected {} in {}", + fmt::join(resource_unsubscriptions.begin(), 
resource_unsubscriptions.end(), ","), + fmt::join(expected_resource_unsubscriptions.begin(), + expected_resource_unsubscriptions.end(), ","), + request.DebugString()); + } + return AssertionSuccess(); +} + } // namespace Envoy diff --git a/test/integration/integration.h b/test/integration/integration.h index d6d03e98f9a22..db2879b624be5 100644 --- a/test/integration/integration.h +++ b/test/integration/integration.h @@ -215,6 +215,29 @@ class BaseIntegrationTest : Logger::Loggable { xds_stream_->sendGrpcMessage(discovery_response); } + AssertionResult compareDeltaDiscoveryRequest( + const std::string& expected_type_url, + const std::vector& expected_resource_subscriptions, + const std::vector& expected_resource_unsubscriptions, + const Protobuf::int32 expected_error_code = Grpc::Status::GrpcStatus::Ok, + const std::string& expected_error_message = ""); + template + void sendDeltaDiscoveryResponse(const std::vector& added_or_updated, + const std::vector& removed, + const std::string& version) { + envoy::api::v2::DeltaDiscoveryResponse response; + response.set_system_version_info("system_version_info_this_is_a_test"); + for (const auto& message : added_or_updated) { + auto* resource = response.add_resources(); + resource->set_name(message.name()); + resource->set_version(version); + resource->mutable_resource()->PackFrom(message); + } + *response.mutable_removed_resources() = {removed.begin(), removed.end()}; + response.set_nonce("noncense"); + xds_stream_->sendGrpcMessage(response); + } + private: Event::GlobalTimeSystem time_system_; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 56b49c82c72f4..0141026a9478c 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -468,10 +468,6 @@ TEST_P(IntegrationTest, Connect) { EXPECT_EQ(normalizeDate(response1), normalizeDate(response2)); } -TEST_P(IntegrationTest, LargeHeadersRejected) { testLargeRequestHeaders(62, 60); } - 
-TEST_P(IntegrationTest, LargeHeadersAccepted) { testLargeRequestHeaders(62, 63); } - TEST_P(IntegrationTest, UpstreamProtocolError) { initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index 27c9818f4a949..40f8c23756915 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -63,7 +63,8 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { // Put envoy in overloaded state and check that it drops new requests. // Test both header-only and header+body requests since the code paths are slightly different. updateResource(0.9); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_requests.active", 1); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.stop_accepting_requests.active", true); Http::TestHeaderMapImpl request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; @@ -87,7 +88,8 @@ TEST_P(OverloadIntegrationTest, CloseStreamsWhenOverloaded) { // Deactivate overload state and check that new requests are accepted. 
updateResource(0.8); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_requests.active", 0); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.stop_accepting_requests.active", false); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); @@ -109,7 +111,8 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { // Put envoy in overloaded state and check that it disables keepalive updateResource(0.8); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.disable_http_keepalive.active", 1); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.disable_http_keepalive.active", true); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); Http::TestHeaderMapImpl request_headers{ @@ -123,7 +126,8 @@ TEST_P(OverloadIntegrationTest, DisableKeepaliveWhenOverloaded) { // Deactivate overload state and check that keepalive is not disabled updateResource(0.7); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.disable_http_keepalive.active", 0); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.disable_http_keepalive.active", false); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); response = sendRequestAndWaitForResponse(request_headers, 1, default_response_headers_, 1); @@ -139,8 +143,8 @@ TEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { // Put envoy in overloaded state and check that it doesn't accept the new client connection. 
updateResource(0.95); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", - 1); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.stop_accepting_connections.active", true); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); Http::TestHeaderMapImpl request_headers{ {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; @@ -151,8 +155,8 @@ TEST_P(OverloadIntegrationTest, StopAcceptingConnectionsWhenOverloaded) { // Reduce load a little to allow the connection to be accepted but then immediately reject the // request. updateResource(0.9); - test_server_->waitForGaugeEq("overload.envoy.overload_actions.stop_accepting_connections.active", - 0); + test_server_->waitForBoolIndicatorEq( + "overload.envoy.overload_actions.stop_accepting_connections.active", false); response->waitForEndStream(); EXPECT_TRUE(response->complete()); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e8061b93dc86a..011266e9b8de9 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -739,6 +739,14 @@ name: decode-headers-only EXPECT_EQ(0, upstream_request_->body().length()); } +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersRejected) { + testLargeRequestHeaders(95, 60); +} + +TEST_P(DownstreamProtocolIntegrationTest, LargeRequestHeadersAccepted) { + testLargeRequestHeaders(95, 96); +} + // For tests which focus on downstream-to-Envoy behavior, and don't need to be // run with both HTTP/1 and HTTP/2 upstreams. 
INSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest, diff --git a/test/integration/server.h b/test/integration/server.h index 3652119081244..6648f1f9c6fa3 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -85,6 +85,11 @@ class TestScopeWrapper : public Scope { return wrapped_scope_->gauge(name); } + BoolIndicator& boolIndicator(const std::string& name) override { + Thread::LockGuard lock(lock_); + return wrapped_scope_->boolIndicator(name); + } + Histogram& histogram(const std::string& name) override { Thread::LockGuard lock(lock_); return wrapped_scope_->histogram(name); @@ -119,6 +124,10 @@ class TestIsolatedStoreImpl : public StoreRoot { Thread::LockGuard lock(lock_); return store_.gauge(name); } + BoolIndicator& boolIndicator(const std::string& name) override { + Thread::LockGuard lock(lock_); + return store_.boolIndicator(name); + } Histogram& histogram(const std::string& name) override { Thread::LockGuard lock(lock_); return store_.histogram(name); @@ -134,7 +143,10 @@ class TestIsolatedStoreImpl : public StoreRoot { Thread::LockGuard lock(lock_); return store_.gauges(); } - + std::vector boolIndicators() const override { + Thread::LockGuard lock(lock_); + return store_.boolIndicators(); + } std::vector histograms() const override { Thread::LockGuard lock(lock_); return store_.histograms(); @@ -194,6 +206,12 @@ class IntegrationTestServer : public Logger::Loggable, std::function on_server_init_function, bool deterministic, bool defer_listener_finalization); + void waitForBoolIndicatorEq(const std::string& name, uint64_t value) { + while (boolIndicator(name) == nullptr || boolIndicator(name)->value() != value) { + time_system_.sleep(std::chrono::milliseconds(10)); + } + } + void waitForCounterGe(const std::string& name, uint64_t value) override { while (counter(name) == nullptr || counter(name)->value() < value) { time_system_.sleep(std::chrono::milliseconds(10)); @@ -212,6 +230,12 @@ class IntegrationTestServer : 
public Logger::Loggable, } } + Stats::BoolIndicatorSharedPtr boolIndicator(const std::string& name) { + // When using the thread local store, only boolIndicators() is thread safe. This also allows us + // to test if an indicator exists at all versus just defaulting to false. + return TestUtility::findBoolIndicator(stat_store(), name); + } + Stats::CounterSharedPtr counter(const std::string& name) override { // When using the thread local store, only counters() is thread safe. This also allows us // to test if a counter exists at all versus just defaulting to zero. diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 4ee2971236d42..63a2391a04714 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -32,8 +32,8 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, StatsIntegrationTest, TEST_P(StatsIntegrationTest, WithDefaultConfig) { initialize(); - auto live = test_server_->gauge("server.live"); - EXPECT_EQ(live->value(), 1); + auto live = test_server_->boolIndicator("server.live"); + EXPECT_TRUE(live->value()); EXPECT_EQ(live->tags().size(), 0); auto counter = test_server_->counter("http.config_test.rq_total"); @@ -122,8 +122,8 @@ TEST_P(StatsIntegrationTest, WithTagSpecifierWithFixedValue) { }); initialize(); - auto live = test_server_->gauge("server.live"); - EXPECT_EQ(live->value(), 1); + auto live = test_server_->boolIndicator("server.live"); + EXPECT_TRUE(live->value()); EXPECT_EQ(live->tags().size(), 1); EXPECT_EQ(live->tags()[0].name_, "test.x"); EXPECT_EQ(live->tags()[0].value_, "xxx"); diff --git a/test/mocks/api/mocks.h b/test/mocks/api/mocks.h index 4a1be0b0b0df5..16d40d2574061 100644 --- a/test/mocks/api/mocks.h +++ b/test/mocks/api/mocks.h @@ -10,6 +10,10 @@ #include "common/api/os_sys_calls_impl.h" +#if defined(__linux__) +#include "common/api/os_sys_calls_impl_linux.h" +#endif + #include "test/mocks/filesystem/mocks.h" #include 
"test/test_common/test_time.h" @@ -56,6 +60,8 @@ class MockOsSysCalls : public OsSysCallsImpl { MOCK_METHOD3(writev, SysCallSizeResult(int, const iovec*, int)); MOCK_METHOD3(readv, SysCallSizeResult(int, const iovec*, int)); MOCK_METHOD4(recv, SysCallSizeResult(int socket, void* buffer, size_t length, int flags)); + MOCK_METHOD6(recvfrom, SysCallSizeResult(int sockfd, void* buffer, size_t length, int flags, + struct sockaddr* addr, socklen_t* addrlen)); MOCK_METHOD3(shmOpen, SysCallIntResult(const char*, int, mode_t)); MOCK_METHOD1(shmUnlink, SysCallIntResult(const char*)); @@ -74,5 +80,13 @@ class MockOsSysCalls : public OsSysCallsImpl { std::map boolsockopts_; }; +#if defined(__linux__) +class MockLinuxOsSysCalls : public LinuxOsSysCallsImpl { +public: + // Api::LinuxOsSysCalls + MOCK_METHOD3(sched_getaffinity, SysCallIntResult(pid_t pid, size_t cpusetsize, cpu_set_t* mask)); +}; +#endif + } // namespace Api } // namespace Envoy diff --git a/test/mocks/config/mocks.h b/test/mocks/config/mocks.h index 99014dad6defb..f6d8677642766 100644 --- a/test/mocks/config/mocks.h +++ b/test/mocks/config/mocks.h @@ -26,9 +26,14 @@ class MockSubscriptionCallbacks : public SubscriptionCallbacks { } template static std::string resourceName_(const T& resource) { return resource.name(); } + // TODO(fredlas) deduplicate MOCK_METHOD2_T(onConfigUpdate, void(const typename SubscriptionCallbacks::ResourceVector& resources, const std::string& version_info)); + MOCK_METHOD3_T(onConfigUpdate, + void(const Protobuf::RepeatedPtrField& added_resources, + const Protobuf::RepeatedPtrField& removed_resources, + const std::string& system_version_info)); MOCK_METHOD1_T(onConfigUpdateFailed, void(const EnvoyException* e)); MOCK_METHOD1_T(resourceName, std::string(const ProtobufWkt::Any& resource)); }; diff --git a/test/mocks/router/mocks.cc b/test/mocks/router/mocks.cc index da2b3f22aa204..f667c3982e574 100644 --- a/test/mocks/router/mocks.cc +++ b/test/mocks/router/mocks.cc @@ -25,6 +25,11 @@ void 
MockRetryState::expectHeadersRetry() { .WillOnce(DoAll(SaveArg<1>(&callback_), Return(RetryStatus::Yes))); } +void MockRetryState::expectHedgedPerTryTimeoutRetry() { + EXPECT_CALL(*this, shouldHedgeRetryPerTryTimeout(_)) + .WillOnce(DoAll(SaveArg<0>(&callback_), Return(RetryStatus::Yes))); +} + void MockRetryState::expectResetRetry() { EXPECT_CALL(*this, shouldRetryReset(_, _)) .WillOnce(DoAll(SaveArg<1>(&callback_), Return(RetryStatus::Yes))); diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 8a1e7d30653d8..cb90e04ed939b 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -73,11 +73,11 @@ class TestHedgePolicy : public HedgePolicy { const envoy::type::FractionalPercent& additionalRequestChance() const override { return additional_request_chance_; } - bool hedgeOnPerTryTimeout() const override { return hedge_on_per_try_timeout; } + bool hedgeOnPerTryTimeout() const override { return hedge_on_per_try_timeout_; } uint32_t initial_requests_{}; envoy::type::FractionalPercent additional_request_chance_{}; - bool hedge_on_per_try_timeout{}; + bool hedge_on_per_try_timeout_{}; }; class TestRetryPolicy : public RetryPolicy { @@ -106,6 +106,7 @@ class MockRetryState : public RetryState { ~MockRetryState(); void expectHeadersRetry(); + void expectHedgedPerTryTimeoutRetry(); void expectResetRetry(); MOCK_METHOD0(enabled, bool()); @@ -113,6 +114,7 @@ class MockRetryState : public RetryState { RetryStatus(const Http::HeaderMap& response_headers, DoRetryCallback callback)); MOCK_METHOD2(shouldRetryReset, RetryStatus(const Http::StreamResetReason reset_reason, DoRetryCallback callback)); + MOCK_METHOD1(shouldHedgeRetryPerTryTimeout, RetryStatus(DoRetryCallback callback)); MOCK_METHOD1(onHostAttempted, void(Upstream::HostDescriptionConstSharedPtr)); MOCK_METHOD1(shouldSelectAnotherHost, bool(const Upstream::Host& host)); MOCK_METHOD2(priorityLoadForRetry, diff --git a/test/mocks/server/mocks.cc b/test/mocks/server/mocks.cc index 
af2b5f299799e..5f58a2c6dada5 100644 --- a/test/mocks/server/mocks.cc +++ b/test/mocks/server/mocks.cc @@ -34,6 +34,7 @@ MockOptions::MockOptions(const std::string& config_path) : config_path_(config_p ON_CALL(*this, hotRestartDisabled()).WillByDefault(ReturnPointee(&hot_restart_disabled_)); ON_CALL(*this, signalHandlingEnabled()).WillByDefault(ReturnPointee(&signal_handling_enabled_)); ON_CALL(*this, mutexTracingEnabled()).WillByDefault(ReturnPointee(&mutex_tracing_enabled_)); + ON_CALL(*this, cpusetThreadsEnabled()).WillByDefault(ReturnPointee(&cpuset_threads_enabled_)); ON_CALL(*this, toCommandLineOptions()).WillByDefault(Invoke([] { return std::make_unique(); })); diff --git a/test/mocks/server/mocks.h b/test/mocks/server/mocks.h index 7c57b6e0da8e2..ec26b4bcbc2f6 100644 --- a/test/mocks/server/mocks.h +++ b/test/mocks/server/mocks.h @@ -79,6 +79,7 @@ class MockOptions : public Options { MOCK_CONST_METHOD0(hotRestartDisabled, bool()); MOCK_CONST_METHOD0(signalHandlingEnabled, bool()); MOCK_CONST_METHOD0(mutexTracingEnabled, bool()); + MOCK_CONST_METHOD0(cpusetThreadsEnabled, bool()); MOCK_CONST_METHOD0(toCommandLineOptions, Server::CommandLineOptionsPtr()); std::string config_path_; @@ -95,6 +96,7 @@ class MockOptions : public Options { bool hot_restart_disabled_{}; bool signal_handling_enabled_{true}; bool mutex_tracing_enabled_{}; + bool cpuset_threads_enabled_{}; }; class MockConfigTracker : public ConfigTracker { diff --git a/test/mocks/stats/mocks.cc b/test/mocks/stats/mocks.cc index 5b1dbf98df98c..ea078cf4ff7c4 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -30,6 +30,14 @@ MockGauge::MockGauge() { } MockGauge::~MockGauge() {} +MockBoolIndicator::MockBoolIndicator() { + ON_CALL(*this, tagExtractedName()).WillByDefault(ReturnRef(name_)); + ON_CALL(*this, tags()).WillByDefault(ReturnRef(tags_)); + ON_CALL(*this, used()).WillByDefault(ReturnPointee(&used_)); + ON_CALL(*this, value()).WillByDefault(ReturnPointee(&value_)); +} 
+MockBoolIndicator::~MockBoolIndicator() {} + MockHistogram::MockHistogram() { ON_CALL(*this, recordValue(_)).WillByDefault(Invoke([this](uint64_t value) { if (store_ != nullptr) { diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index c079e785be16b..c4dacdaaee22f 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -74,6 +74,28 @@ class MockGauge : public Gauge { std::vector tags_; }; +class MockBoolIndicator : public BoolIndicator { +public: + MockBoolIndicator(); + ~MockBoolIndicator(); + + // Note: cannot be mocked because it is accessed as a Property in a gmock EXPECT_CALL. This + // creates a deadlock in gmock and is an unintended use of mock functions. + std::string name() const override { return name_; }; + const char* nameCStr() const override { return name_.c_str(); }; + + MOCK_CONST_METHOD0(tagExtractedName, const std::string&()); + MOCK_CONST_METHOD0(tags, const std::vector&()); + MOCK_METHOD1(set, void(bool value)); + MOCK_CONST_METHOD0(used, bool()); + MOCK_CONST_METHOD0(value, bool()); + + bool used_; + uint64_t value_; + std::string name_; + std::vector tags_; +}; + class MockHistogram : public Histogram { public: MockHistogram(); @@ -129,6 +151,7 @@ class MockSource : public Source { MOCK_METHOD0(cachedCounters, const std::vector&()); MOCK_METHOD0(cachedGauges, const std::vector&()); + MOCK_METHOD0(cachedBoolIndicators, const std::vector&()); MOCK_METHOD0(cachedHistograms, const std::vector&()); MOCK_METHOD0(clearCache, void()); @@ -159,6 +182,8 @@ class MockStore : public Store { MOCK_METHOD1(createScope_, Scope*(const std::string& name)); MOCK_METHOD1(gauge, Gauge&(const std::string&)); MOCK_CONST_METHOD0(gauges, std::vector()); + MOCK_METHOD1(boolIndicator, BoolIndicator&(const std::string&)); + MOCK_CONST_METHOD0(boolIndicators, std::vector()); MOCK_METHOD1(histogram, Histogram&(const std::string& name)); MOCK_CONST_METHOD0(histograms, std::vector()); MOCK_CONST_METHOD0(statsOptions, const StatsOptions&()); 
diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index 4ad5a91f0f4e6..d4c4f2fa8a938 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -51,10 +51,12 @@ envoy_cc_mock( deps = [ ":load_balancer_context_mock", "//include/envoy/http:async_client_interface", + "//include/envoy/upstream:cluster_factory_interface", "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:health_checker_interface", "//include/envoy/upstream:load_balancer_interface", "//include/envoy/upstream:upstream_interface", + "//source/common/upstream:cluster_factory_lib", "//source/common/upstream:health_discovery_service_lib", "//source/common/upstream:upstream_lib", "//test/mocks/config:config_mocks", diff --git a/test/server/BUILD b/test/server/BUILD index 813ac24ee5267..1c64006680419 100644 --- a/test/server/BUILD +++ b/test/server/BUILD @@ -114,6 +114,10 @@ envoy_cc_test( "//source/common/common:utility_lib", "//source/common/stats:stats_lib", "//source/server:options_lib", + "//test/mocks/api:api_mocks", + "//test/test_common:environment_lib", + "//test/test_common:logging_lib", + "//test/test_common:threadsafe_singleton_injector_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index 52ac0dc503423..31902e992add7 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -9,6 +10,13 @@ #include "server/options_impl.h" +#if defined(__linux__) +#include "server/options_impl_platform_linux.h" +#endif +#include "test/mocks/api/mocks.h" +#include "test/test_common/environment.h" +#include "test/test_common/logging.h" +#include "test/test_common/threadsafe_singleton_injector.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -67,7 +75,7 @@ TEST_F(OptionsImplTest, All) { "--service-cluster cluster --service-node node --service-zone zone " 
"--file-flush-interval-msec 9000 " "--drain-time-s 60 --log-format [%v] --parent-shutdown-time-s 90 --log-path /foo/bar " - "--disable-hot-restart"); + "--disable-hot-restart --cpuset-threads"); EXPECT_EQ(Server::Mode::Validate, options->mode()); EXPECT_EQ(2U, options->concurrency()); EXPECT_EQ("hello", options->configPath()); @@ -85,6 +93,7 @@ TEST_F(OptionsImplTest, All) { EXPECT_EQ(std::chrono::seconds(60), options->drainTime()); EXPECT_EQ(std::chrono::seconds(90), options->parentShutdownTime()); EXPECT_EQ(true, options->hotRestartDisabled()); + EXPECT_EQ(true, options->cpusetThreadsEnabled()); options = createOptionsImpl("envoy --mode init_only"); EXPECT_EQ(Server::Mode::InitOnly, options->mode()); @@ -94,6 +103,7 @@ TEST_F(OptionsImplTest, SetAll) { std::unique_ptr options = createOptionsImpl("envoy -c hello"); bool hot_restart_disabled = options->hotRestartDisabled(); bool signal_handling_enabled = options->signalHandlingEnabled(); + bool cpuset_threads_enabled = options->cpusetThreadsEnabled(); Stats::StatsOptionsImpl stats_options; stats_options.max_obj_name_length_ = 54321; stats_options.max_stat_suffix_length_ = 1234; @@ -119,6 +129,7 @@ TEST_F(OptionsImplTest, SetAll) { options->setStatsOptions(stats_options); options->setHotRestartDisabled(!options->hotRestartDisabled()); options->setSignalHandling(!options->signalHandlingEnabled()); + options->setCpusetThreads(!options->cpusetThreadsEnabled()); EXPECT_EQ(109876, options->baseId()); EXPECT_EQ(42U, options->concurrency()); @@ -142,6 +153,7 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ(stats_options.max_stat_suffix_length_, options->statsOptions().maxStatSuffixLength()); EXPECT_EQ(!hot_restart_disabled, options->hotRestartDisabled()); EXPECT_EQ(!signal_handling_enabled, options->signalHandlingEnabled()); + EXPECT_EQ(!cpuset_threads_enabled, options->cpusetThreadsEnabled()); // Validate that CommandLineOptions is constructed correctly. 
Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions(); @@ -172,6 +184,7 @@ TEST_F(OptionsImplTest, SetAll) { EXPECT_EQ(options->statsOptions().maxObjNameLength(), command_line_options->max_obj_name_len()); EXPECT_EQ(options->hotRestartDisabled(), command_line_options->disable_hot_restart()); EXPECT_EQ(options->mutexTracingEnabled(), command_line_options->enable_mutex_tracing()); + EXPECT_EQ(options->cpusetThreadsEnabled(), command_line_options->cpuset_threads()); } TEST_F(OptionsImplTest, DefaultParams) { @@ -182,6 +195,7 @@ TEST_F(OptionsImplTest, DefaultParams) { EXPECT_EQ(Network::Address::IpVersion::v4, options->localAddressIpVersion()); EXPECT_EQ(Server::Mode::Serve, options->mode()); EXPECT_EQ(false, options->hotRestartDisabled()); + EXPECT_EQ(false, options->cpusetThreadsEnabled()); // Validate that CommandLineOptions is constructed correctly with default params. Server::CommandLineOptionsPtr command_line_options = options->toCommandLineOptions(); @@ -193,6 +207,7 @@ TEST_F(OptionsImplTest, DefaultParams) { command_line_options->local_address_ip_version()); EXPECT_EQ(envoy::admin::v2alpha::CommandLineOptions::Serve, command_line_options->mode()); EXPECT_EQ(false, command_line_options->disable_hot_restart()); + EXPECT_EQ(false, command_line_options->cpuset_threads()); } // Validates that the server_info proto is in sync with the options. 
@@ -304,7 +319,62 @@ TEST_F(OptionsImplTest, SaneTestConstructor) { EXPECT_EQ(regular_options_impl->statsOptions().maxStatSuffixLength(), test_options_impl.statsOptions().maxStatSuffixLength()); EXPECT_EQ(regular_options_impl->hotRestartDisabled(), test_options_impl.hotRestartDisabled()); + EXPECT_EQ(regular_options_impl->cpusetThreadsEnabled(), test_options_impl.cpusetThreadsEnabled()); } +TEST_F(OptionsImplTest, SetBothConcurrencyAndCpuset) { + EXPECT_LOG_CONTAINS( + "warning", + "Both --concurrency and --cpuset-threads options are set; not applying --cpuset-threads.", + std::unique_ptr options = + createOptionsImpl("envoy -c hello --concurrency 42 --cpuset-threads")); +} + +#if defined(__linux__) + +using testing::Return; + +class OptionsImplPlatformLinuxTest : public testing::Test { +public: +}; + +TEST_F(OptionsImplPlatformLinuxTest, AffinityTest1) { + // Success case: cpuset size and hardware thread count are the same. + unsigned int fake_cpuset_size = std::thread::hardware_concurrency(); + unsigned int fake_hw_threads = fake_cpuset_size; + + EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_cpuset_size); +} + +TEST_F(OptionsImplPlatformLinuxTest, AffinityTest2) { + // Success case: cpuset size is half of the hardware thread count. + unsigned int fake_cpuset_size = std::thread::hardware_concurrency(); + unsigned int fake_hw_threads = 2 * fake_cpuset_size; + + EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_cpuset_size); +} + +TEST_F(OptionsImplPlatformLinuxTest, AffinityTest3) { + // Failure case: cpuset size is bigger than the hardware thread count. 
+ unsigned int fake_cpuset_size = std::thread::hardware_concurrency(); + unsigned int fake_hw_threads = fake_cpuset_size - 1; + + EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_hw_threads); +} + +TEST_F(OptionsImplPlatformLinuxTest, AffinityTest4) { + // When sched_getaffinity() fails, expect to get the hardware thread count. + unsigned int fake_cpuset_size = std::thread::hardware_concurrency(); + unsigned int fake_hw_threads = 2 * fake_cpuset_size; + Api::MockLinuxOsSysCalls linux_os_sys_calls; + TestThreadsafeSingletonInjector linux_os_calls(&linux_os_sys_calls); + + EXPECT_CALL(linux_os_sys_calls, sched_getaffinity(_, _, _)) + .WillOnce(Return(Api::SysCallIntResult{-1, 0})); + EXPECT_EQ(OptionsImplPlatformLinux::getCpuAffinityCount(fake_hw_threads), fake_hw_threads); +} + +#endif + } // namespace } // namespace Envoy diff --git a/test/server/overload_manager_impl_test.cc b/test/server/overload_manager_impl_test.cc index 439103650548f..2b7ada09678ef 100644 --- a/test/server/overload_manager_impl_test.cc +++ b/test/server/overload_manager_impl_test.cc @@ -152,7 +152,8 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { [&](OverloadActionState) { EXPECT_TRUE(false); }); manager->start(); - Stats::Gauge& active_gauge = stats_.gauge("overload.envoy.overload_actions.dummy_action.active"); + Stats::BoolIndicator& active_indicator = + stats_.boolIndicator("overload.envoy.overload_actions.dummy_action.active"); Stats::Gauge& pressure_gauge1 = stats_.gauge("overload.envoy.resource_monitors.fake_resource1.pressure"); Stats::Gauge& pressure_gauge2 = @@ -165,7 +166,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_FALSE(is_active); EXPECT_EQ(action_state, OverloadActionState::Inactive); EXPECT_EQ(0, cb_count); - EXPECT_EQ(0, active_gauge.value()); + EXPECT_FALSE(active_indicator.value()); EXPECT_EQ(50, pressure_gauge1.value()); factory1_.monitor_->setPressure(0.95); @@ -173,7 +174,7 @@ 
TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_TRUE(is_active); EXPECT_EQ(action_state, OverloadActionState::Active); EXPECT_EQ(1, cb_count); - EXPECT_EQ(1, active_gauge.value()); + EXPECT_TRUE(active_indicator.value()); EXPECT_EQ(95, pressure_gauge1.value()); // Callback should not be invoked if action active state has not changed @@ -199,7 +200,7 @@ TEST_F(OverloadManagerImplTest, CallbackOnlyFiresWhenStateChanges) { EXPECT_FALSE(is_active); EXPECT_EQ(action_state, OverloadActionState::Inactive); EXPECT_EQ(2, cb_count); - EXPECT_EQ(0, active_gauge.value()); + EXPECT_FALSE(active_indicator.value()); EXPECT_EQ(40, pressure_gauge2.value()); manager->stop(); diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index c23d065303296..ef145bda07111 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -141,6 +141,11 @@ Stats::GaugeSharedPtr TestUtility::findGauge(Stats::Store& store, const std::str return findByName(store.gauges(), name); } +Stats::BoolIndicatorSharedPtr TestUtility::findBoolIndicator(Stats::Store& store, + const std::string& name) { + return findByName(store.boolIndicators(), name); +} + std::list TestUtility::makeDnsResponse(const std::list& addresses) { std::list ret; diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 43b7ed834801e..0c4a251918878 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -152,7 +152,7 @@ class TestUtility { * Find a counter in a stats store. * @param store supplies the stats store. * @param name supplies the name to search for. - * @return Stats::CounterSharedPtr the counter or nullptr if there is none. + * @return Stats::CounterSharedPtr the counter, or nullptr if there is none. */ static Stats::CounterSharedPtr findCounter(Stats::Store& store, const std::string& name); @@ -160,10 +160,19 @@ class TestUtility { * Find a gauge in a stats store. * @param store supplies the stats store. 
* @param name supplies the name to search for. - * @return Stats::GaugeSharedPtr the gauge or nullptr if there is none. + * @return Stats::GaugeSharedPtr the gauge, or nullptr if there is none. */ static Stats::GaugeSharedPtr findGauge(Stats::Store& store, const std::string& name); + /** + * Find a bool in a stats store. + * @param store supplies the stats store. + * @param name supplies the name to search for. + * @return Stats::BoolIndicatorSharedPtr the bool, or nullptr if there is none. + */ + static Stats::BoolIndicatorSharedPtr findBoolIndicator(Stats::Store& store, + const std::string& name); + /** * Convert a string list of IP addresses into a list of network addresses usable for DNS * response testing. diff --git a/tools/print_dependencies.py b/tools/print_dependencies.py index 20a10343b0780..4c0fefb1a03f2 100755 --- a/tools/print_dependencies.py +++ b/tools/print_dependencies.py @@ -11,8 +11,6 @@ API_DEPS = imp.load_source('api', 'api/bazel/repository_locations.bzl') DEPS = imp.load_source('deps', 'bazel/repository_locations.bzl') -RECIPE_MAP = imp.load_source('deps', 'bazel/target_recipes.bzl') -RECIPE_INFO = imp.load_source('recipes', 'ci/build_container/build_recipes/versions.py') def print_deps(deps): @@ -32,14 +30,6 @@ def print_deps(deps): 'file-prefix': loc.get('strip_prefix', ''), }) - for key, loc in RECIPE_INFO.RECIPES.items(): - deps.append({ - 'identifier': key, - 'file-sha256': loc.get('sha256'), - 'file-url': loc.get('url'), - 'file-prefix': loc.get('strip_prefix', ''), - }) - deps = sorted(deps, key=lambda k: k['identifier']) # Print all dependencies if a target is unspecified @@ -60,17 +50,5 @@ def print_deps(deps): if match: repos.add(match.group(1)) - # Gather the build recipes repositories - # These are part of @envoy_deps repository ie @envoy_deps//:luajit - recipe_regex = re.compile('^@envoy_deps//:(\w+)$') - for line in output.split('\n'): - match = recipe_regex.match(line) - if not match: - continue - key = match.group(1) - repo = 
RECIPE_MAP.TARGET_RECIPES[key] - if repo: - repos.add(repo) - deps = filter(lambda dep: dep['identifier'] in repos, deps) print_deps(deps) diff --git a/tools/spelling_dictionary.txt b/tools/spelling_dictionary.txt index 9b59f844f3ce3..34a034702ad1a 100644 --- a/tools/spelling_dictionary.txt +++ b/tools/spelling_dictionary.txt @@ -183,6 +183,7 @@ RCU RDN RDS RDWR +REIMPLEMENT REQ RFC RHS @@ -258,6 +259,7 @@ WS Welford's Werror XDS +xDSes XFCC XFF XM @@ -440,6 +442,7 @@ gregs gzip hacky handshaker +hd hdr healths healthz @@ -633,6 +636,7 @@ sanitization sanitizer scala scalability +sched schemas serializable serializer @@ -694,6 +698,7 @@ uint un unary unconfigurable +unconfigured uncontended undef undegraded