diff --git a/gradle.properties b/gradle.properties index 684a5c1..1a2e59a 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,2 +1,2 @@ -version=1.34.0-1 +version=1.31.8-1 org.gradle.jvmargs=-Xmx2g -XX:MaxMetaspaceSize=512m -XX:+HeapDumpOnOutOfMemoryError -Dfile.encoding=UTF-8 diff --git a/src/main/proto/cel/expr/checked.proto b/src/main/proto/cel/expr/checked.proto index 0105b93..e327db9 100644 --- a/src/main/proto/cel/expr/checked.proto +++ b/src/main/proto/cel/expr/checked.proto @@ -236,20 +236,6 @@ message Decl { Constant value = 2; // Documentation string for the identifier. - // - // Provide a brief description of what the variable represents and whether - // there are any constraints on the formatting or supported value range. - // - // Examples: - // - // 'request.auth.principal' - string which uniquely identifies an - // authenticated principal. For JSON Web Tokens (JWTs), the principal - // is the combination of the issuer ('iss') and subject ('sub') token - // fields concatenated by a forward slash: iss + `/` + sub. - // - // 'min_cpus' - integer value indicates the minimum number of CPUs - // required for a compute cluster. The 'min_cpus' value must be - // greater than zero and less than 'max_cpus' or 64 whichever is less. string doc = 3; } @@ -307,45 +293,11 @@ message Decl { bool is_instance_function = 5; // Documentation string for the overload. - // - // Provide examples of the overload behavior, preferring to use literal - // values as input with a comment on the return value. - // - // Examples: - // - // // Determine whether a value of type exists within a list. - // 2 in [1, 2, 3] // returns true - // - // // Determine whether a key of type exists within a map. - // 'hello' in {'hi': 'you', 'hello': 'there'} // returns true - // 'help' in {'hi': 'you', 'hello': 'there'} // returns false - // - // // Take the substring of a string starting at a specific character - // // offset (inclusive). 
- // "tacocat".substring(1) // returns "acocat" - // "tacocat".substring(20) // error - // - // // Take the substring of a string starting at a specific character - // // offset (inclusive) and ending at the given offset (exclusive). - // "tacocat".substring(1, 6) // returns "acoca" string doc = 6; } // Required. List of function overloads, must contain at least one overload. repeated Overload overloads = 1; - - // Documentation string for the function that indicates the general purpose - // of the function and its behavior. - // - // Documentation strings for the function should be general purpose with - // specific examples provided in the overload doc string. - // - // Examples: - // - // The 'in' operator tests whether an item exists in a collection. - // - // The 'substring' function returns a substring of a target string. - string doc = 2; } // The fully qualified name of the declaration. diff --git a/src/main/proto/cel/expr/conformance/env_config.proto b/src/main/proto/cel/expr/conformance/env_config.proto deleted file mode 100644 index dce0df6..0000000 --- a/src/main/proto/cel/expr/conformance/env_config.proto +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2025 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package cel.expr.conformance; - -import "cel/expr/checked.proto"; -import "google/protobuf/struct.proto"; -import "google/protobuf/descriptor.proto"; - -option cc_enable_arenas = true; -option go_package = "cel.dev/expr/conformance"; -option java_multiple_files = true; -option java_outer_classname = "EnvironmentProto"; -option java_package = "cel.dev.expr.conformance"; - -// Representation of a CEL Environment, defining what features and extensions -// are available for conformance testing. -message Environment { - // Name of the environment - string name = 1; - - // Description for the current environment - string description = 2; - - // Sets the namespace (container) for the expression. - // This is used to simplify resolution. - // For example with container - // `google.rpc.context` - // an identifier of `google.rpc.context.AttributeContext` could be referred - // to simply as `AttributeContext` in the CEL expression. - string container = 3; - - // Import represents a type name that will be abbreviated by its simple name - // making it easier to reference simple type names from packages other than - // the expression container. - // For ex: - // Import{name: 'google.rpc.Status'} - // The above import will ensure that `google.rpc.Status` is available by the - // simple name `Status` within CEL expressions. - message Import { - // Qualified type name which will be abbreviated - string name = 1; - } - - // List of abbreviations to be added to the CEL environment - repeated Import imports = 4; - - // Set of options to subset a subsettable library - LibrarySubset stdlib = 5; - - // List of extensions to enable in the CEL environment. - repeated Extension extensions = 6; - - // ContextVariable represents a message type to be made available as a - // context variable to the CEL environment. - message ContextVariable { - // Fully qualified type name of the context proto. 
- string type_name = 1; - } - - // If set, adds a context declaration from a proto message. - // - // Context messages have all of their top-level fields available as variables - // in the type checker. - ContextVariable context_variable = 7; - - // List of declarations to be configured in the CEL environment. - // - // Note: The CEL environment can be configured with either the - // context_variable or a set of ident_decls provided as part of declarations. - // Providing both will result in an error. - repeated cel.expr.Decl declarations = 8; - - // List of validators for validating the parsed ast. - repeated Validator validators = 9; - - // List of feature flags to be enabled or disabled. - repeated Feature features = 10; - - // Disables including the declarations from the standard CEL environment. - // - // NOTE: Do not disable the standard CEL declarations unless you are aware of - // the implications and have discussed your use case on cel-discuss@ - // or with the members of the cel-governance-team@ - // - // Deprecated: Use LibrarySubset to disable standard cel declarations instead: - // stdlib = LibrarySubset{ disable: true } - bool disable_standard_cel_declarations = 11; - - // If provided, uses the provided FileDescriptorSet to extend types available - // the CEL expression. All "well-known" protobuf messages (google.protobuf.*) - // are known to the CEL compiler, but all others must be provided for type - // checking. - google.protobuf.FileDescriptorSet message_type_extension = 12; - - // When macro call tracking is enabled, the resulting SourceInfo in the - // CheckedExpr will contain a collection of expressions representing the - // function calls which were replaced by macros. - // - // Deprecated: Use Feature to enable macro call tracking - // Feature{ name: "cel.feature.macro_call_tracking", enabled: true } - bool enable_macro_call_tracking = 13; -} - -// Represents a named validator with an optional map-based configuration object. 
-// Naming convention followed by validators: -// .validator. -// For ex: -// `cel.validator.timestamp` -// -// Note: the map-keys must directly correspond to the internal representation of -// the original validator, and should only use primitive scalar types as values -// at this time. -message Validator { - string name = 1; - - // Additional configurations to be included as part of the validation - map config = 2; -} - -// Represents a named boolean feature flag supported by CEL. -// Naming convention followed by features: -// .feature. -// For ex: -// `cel.feature.cross_type_numeric_comparisons` -message Feature { - // Name of the feature flag. - string name = 1; - - // State of the feature flab. - bool enabled = 2; -} - -// Extension represents a versioned extension library reference to enable in the -// CEL environment. -message Extension { - // Name of the extension library. - string name = 1; - // Version of the extension library. - string version = 2; -} - -// LibrarySubset indicates a subset of the macros and functions supported by a -// subsettable library. -message LibrarySubset { - // Indicates whether the library has been disabled, typically only - // used for default-enabled libraries like stdlib. - bool disabled = 1; - - // Disables macros for the given library. - bool disable_macros = 2; - - // Specifies a set of macro function names to include in the subset. - repeated string include_macros = 3; - - // Specifies a set of macro function names to exclude from the subset. - // Note: if IncludeMacros is non-empty, then ExcludeFunctions is ignored. - repeated string exclude_macros = 4; - - // Specifies a set of functions to include in the subset. - // - // Note: the overloads specified in the subset need only specify their ID. - // Note: if IncludeFunctions is non-empty, then ExcludeFunctions is ignored. - repeated cel.expr.Decl include_functions = 5; - - // Specifies the set of functions to exclude from the subset. 
- // - // Note: the overloads specified in the subset need only specify their ID. - repeated cel.expr.Decl exclude_functions = 6; -} diff --git a/src/main/proto/cel/expr/conformance/envcheck.proto b/src/main/proto/cel/expr/conformance/envcheck.proto new file mode 100644 index 0000000..93585c6 --- /dev/null +++ b/src/main/proto/cel/expr/conformance/envcheck.proto @@ -0,0 +1,37 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests for runtime support of standard functions. + +syntax = "proto3"; + +package cel.expr.conformance; + +import "cel/expr/checked.proto"; + +option cc_enable_arenas = true; +option go_package = "cel.dev/expr/conformance"; +option java_multiple_files = true; +option java_outer_classname = "EnvcheckProto"; +option java_package = "dev.cel.expr.conformance"; + +// The format of a standard environment, i.e. a collection of declarations +// for the checker. +message Env { + // Required. The name of the environment. + string name = 1; + + // The declarations in this environment. 
+ repeated cel.expr.Decl decl = 2; +} diff --git a/src/main/proto/cel/expr/conformance/proto2/test_all_types.proto b/src/main/proto/cel/expr/conformance/proto2/test_all_types.proto index 7a68093..737ca3a 100644 --- a/src/main/proto/cel/expr/conformance/proto2/test_all_types.proto +++ b/src/main/proto/cel/expr/conformance/proto2/test_all_types.proto @@ -1,25 +1,9 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - syntax = "proto2"; package cel.expr.conformance.proto2; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; @@ -63,9 +47,6 @@ message TestAllTypes { optional string single_string = 14 [default = "empty"]; optional bytes single_bytes = 15 [default = "none"]; - // Collides with 'in' operator. - optional bool in = 18; - // Wellknown. 
optional google.protobuf.Any single_any = 100; optional google.protobuf.Duration single_duration = 101; @@ -84,8 +65,6 @@ message TestAllTypes { optional google.protobuf.ListValue list_value = 114; optional google.protobuf.NullValue null_value = 115; optional google.protobuf.NullValue optional_null_value = 116; - optional google.protobuf.FieldMask field_mask = 117; - optional google.protobuf.Empty empty = 118; // Nested messages oneof nested_type { @@ -117,7 +96,7 @@ message TestAllTypes { repeated NestedEnum repeated_nested_enum = 52; repeated string repeated_string_piece = 53 [ctype = STRING_PIECE]; repeated string repeated_cord = 54 [ctype = CORD]; - repeated NestedMessage repeated_lazy_message = 55; + repeated NestedMessage repeated_lazy_message = 55 [lazy = true]; // Repeated wellknown. repeated google.protobuf.Any repeated_any = 120; @@ -307,19 +286,6 @@ message TestAllTypes { map map_string_string_wrapper = 321; map map_string_bool_wrapper = 322; map map_string_bytes_wrapper = 323; - - oneof kind { - NestedTestAllTypes oneof_type = 400; - NestedMessage oneof_msg = 401; - bool oneof_bool = 402; - } - - optional group NestedGroup = 403 { - optional int32 single_id = 404; - optional string single_name = 405; - } - - extensions 1000 to max; } // This proto includes a recursively nested message. diff --git a/src/main/proto/cel/expr/conformance/proto2/test_all_types_extensions.proto b/src/main/proto/cel/expr/conformance/proto2/test_all_types_extensions.proto deleted file mode 100644 index 1c37f4f..0000000 --- a/src/main/proto/cel/expr/conformance/proto2/test_all_types_extensions.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto2"; - -package cel.expr.conformance.proto2; - -import "cel/expr/conformance/proto2/test_all_types.proto"; - -option cc_enable_arenas = true; -option go_package = "cel.dev/expr/conformance/proto2"; -option java_outer_classname = "TestAllTypesExtensions"; -option java_package = "dev.cel.expr.conformance.proto2"; -option java_multiple_files = true; - -// Package scoped extensions -extend TestAllTypes { - optional int32 int32_ext = 1000; - optional TestAllTypes nested_ext = 1001; - optional TestAllTypes test_all_types_ext = 1002; - optional TestAllTypes.NestedEnum nested_enum_ext = 1003; - repeated TestAllTypes repeated_test_all_types = 1004; -} - -// Message scoped extensions -message Proto2ExtensionScopedMessage { - extend TestAllTypes { - optional int64 int64_ext = 1005; - optional TestAllTypes message_scoped_nested_ext = 1006; - optional TestAllTypes.NestedEnum nested_enum_ext = 1007; - repeated TestAllTypes message_scoped_repeated_test_all_types = 1008; - } -} diff --git a/src/main/proto/cel/expr/conformance/proto3/test_all_types.proto b/src/main/proto/cel/expr/conformance/proto3/test_all_types.proto index 8ddc472..c904868 100644 --- a/src/main/proto/cel/expr/conformance/proto3/test_all_types.proto +++ b/src/main/proto/cel/expr/conformance/proto3/test_all_types.proto @@ -1,25 +1,9 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - syntax = "proto3"; package cel.expr.conformance.proto3; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; @@ -62,11 +46,6 @@ message TestAllTypes { bool single_bool = 13; string single_string = 14; bytes single_bytes = 15; - optional bool optional_bool = 16; - optional bool optional_string = 17; - - // Collides with 'in' operator. - bool in = 18; // Wellknown. google.protobuf.Any single_any = 100; @@ -86,8 +65,6 @@ message TestAllTypes { google.protobuf.ListValue list_value = 114; google.protobuf.NullValue null_value = 115; optional google.protobuf.NullValue optional_null_value = 116; - google.protobuf.FieldMask field_mask = 117; - google.protobuf.Empty empty = 118; // Nested messages oneof nested_type { @@ -119,7 +96,7 @@ message TestAllTypes { repeated NestedEnum repeated_nested_enum = 52; repeated string repeated_string_piece = 53 [ctype = STRING_PIECE]; repeated string repeated_cord = 54 [ctype = CORD]; - repeated NestedMessage repeated_lazy_message = 55; + repeated NestedMessage repeated_lazy_message = 55 [lazy = true]; // Repeated wellknown. 
repeated google.protobuf.Any repeated_any = 120; @@ -309,12 +286,6 @@ message TestAllTypes { map map_string_string_wrapper = 321; map map_string_bool_wrapper = 322; map map_string_bytes_wrapper = 323; - - oneof kind { - NestedTestAllTypes oneof_type = 400; - NestedMessage oneof_msg = 401; - bool oneof_bool = 402; - } } // This proto includes a recursively nested message. diff --git a/src/main/proto/cel/expr/conformance/test/simple.proto b/src/main/proto/cel/expr/conformance/simple.proto similarity index 83% rename from src/main/proto/cel/expr/conformance/test/simple.proto rename to src/main/proto/cel/expr/conformance/simple.proto index 227fc09..8b4ab80 100644 --- a/src/main/proto/cel/expr/conformance/test/simple.proto +++ b/src/main/proto/cel/expr/conformance/simple.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,23 +16,21 @@ syntax = "proto3"; -package cel.expr.conformance.test; +package cel.expr.conformance; import "cel/expr/checked.proto"; import "cel/expr/eval.proto"; import "cel/expr/value.proto"; option cc_enable_arenas = true; -option go_package = "cel.dev/expr/conformance/test"; +option go_package = "cel.dev/expr/conformance"; option java_multiple_files = true; option java_outer_classname = "SimpleProto"; -option java_package = "dev.cel.expr.conformance.test"; +option java_package = "dev.cel.expr.conformance"; // The format of a simple test file, expected to be stored in text format. // A file is the unit of granularity for selecting conformance tests, // so tests of optional features should be segregated into separate files. -// -// Deprecated: Use cel.expr.conformance.test.Suite message SimpleTestFile { // Required. The name of the file. Should match the filename. string name = 1; @@ -78,9 +76,6 @@ message SimpleTest { // Disables the check phase. 
bool disable_check = 5; - // Disables the evaluate phase. - bool check_only = 15; - // The type environment to use for the check phase. repeated cel.expr.Decl type_env = 6; @@ -102,9 +97,6 @@ message SimpleTest { // * a floating point NaN should match any NaN. cel.expr.Value value = 8; - // A result and deduced expression type. - TypedResult typed_result = 16; - // Matches error evaluation results. cel.expr.ErrorSet eval_error = 9; @@ -119,17 +111,7 @@ message SimpleTest { // (Using explicit message since oneof can't handle repeated.) UnknownSetMatcher any_unknowns = 12; } - // Next is 17. -} - -// Matches a result along with deduced expression type. -message TypedResult { - // A normal value, which must match the evaluation result exactly - // via value equality semantics. This is ignored if the test is `check_only`. - cel.expr.Value result = 1; - - // The deduced type of the expression as reported by the checker. - cel.expr.Type deduced_type = 2; + // Next is 15. } // Matches error results from Eval. diff --git a/src/main/proto/cel/expr/conformance/test/suite.proto b/src/main/proto/cel/expr/conformance/test/suite.proto deleted file mode 100644 index d6789bd..0000000 --- a/src/main/proto/cel/expr/conformance/test/suite.proto +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2024 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Unit tests and end-to-end conformance tests. 
- -syntax = "proto3"; - -package cel.expr.conformance.test; - -import "cel/expr/checked.proto"; -import "cel/expr/eval.proto"; -import "cel/expr/value.proto"; -import "cel/expr/conformance/env_config.proto"; -import "google/protobuf/any.proto"; - -option cc_enable_arenas = true; -option go_package = "cel.dev/expr/conformance/test"; -option java_multiple_files = true; -option java_outer_classname = "SuiteProto"; -option java_package = "dev.cel.expr.conformance.test"; - -// A test suite is a collection of tests designed to evaluate the correctness of -// a CEL policy, a CEL expression or the conformance of a CEL implementation to -// the standard specification. -message TestSuite { - // The name of the test suite. - string name = 1; - - // Description of the test suite. - string description = 2; - - // Test sections of the test suite. - // Each section represents a behavior to be tested. - repeated TestSection sections = 3; -} - -// A collection of related test cases. -message TestSection { - // Name of the test section. - string name = 1; - - // Description of the test section. - string description = 2; - - // Test cases of the test section. - // Each test case represents a test scenario. - repeated TestCase tests = 3; -} - -// A test to validate a CEL policy or expression. The test case encompasses -// evaluation of the compiled expression using the provided input bindings and -// asserting the result against the expected result. -// It can also validate a raw CEL expression string through parse, check and -// eval stages, making use of the augmenting CEL environment if provided. -message TestCase { - // Name of the test case. - string name = 1; - - // A description of the test. - string description = 2; - - // The text of the CEL expression. - string expr = 3; - - // Serialized environment to be used for compilation and evaluation of the - // CEL expression for the current test case. 
- // This option allows validating the same expression against multiple - // environments. - cel.expr.conformance.Environment env = 4; - - // Input for the test case - TestInput input = 5; - - // Expected result of the test case. - TestOutput output = 6; - - // If specified validates that the deduced type at check time matches - // If the result kind is not set and this field is set, the test is considered - // "check-only". - cel.expr.Type deduced_type = 7; - - // Bypass the type-checking and only attempt to evaluate the parsed - // expression. - bool disable_check = 8; -} - -// Input for the test case -message TestInput { - // The type of input for the test case - oneof input_kind { - // A set of variable bindings to be used for evaluating a checked - // expression. - Bindings bindings = 1; - - // A context message represents an input kind in the form of a proto - // message whose type is defined at runtime. - google.protobuf.Any context_message = 2; - - // A context expression representing a context proto variable. The - // fields of the input proto.Messages are used as top-level variables within - // an Activation. The expression is evaluated using the cel environment - // configured for the test suite. - string context_expr = 3; - } -} - -// The bindings of input variables for the test case. -message Bindings { - // A map representing a variable binding where the key is the name of the - // input variable. - map values = 1; -} - -// The input value for a variable binding -message InputValue { - // The type of input value that can be used for a variable binding - oneof kind { - // A simple literal value for a variable binding - cel.expr.Value value = 1; - - // An expression which evaluates to the value of the variable binding. - // The expression is evaluated using the same runtime environment as the - // one used for evaluating the expression under test. - string expr = 2; - } -} - -// Expected result of the test case. 
-message TestOutput { - // Type of expected result of the test case. - oneof result_kind { - // A normal value, which must match the evaluation result exactly via value - // equality semantics. This coincides with proto equality, except for: - // * maps are order-agnostic - // * a floating point NaN should match any NaN - cel.expr.Value result_value = 8; - - // An expression to be evaluated using the cel environment configured for - // the test suite. The result of this expression must match the result of - // the test case. - string result_expr = 9; - - // An error evaluation result set. Success if we match all of the errors in - // the set. - cel.expr.ErrorSet eval_error = 10; - - // An unknown evaluation result. - cel.expr.UnknownSet unknown = 11; - } -} diff --git a/src/main/proto/cel/expr/syntax.proto b/src/main/proto/cel/expr/syntax.proto index 00635e6..ed124a7 100644 --- a/src/main/proto/cel/expr/syntax.proto +++ b/src/main/proto/cel/expr/syntax.proto @@ -185,8 +185,8 @@ message Expr { // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. 
// - // Comprehensions for the standard environment macros evaluation can be best - // visualized as the following pseudocode: + // Comprehension evaluation can be best visualized as the following + // pseudocode: // // ``` // let `accu_var` = `accu_init` @@ -198,34 +198,11 @@ message Expr { // } // return `result` // ``` - // - // Comprehensions for the optional V2 macros which support map-to-map - // translation differ slightly from the standard environment macros in that - // they expose both the key or index in addition to the value for each list - // or map entry: - // - // ``` - // let `accu_var` = `accu_init` - // for (let `iter_var`, `iter_var2` in `iter_range`) { - // if (!`loop_condition`) { - // break - // } - // `accu_var` = `loop_step` - // } - // return `result` - // ``` message Comprehension { - // The name of the first iteration variable. - // For the single iteration variable macros, when iter_range is a list, this - // variable is the list element and when the iter_range is a map, this - // variable is the map key. + // The name of the iteration variable. string iter_var = 1; - // The name of the second iteration variable, empty if not set. - // This field is only set for comprehension v2 macros. - string iter_var2 = 8; - - // The range over which the comprehension iterates. + // The range over which var iterates. Expr iter_range = 2; // The name of the variable used for accumulation of the result. @@ -234,13 +211,13 @@ message Expr { // The initial value of the accumulator. Expr accu_init = 4; - // An expression which can contain iter_var, iter_var2, and accu_var. + // An expression which can contain iter_var and accu_var. // // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. Expr loop_condition = 5; - // An expression which can contain iter_var, iter_var2, and accu_var. + // An expression which can contain iter_var and accu_var. 
// // Computes the next value of accu_var. Expr loop_step = 6; diff --git a/src/main/proto/contrib/envoy/extensions/filters/http/golang/v3alpha/golang.proto b/src/main/proto/contrib/envoy/extensions/filters/http/golang/v3alpha/golang.proto index 84e87ff..2d05509 100644 --- a/src/main/proto/contrib/envoy/extensions/filters/http/golang/v3alpha/golang.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/http/golang/v3alpha/golang.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package envoy.extensions.filters.http.golang.v3alpha; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - import "google/protobuf/any.proto"; import "xds/annotations/v3/status.proto"; @@ -23,7 +21,7 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // For an overview of the Golang HTTP filter please see the :ref:`configuration reference documentation `. // [#extension: envoy.filters.http.golang] -// [#next-free-field: 7] +// [#next-free-field: 6] message Config { // The meanings are as follows: // @@ -76,13 +74,6 @@ message Config { // // [#not-implemented-hide:] MergePolicy merge_policy = 5 [(validate.rules).enum = {defined_only: true}]; - - // Generic secret list available to the plugin. - // Looks into SDS or static bootstrap configuration. - // - // See :repo:`StreamFilter API ` - // for more information about how to access secrets from Go. 
- repeated transport_sockets.tls.v3.SdsSecretConfig generic_secrets = 6; } message RouterPlugin { diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto similarity index 95% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto index 3e1b7a7..d7c3dd0 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/action/v3/action.proto @@ -16,7 +16,7 @@ import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.action.v3"; option java_outer_classname = "ActionProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/action/v3;actionv3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/action/v3;actionv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto similarity index 81% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto index d5b6782..47a2af1 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto +++ 
b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3/dubbo.proto @@ -9,7 +9,7 @@ import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.codecs.dubbo.v3"; option java_outer_classname = "DubboProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3;dubbov3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/codecs/dubbo/v3;dubbov3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto similarity index 94% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto index 2a57db6..973a190 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3/http1.proto @@ -11,7 +11,7 @@ import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.codecs.http1.v3"; option java_outer_classname = "Http1Proto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3;http1v3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/codecs/http1/v3;http1v3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; 
option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto similarity index 95% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto index 1399086..2490a0b 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/matcher/v3/matcher.proto @@ -12,7 +12,7 @@ import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.matcher.v3"; option java_outer_classname = "MatcherProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/matcher/v3;matcherv3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/matcher/v3;matcherv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto similarity index 93% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto index 67f14fe..a706415 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/router/v3/router.proto @@ -9,7 +9,7 @@ 
import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/router/v3;routerv3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/router/v3;routerv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto similarity index 92% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto index 9ae2560..7e35eab 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/generic_proxy.proto @@ -2,10 +2,10 @@ syntax = "proto3"; package envoy.extensions.filters.network.generic_proxy.v3; +import "contrib/envoy/extensions/filters/network/generic_proxy/v3/route.proto"; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/filters/network/generic_proxy/v3/route.proto"; import "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto"; import "xds/annotations/v3/status.proto"; @@ -16,7 +16,7 @@ import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.v3"; option java_outer_classname = "GenericProxyProto"; option 
java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/v3;generic_proxyv3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/v3;generic_proxyv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/route.proto b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/route.proto similarity index 97% rename from src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/route.proto rename to src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/route.proto index c298266..2ac8b0f 100644 --- a/src/main/proto/envoy/extensions/filters/network/generic_proxy/v3/route.proto +++ b/src/main/proto/contrib/envoy/extensions/filters/network/generic_proxy/v3/route.proto @@ -11,7 +11,7 @@ import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.generic_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/generic_proxy/v3;generic_proxyv3"; +option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/generic_proxy/v3;generic_proxyv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; diff --git a/src/main/proto/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/src/main/proto/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 926fdb5..b8ab1d7 100644 --- a/src/main/proto/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ 
b/src/main/proto/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -15,7 +15,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. // [#extension: envoy.filters.network.kafka_broker] -// [#next-free-field: 6] + message KafkaBroker { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; @@ -39,16 +39,6 @@ message KafkaBroker { // Broker address rewrite rules that match by broker ID. IdBasedBrokerRewriteSpec id_based_broker_address_rewrite_spec = 3; } - - // Optional list of allowed Kafka API keys. Only requests with provided API keys will be - // routed, otherwise the connection will be closed. No effect if empty. - repeated uint32 api_keys_allowed = 4 - [(validate.rules).repeated = {items {uint32 {lte: 32767 gte: 0}}}]; - - // Optional list of denied Kafka API keys. Requests with API keys matching this list will have - // the connection closed. No effect if empty. - repeated uint32 api_keys_denied = 5 - [(validate.rules).repeated = {items {uint32 {lte: 32767 gte: 0}}}]; } // Collection of rules matching by broker ID. 
diff --git a/src/main/proto/contrib/envoy/extensions/tap_sinks/udp_sink/v3alpha/udp_sink.proto b/src/main/proto/contrib/envoy/extensions/tap_sinks/udp_sink/v3alpha/udp_sink.proto deleted file mode 100644 index d98f6b9..0000000 --- a/src/main/proto/contrib/envoy/extensions/tap_sinks/udp_sink/v3alpha/udp_sink.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tap_sinks.udp_sink.v3alpha; - -import "envoy/config/core/v3/address.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tap_sinks.udp_sink.v3alpha"; -option java_outer_classname = "UdpSinkProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/tap_sinks/udp_sink/v3alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Udp sink configuration] -// [#extension: envoy.tap_sinks.udp_sink] - -// Udp sink configuration. -message UdpSink { - // Configure UDP Address. 
- config.core.v3.SocketAddress udp_address = 1; -} diff --git a/src/main/proto/contrib/envoy/extensions/upstreams/http/tcp/golang/v3alpha/golang.proto b/src/main/proto/contrib/envoy/extensions/upstreams/http/tcp/golang/v3alpha/golang.proto deleted file mode 100644 index 61abe46..0000000 --- a/src/main/proto/contrib/envoy/extensions/upstreams/http/tcp/golang/v3alpha/golang.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.upstreams.http.tcp.golang.v3alpha; - -import "google/protobuf/any.proto"; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.golang.v3alpha"; -option java_outer_classname = "GolangProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/upstreams/http/tcp/golang/v3alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; -option (xds.annotations.v3.file_status).work_in_progress = true; - -// [#protodoc-title: Golang] -// -// This bridge enables an Http client to connect to a TCP server via a Golang plugin, facilitating Protocol Convert from HTTP to any RPC protocol in Envoy. -// -// For an overview of the Golang HTTP TCP bridge please see the :ref:`configuration reference documentation `. -// [#extension: envoy.upstreams.http.tcp.golang] - -// [#extension-category: envoy.upstreams] -message Config { - // Globally unique ID for a dynamic library file. - string library_id = 1 [(validate.rules).string = {min_len: 1}]; - - // Path to a dynamic library implementing the - // :repo:`HttpTcpBridge API ` - // interface. - string library_path = 2 [(validate.rules).string = {min_len: 1}]; - - // Globally unique name of the Go plugin. 
- // - // This name **must** be consistent with the name registered in ``tcp::RegisterHttpTcpBridgeFactoryAndConfigParser`` - // - string plugin_name = 3 [(validate.rules).string = {min_len: 1}]; - - // Configuration for the Go plugin. - // - // .. note:: - // This configuration is only parsed in the Golang plugin, and is therefore not validated - // by Envoy. - // - // See the :repo:`HttpTcpBridge API ` - // for more information about how the plugin's configuration data can be accessed. - // - google.protobuf.Any plugin_config = 4; -} diff --git a/src/main/proto/envoy/admin/v3/clusters.proto b/src/main/proto/envoy/admin/v3/clusters.proto index 9fab60d..a2bc4ca 100644 --- a/src/main/proto/envoy/admin/v3/clusters.proto +++ b/src/main/proto/envoy/admin/v3/clusters.proto @@ -41,24 +41,22 @@ message ClusterStatus { bool added_via_api = 2; // The success rate threshold used in the last interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is ``false``, all errors: externally and locally generated were used to calculate the threshold. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is ``true``, only externally generated errors were used to calculate the threshold. + // The threshold is used to eject hosts based on their success rate. See + // :ref:`Cluster outlier detection ` documentation for details. // - // * If :ref:`outlier_detection.split_external_local_origin_errors` - // is ``false``, all errors: externally and locally generated were used to calculate the threshold. - // * If :ref:`outlier_detection.split_external_local_origin_errors` - // is ``true``, only externally generated errors were used to calculate the threshold. - // - // The threshold is used to eject hosts based on their success rate. For more information, see the - // :ref:`Cluster outlier detection ` documentation. - // - // .. note:: - // - // This field may be omitted in any of the three following cases: - // - // 1. 
There were not enough hosts with enough request volume to proceed with success rate based outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no threshold for that - // interval. - // 3. Outlier detection is not enabled for this cluster. + // Note: this field may be omitted in any of the three following cases: // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. type.v3.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. @@ -69,18 +67,16 @@ message ClusterStatus { // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is ``true``. The threshold is used to eject hosts based on their success rate. + // See :ref:`Cluster outlier detection ` documentation for + // details. // - // For more information, see the :ref:`Cluster outlier detection ` documentation. - // - // .. note:: - // - // This field may be omitted in any of the three following cases: - // - // 1. There were not enough hosts with enough request volume to proceed with success rate based outlier ejection. - // 2. The threshold is computed to be < 0 because a negative value implies that there was no threshold for that - // interval. - // 3. Outlier detection is not enabled for this cluster. + // Note: this field may be omitted in any of the three following cases: // + // 1. There were not enough hosts with enough request volume to proceed with success rate based + // outlier ejection. + // 2. The threshold is computed to be < 0 because a negative value implies that there was no + // threshold for that interval. + // 3. Outlier detection is not enabled for this cluster. 
type.v3.Percent local_origin_success_rate_ejection_threshold = 5; // :ref:`Circuit breaking ` settings of the cluster. @@ -107,20 +103,19 @@ message HostStatus { // The host's current health status. HostHealthStatus health_status = 3; - // The success rate for this host during the last measurement interval. - // - // * If :ref:`outlier_detection.split_external_local_origin_errors` - // is ``false``, all errors: externally and locally generated were used in success rate calculation. - // * If :ref:`outlier_detection.split_external_local_origin_errors` - // is ``true``, only externally generated errors were used in success rate calculation. - // - // For more information, see the :ref:`Cluster outlier detection ` documentation. - // - // .. note:: - // - // The message will be missing if the host didn't receive enough traffic to calculate a reliable success rate, or - // if the cluster had too few hosts to apply outlier ejection based on success rate. + // Request success rate for this host over the last calculated interval. + // If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is ``false``, all errors: externally and locally generated were used in success rate + // calculation. If + // :ref:`outlier_detection.split_external_local_origin_errors` + // is ``true``, only externally generated errors were used in success rate calculation. + // See :ref:`Cluster outlier detection ` documentation for + // details. // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. type.v3.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. @@ -132,20 +127,18 @@ message HostStatus { // The host's priority. If not configured, the value defaults to 0 (highest priority). 
uint32 priority = 7; - // The success rate for this host during the last interval, considering only locally generated errors. Externally - // generated errors are treated as successes. - // - // This field is only relevant when + // Request success rate for this host over the last calculated + // interval when only locally originated errors are taken into account and externally originated + // errors were treated as success. + // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` - // is set to ``true``. - // - // For more information, see the :ref:`Cluster outlier detection ` documentation. - // - // .. note:: - // - // The message will be missing if the host didn’t receive enough traffic to compute a success rate, or if the - // cluster didn’t have enough hosts to perform outlier ejection based on success rate. + // is ``true``. + // See :ref:`Cluster outlier detection ` documentation for + // details. // + // Note: the message will not be present if host did not have enough request volume to calculate + // success rate or the cluster did not have enough hosts to run through success rate outlier + // ejection. type.v3.Percent local_origin_success_rate = 8; // locality of the host. diff --git a/src/main/proto/envoy/admin/v3/config_dump_shared.proto b/src/main/proto/envoy/admin/v3/config_dump_shared.proto index b34e004..8de77e1 100644 --- a/src/main/proto/envoy/admin/v3/config_dump_shared.proto +++ b/src/main/proto/envoy/admin/v3/config_dump_shared.proto @@ -39,14 +39,6 @@ enum ClientResourceStatus { // Client received this resource and replied with NACK. NACKED = 4; - - // Client received an error from the control plane. The attached config - // dump is the most recent accepted one. If no config is accepted yet, - // the attached config dump will be empty. - RECEIVED_ERROR = 5; - - // Client timed out waiting for the resource from the control plane. 
- TIMEOUT = 6; } message UpdateFailureState { diff --git a/src/main/proto/envoy/admin/v3/server_info.proto b/src/main/proto/envoy/admin/v3/server_info.proto index adf5ab4..4269d02 100644 --- a/src/main/proto/envoy/admin/v3/server_info.proto +++ b/src/main/proto/envoy/admin/v3/server_info.proto @@ -59,7 +59,7 @@ message ServerInfo { config.core.v3.Node node = 7; } -// [#next-free-field: 42] +// [#next-free-field: 41] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; @@ -125,9 +125,6 @@ message CommandLineOptions { // See :option:`--ignore-unknown-dynamic-fields` for details. bool ignore_unknown_dynamic_fields = 30; - // See :option:`--skip-deprecated-logs` for details. - bool skip_deprecated_logs = 41; - // See :option:`--admin-address-path` for details. string admin_address_path = 6; diff --git a/src/main/proto/envoy/config/accesslog/v3/accesslog.proto b/src/main/proto/envoy/config/accesslog/v3/accesslog.proto index 6753ab6..5599f80 100644 --- a/src/main/proto/envoy/config/accesslog/v3/accesslog.proto +++ b/src/main/proto/envoy/config/accesslog/v3/accesslog.proto @@ -152,38 +152,35 @@ message TraceableFilter { "envoy.config.filter.accesslog.v2.TraceableFilter"; } -// Filters requests based on runtime-configurable sampling rates. +// Filters for random sampling of requests. message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; - // Specifies a key used to look up a custom sampling rate from the runtime configuration. If a value is found for this - // key, it will override the default sampling rate specified in ``percent_sampled``. + // Runtime key to get an optional overridden numerator for use in the + // ``percent_sampled`` field. If found in runtime, this value will replace the + // default numerator. 
string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; - // Defines the default sampling percentage when no runtime override is present. If not specified, the default is - // **0%** (with a denominator of 100). + // The default sampling percentage. If not specified, defaults to 0% with + // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; - // Controls how sampling decisions are made. - // - // - Default behavior (``false``): - // - // * Uses the :ref:`x-request-id` as a consistent sampling pivot. - // * When :ref:`x-request-id` is present, sampling will be consistent - // across multiple hosts based on both the ``runtime_key`` and - // :ref:`x-request-id`. - // * Useful for tracking related requests across a distributed system. - // - // - When set to ``true`` or :ref:`x-request-id` is missing: - // - // * Sampling decisions are made randomly based only on the ``runtime_key``. - // * Useful in complex filter configurations (like nested - // :ref:`AndFilter`/ - // :ref:`OrFilter` blocks) where independent probability - // calculations are desired. - // * Can be used to implement logging kill switches with predictable probability distributions. - // + // By default, sampling pivots on the header + // :ref:`x-request-id` being + // present. If :ref:`x-request-id` + // is present, the filter will consistently sample across multiple hosts based + // on the runtime key value and the value extracted from + // :ref:`x-request-id`. If it is + // missing, or ``use_independent_randomness`` is set to true, the filter will + // randomly sample based on the runtime key value alone. + // ``use_independent_randomness`` can be used for logging kill switches within + // complex nested :ref:`AndFilter + // ` and :ref:`OrFilter + // ` blocks that are easier to + // reason about from a probability perspective (i.e., setting to true will + // cause the filter to behave like an independent random variable when + // composed within logical operator filters). 
bool use_independent_randomness = 3; } @@ -260,7 +257,6 @@ message ResponseFlagFilter { in: "DF" in: "DO" in: "DR" - in: "UDO" } } }]; diff --git a/src/main/proto/envoy/config/cluster/redis/redis_cluster.proto b/src/main/proto/envoy/config/cluster/redis/redis_cluster.proto index ec07961..78baa83 100644 --- a/src/main/proto/envoy/config/cluster/redis/redis_cluster.proto +++ b/src/main/proto/envoy/config/cluster/redis/redis_cluster.proto @@ -43,14 +43,14 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // address: foo.bar.com // port_value: 22120 // cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 +// name: envoy.clusters.redis +// typed_config: +// "@type": type.googleapis.com/google.protobuf.Struct +// value: +// cluster_refresh_rate: 30s +// cluster_refresh_timeout: 0.5s +// redirect_refresh_interval: 10s +// redirect_refresh_threshold: 10 // [#extension: envoy.clusters.redis] // [#next-free-field: 7] diff --git a/src/main/proto/envoy/config/cluster/v3/cluster.proto b/src/main/proto/envoy/config/cluster/v3/cluster.proto index 51180b1..5f347ad 100644 --- a/src/main/proto/envoy/config/cluster/v3/cluster.proto +++ b/src/main/proto/envoy/config/cluster/v3/cluster.proto @@ -45,7 +45,7 @@ message ClusterCollection { } // Configuration for a single upstream cluster. -// [#next-free-field: 59] +// [#next-free-field: 58] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; @@ -942,7 +942,6 @@ message Cluster { // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. // [#next-major-version: make this a list of typed extensions.] 
- // [#extension-category: envoy.upstream_options] map typed_extension_protocol_options = 36; // If the DNS refresh rate is specified and the cluster type is either @@ -954,34 +953,8 @@ message Cluster { // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. - // This field is deprecated in favor of using the :ref:`cluster_type` - // extension point and configuring it with :ref:`DnsCluster`. - // If :ref:`cluster_type` is configured with - // :ref:`DnsCluster`, this field will be ignored. - google.protobuf.Duration dns_refresh_rate = 16 [ - deprecated = true, - (validate.rules).duration = {gt {nanos: 1000000}}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // DNS jitter can be optionally specified if the cluster type is either - // :ref:`STRICT_DNS`, - // or :ref:`LOGICAL_DNS`. - // DNS jitter causes the cluster to refresh DNS entries later by a random amount of time to avoid a - // stampede of DNS requests. This value sets the upper bound (exclusive) for the random amount. - // There will be no jitter if this value is omitted. For cluster types other than - // :ref:`STRICT_DNS` - // and :ref:`LOGICAL_DNS` - // this setting is ignored. - // This field is deprecated in favor of using the :ref:`cluster_type` - // extension point and configuring it with :ref:`DnsCluster`. - // If :ref:`cluster_type` is configured with - // :ref:`DnsCluster`, this field will be ignored. - google.protobuf.Duration dns_jitter = 58 [ - deprecated = true, - (validate.rules).duration = {gte {}}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; + google.protobuf.Duration dns_refresh_rate = 16 + [(validate.rules).duration = {gt {nanos: 1000000}}]; // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, @@ -991,31 +964,16 @@ message Cluster { // other than :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS` this setting is // ignored. 
- // This field is deprecated in favor of using the :ref:`cluster_type` - // extension point and configuring it with :ref:`DnsCluster`. - // If :ref:`cluster_type` is configured with - // :ref:`DnsCluster`, this field will be ignored. - RefreshRate dns_failure_refresh_rate = 44 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + RefreshRate dns_failure_refresh_rate = 44; // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS // resolution. - // This field is deprecated in favor of using the :ref:`cluster_type` - // extension point and configuring it with :ref:`DnsCluster`. - // If :ref:`cluster_type` is configured with - // :ref:`DnsCluster`, this field will be ignored. - bool respect_dns_ttl = 39 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + bool respect_dns_ttl = 39; // The DNS IP address resolution policy. If this setting is not specified, the // value defaults to // :ref:`AUTO`. - // For logical and strict dns cluster, this field is deprecated in favor of using the - // :ref:`cluster_type` - // extension point and configuring it with :ref:`DnsCluster`. - // If :ref:`cluster_type` is configured with - // :ref:`DnsCluster`, this field will be ignored. DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; // If DNS resolvers are specified and the cluster type is either @@ -1055,9 +1013,6 @@ message Cluster { // During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exists, // when ``typed_dns_resolver_config`` is in place, Envoy will use it and ignore ``dns_resolution_config``. // When ``typed_dns_resolver_config`` is missing, the default behavior is in place. 
- // Also note that this field is deprecated for logical dns and strict dns clusters and will be ignored when - // :ref:`cluster_type` is configured with - // :ref:`DnsCluster`. // [#extension-category: envoy.network.dns_resolver] core.v3.TypedExtensionConfig typed_dns_resolver_config = 55; @@ -1196,13 +1151,12 @@ message Cluster { // from the LRS stream here.] core.v3.ConfigSource lrs_server = 42; - // A list of metric names from :ref:`ORCA load reports ` to propagate to LRS. - // - // If not specified, then ORCA load reports will not be propagated to LRS. + // [#not-implemented-hide:] + // A list of metric names from ORCA load reports to propagate to LRS. // // For map fields in the ORCA proto, the string will be of the form ``.``. // For example, the string ``named_metrics.foo`` will mean to look for the key ``foo`` in the ORCA - // :ref:`named_metrics ` field. + // ``named_metrics`` field. // // The special map key ``*`` means to report all entries in the map (e.g., ``named_metrics.*`` means to // report all entries in the ORCA named_metrics field). Note that this should be used only with trusted @@ -1345,7 +1299,7 @@ message TrackClusterStats { // If request_response_sizes is true, then the :ref:`histograms // ` tracking header and body sizes - // of requests and responses will be published. Additionally, number of headers in the requests and responses will be tracked. + // of requests and responses will be published. 
bool request_response_sizes = 2; // If true, some stats will be emitted per-endpoint, similar to the stats in admin ``/clusters`` diff --git a/src/main/proto/envoy/config/core/v3/address.proto b/src/main/proto/envoy/config/core/v3/address.proto index 38d74ef..d8d4788 100644 --- a/src/main/proto/envoy/config/core/v3/address.proto +++ b/src/main/proto/envoy/config/core/v3/address.proto @@ -50,7 +50,7 @@ message EnvoyInternalAddress { string endpoint_id = 2; } -// [#next-free-field: 8] +// [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; @@ -97,11 +97,6 @@ message SocketAddress { // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into // IPv6 space as ``::FFFF:``. bool ipv4_compat = 6; - - // The Linux network namespace to bind the socket to. If this is set, Envoy will - // create the socket in the specified network namespace. Only supported on Linux. - // [#not-implemented-hide:] - string network_namespace_filepath = 7; } message TcpKeepalive { diff --git a/src/main/proto/envoy/config/core/v3/base.proto b/src/main/proto/envoy/config/core/v3/base.proto index 48ff5e7..df91565 100644 --- a/src/main/proto/envoy/config/core/v3/base.proto +++ b/src/main/proto/envoy/config/core/v3/base.proto @@ -303,31 +303,12 @@ message RuntimeFeatureFlag { string runtime_key = 2 [(validate.rules).string = {min_len: 1}]; } -// Please use :ref:`KeyValuePair ` instead. -// [#not-implemented-hide:] message KeyValue { - // The key of the key/value pair. - string key = 1 [ - deprecated = true, - (validate.rules).string = {min_len: 1 max_bytes: 16384}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - - // The value of the key/value pair. - // - // The ``bytes`` type is used. This means if JSON or YAML is used to to represent the - // configuration, the value must be base64 encoded. This is unfriendly for users in most - // use scenarios of this message. 
- // - bytes value = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; -} - -message KeyValuePair { // The key of the key/value pair. string key = 1 [(validate.rules).string = {min_len: 1 max_bytes: 16384}]; // The value of the key/value pair. - google.protobuf.Value value = 2; + bytes value = 2; } // Key/value pair plus option to control append behavior. This is used to specify @@ -358,18 +339,8 @@ message KeyValueAppend { OVERWRITE_IF_EXISTS = 3; } - // The single key/value pair record to be appended or overridden. This field must be set. - KeyValuePair record = 3; - - // Key/value pair entry that this option to append or overwrite. This field is deprecated - // and please use :ref:`record ` - // as replacement. - // [#not-implemented-hide:] - KeyValue entry = 1 [ - deprecated = true, - (validate.rules).message = {skip: true}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; + // Key/value pair entry that this option to append or overwrite. + KeyValue entry = 1 [(validate.rules).message = {required: true}]; // Describes the action taken to append/overwrite the given value for an existing // key or to only add this key if it's absent. @@ -378,12 +349,10 @@ message KeyValueAppend { // Key/value pair to append or remove. message KeyValueMutation { - // Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set or - // the configuration will be rejected. + // Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set. KeyValueAppend append = 1; - // Key to remove. Only one of ``append`` or ``remove`` can be set or the configuration will be - // rejected. + // Key to remove. Only one of ``append`` or ``remove`` can be set. 
string remove = 2 [(validate.rules).string = {max_bytes: 16384}]; } @@ -484,7 +453,6 @@ message HeaderValueOption { message HeaderMap { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap"; - // A list of header names and their values. repeated HeaderValue headers = 1; } diff --git a/src/main/proto/envoy/config/core/v3/health_check.proto b/src/main/proto/envoy/config/core/v3/health_check.proto index fd4440d..821f042 100644 --- a/src/main/proto/envoy/config/core/v3/health_check.proto +++ b/src/main/proto/envoy/config/core/v3/health_check.proto @@ -375,13 +375,13 @@ message HealthCheck { // The default value for "healthy edge interval" is the same as the default interval. google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; - // Specifies the path to the :ref:`health check event log `. - // // .. attention:: - // This field is deprecated in favor of the extension - // :ref:`event_logger ` and - // :ref:`event_log_path ` - // in the file sink extension. + // This field is deprecated in favor of the extension + // :ref:`event_logger ` and + // :ref:`event_log_path ` + // in the file sink extension. + // + // Specifies the path to the :ref:`health check event log `. string event_log_path = 17 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; diff --git a/src/main/proto/envoy/config/core/v3/protocol.proto b/src/main/proto/envoy/config/core/v3/protocol.proto index a90c074..6dbff8c 100644 --- a/src/main/proto/envoy/config/core/v3/protocol.proto +++ b/src/main/proto/envoy/config/core/v3/protocol.proto @@ -3,7 +3,6 @@ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/extension.proto"; -import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; @@ -40,21 +39,24 @@ message QuicKeepAliveSettings { // // If zero, disable keepalive probing. 
// If absent, use the QUICHE default interval to probe. - google.protobuf.Duration max_interval = 1; + google.protobuf.Duration max_interval = 1 [(validate.rules).duration = { + lte {} + gte {seconds: 1} + }]; // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `. // // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect. // - // If absent, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. + // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time. google.protobuf.Duration initial_interval = 2 [(validate.rules).duration = { lte {} - gte {nanos: 1000000} + gte {seconds: 1} }]; } // QUIC protocol options which apply to both downstream and upstream connections. -// [#next-free-field: 10] +// [#next-free-field: 9] message QuicProtocolOptions { // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. @@ -109,10 +111,6 @@ message QuicProtocolOptions { lte {seconds: 600} gte {seconds: 1} }]; - - // Maximum packet length for QUIC connections. It refers to the largest size of a QUIC packet that can be transmitted over the connection. - // If not specified, one of the `default values in QUICHE `_ is used. 
- google.protobuf.UInt64Value max_packet_length = 9; } message UpstreamHttpProtocolOptions { @@ -124,9 +122,6 @@ message UpstreamHttpProtocolOptions { // header when :ref:`override_auto_sni_header ` // is set, as seen by the :ref:`router filter `. // Does nothing if a filter before the http router filter sets the corresponding metadata. - // - // See :ref:`SNI configuration ` for details on how this - // interacts with other validation options. bool auto_sni = 1; // Automatic validate upstream presented certificate for new upstream connections based on the @@ -134,9 +129,6 @@ message UpstreamHttpProtocolOptions { // is set, as seen by the :ref:`router filter `. // This field is intended to be set with ``auto_sni`` field. // Does nothing if a filter before the http router filter sets the corresponding metadata. - // - // See :ref:`validation configuration ` for how this interacts with - // other validation options. bool auto_san_validation = 2; // An optional alternative to the host/authority header to be used for setting the SNI value. @@ -213,7 +205,7 @@ message AlternateProtocolsCacheOptions { repeated string canonical_suffixes = 5; } -// [#next-free-field: 8] +// [#next-free-field: 7] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; @@ -263,31 +255,11 @@ message HttpProtocolOptions { // `. google.protobuf.Duration max_connection_duration = 3; - // The maximum number of headers (request headers if configured on HttpConnectionManager, - // response headers when configured on a cluster). - // If unconfigured, the default maximum number of headers allowed is 100. - // The default value for requests can be overridden by setting runtime key ``envoy.reloadable_features.max_request_headers_count``. - // The default value for responses can be overridden by setting runtime key ``envoy.reloadable_features.max_response_headers_count``. 
- // Downstream requests that exceed this limit will receive a 431 response for HTTP/1.x and cause a stream - // reset for HTTP/2. - // Upstream responses that exceed this limit will result in a 503 response. + // The maximum number of headers. If unconfigured, the default + // maximum number of request headers allowed is 100. Requests that exceed this limit will receive + // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; - // The maximum size of response headers. - // If unconfigured, the default is 60 KiB, except for HTTP/1 response headers which have a default - // of 80KiB. - // The default value can be overridden by setting runtime key ``envoy.reloadable_features.max_response_headers_size_kb``. - // Responses that exceed this limit will result in a 503 response. - // In Envoy, this setting is only valid when configured on an upstream cluster, not on the - // :ref:`HTTP Connection Manager - // `. - // - // Note: currently some protocol codecs impose limits on the maximum size of a single header: - // HTTP/2 (when using nghttp2) limits a single header to around 100kb. - // HTTP/3 limits a single header to around 1024kb. - google.protobuf.UInt32Value max_response_headers_kb = 7 - [(validate.rules).uint32 = {lte: 8192 gt: 0}]; - // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. google.protobuf.Duration max_stream_duration = 4; @@ -306,7 +278,7 @@ message HttpProtocolOptions { google.protobuf.UInt32Value max_requests_per_connection = 6; } -// [#next-free-field: 12] +// [#next-free-field: 11] message Http1ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http1ProtocolOptions"; @@ -418,14 +390,6 @@ message Http1ProtocolOptions { // ` // to reject custom methods. 
bool allow_custom_methods = 10 [(xds.annotations.v3.field_status).work_in_progress = true]; - - // Ignore HTTP/1.1 upgrade values matching any of the supplied matchers. - // - // .. note:: - // - // ``h2c`` upgrades are always removed for backwards compatibility, regardless of the - // value in this setting. - repeated type.matcher.v3.StringMatcher ignore_http_11_upgrade = 11; } message KeepaliveSettings { @@ -458,7 +422,7 @@ message KeepaliveSettings { [(validate.rules).duration = {gte {nanos: 1000000}}]; } -// [#next-free-field: 18] +// [#next-free-field: 17] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Http2ProtocolOptions"; @@ -642,9 +606,6 @@ message Http2ProtocolOptions { // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2. google.protobuf.BoolValue use_oghttp2_codec = 16 [(xds.annotations.v3.field_status).work_in_progress = true]; - - // Configure the maximum amount of metadata than can be handled per stream. Defaults to 1 MB. - google.protobuf.UInt64Value max_metadata_size = 17; } // [#not-implemented-hide:] @@ -656,7 +617,7 @@ message GrpcProtocolOptions { } // A message which allows using HTTP/3. -// [#next-free-field: 8] +// [#next-free-field: 7] message Http3ProtocolOptions { QuicProtocolOptions quic_protocol_options = 1; @@ -683,14 +644,6 @@ message Http3ProtocolOptions { // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more // information. bool allow_metadata = 6; - - // [#not-implemented-hide:] Hiding until Envoy has full HTTP/3 upstream support. - // Still under implementation. DO NOT USE. - // - // Disables QPACK compression related features for HTTP/3 including: - // No huffman encoding, zero dynamic table capacity and no cookie crumbing. - // This can be useful for trading off CPU vs bandwidth when an upstream HTTP/3 connection multiplexes multiple downstream connections. 
- bool disable_qpack = 7; } // A message to control transformations to the :scheme header diff --git a/src/main/proto/envoy/config/core/v3/proxy_protocol.proto b/src/main/proto/envoy/config/core/v3/proxy_protocol.proto index 564e76c..32747dd 100644 --- a/src/main/proto/envoy/config/core/v3/proxy_protocol.proto +++ b/src/main/proto/envoy/config/core/v3/proxy_protocol.proto @@ -32,15 +32,6 @@ message ProxyProtocolPassThroughTLVs { repeated uint32 tlv_type = 2 [(validate.rules).repeated = {items {uint32 {lt: 256}}}]; } -// Represents a single Type-Length-Value (TLV) entry. -message TlvEntry { - // The type of the TLV. Must be a uint8 (0-255) as per the Proxy Protocol v2 specification. - uint32 type = 1 [(validate.rules).uint32 = {lt: 256}]; - - // The value of the TLV. Must be at least one byte long. - bytes value = 2 [(validate.rules).bytes = {min_len: 1}]; -} - message ProxyProtocolConfig { enum Version { // PROXY protocol version 1. Human readable format. @@ -56,35 +47,4 @@ message ProxyProtocolConfig { // This config controls which TLVs can be passed to upstream if it is Proxy Protocol // V2 header. If there is no setting for this field, no TLVs will be passed through. ProxyProtocolPassThroughTLVs pass_through_tlvs = 2; - - // This config allows additional TLVs to be included in the upstream PROXY protocol - // V2 header. Unlike ``pass_through_tlvs``, which passes TLVs from the downstream request, - // ``added_tlvs`` provides an extension mechanism for defining new TLVs that are included - // with the upstream request. These TLVs may not be present in the downstream request and - // can be defined at either the transport socket level or the host level to provide more - // granular control over the TLVs that are included in the upstream request. - // - // Host-level TLVs are specified in the ``metadata.typed_filter_metadata`` field under the - // ``envoy.transport_sockets.proxy_protocol`` namespace. - // - // .. 
literalinclude:: /_configs/repo/proxy_protocol.yaml - // :language: yaml - // :lines: 49-57 - // :linenos: - // :lineno-start: 49 - // :caption: :download:`proxy_protocol.yaml ` - // - // **Precedence behavior**: - // - // - When a TLV is defined at both the host level and the transport socket level, the value - // from the host level configuration takes precedence. This allows users to define default TLVs - // at the transport socket level and override them at the host level. - // - Any TLV defined in the ``pass_through_tlvs`` field will be overridden by either the host-level - // or transport socket-level TLV. - repeated TlvEntry added_tlvs = 3; -} - -message PerHostConfig { - // Enables per-host configuration for Proxy Protocol. - repeated TlvEntry added_tlvs = 1; } diff --git a/src/main/proto/envoy/config/core/v3/socket_cmsg_headers.proto b/src/main/proto/envoy/config/core/v3/socket_cmsg_headers.proto deleted file mode 100644 index cc3e58e..0000000 --- a/src/main/proto/envoy/config/core/v3/socket_cmsg_headers.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.config.core.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.config.core.v3"; -option java_outer_classname = "SocketCmsgHeadersProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Socket CMSG headers] - -// Configuration for socket cmsg headers. -// See `:ref:CMSG `_ for further information. -message SocketCmsgHeaders { - // cmsg level. Default is unset. - google.protobuf.UInt32Value level = 1; - - // cmsg type. Default is unset. - google.protobuf.UInt32Value type = 2; - - // Expected size of cmsg value. Default is zero. 
- uint32 expected_size = 3; -} diff --git a/src/main/proto/envoy/config/core/v3/socket_option.proto b/src/main/proto/envoy/config/core/v3/socket_option.proto index ad73d72..44f1ce3 100644 --- a/src/main/proto/envoy/config/core/v3/socket_option.proto +++ b/src/main/proto/envoy/config/core/v3/socket_option.proto @@ -36,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // :ref:`admin's ` socket_options etc. // // It should be noted that the name or level may have different values on different platforms. -// [#next-free-field: 8] +// [#next-free-field: 7] message SocketOption { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption"; @@ -51,29 +51,6 @@ message SocketOption { STATE_LISTENING = 2; } - // The `socket type `_ to apply the socket option to. - // Only one field should be set. If multiple fields are set, the precedence order will determine - // the selected one. If none of the fields is set, the socket option will be applied to all socket types. - // - // For example: - // If :ref:`stream ` is set, - // it takes precedence over :ref:`datagram `. - message SocketType { - // The stream socket type. - message Stream { - } - - // The datagram socket type. - message Datagram { - } - - // Apply the socket option to the stream socket type. - Stream stream = 1; - - // Apply the socket option to the datagram socket type. - Datagram datagram = 2; - } - // An optional name to give this socket option for debugging, etc. // Uniqueness is not required and no special meaning is assumed. string description = 1; @@ -97,10 +74,6 @@ message SocketOption { // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; - - // Apply the socket option to the specified `socket type `_. - // If not specified, the socket option will be applied to all socket types. 
- SocketType type = 7; } message SocketOptionsOverride { diff --git a/src/main/proto/envoy/config/core/v3/substitution_format_string.proto b/src/main/proto/envoy/config/core/v3/substitution_format_string.proto index 3edbf5f..abe8afa 100644 --- a/src/main/proto/envoy/config/core/v3/substitution_format_string.proto +++ b/src/main/proto/envoy/config/core/v3/substitution_format_string.proto @@ -22,12 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Optional configuration options to be used with json_format. message JsonFormatOptions { // The output JSON string properties will be sorted. - // - // .. note:: - // As the properties are always sorted, this option has no effect and is deprecated. - // - bool sort_properties = 1 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + bool sort_properties = 1; } // Configuration to use multiple :ref:`command operators ` @@ -106,12 +101,6 @@ message SubstitutionFormatString { // * for ``text_format``, the output of the empty operator is changed from ``-`` to an // empty string, so that empty values are omitted entirely. // * for ``json_format`` the keys with null values are omitted in the output structure. - // - // .. note:: - // This option does not work perfectly with ``json_format`` as keys with ``null`` values - // will still be included in the output. See https://github.com/envoyproxy/envoy/issues/37941 - // for more details. - // bool omit_empty_values = 3; // Specify a ``content_type`` field. 
diff --git a/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto b/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto index eacc555..6673691 100644 --- a/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto +++ b/src/main/proto/envoy/config/endpoint/v3/endpoint_components.proto @@ -9,9 +9,6 @@ import "envoy/config/core/v3/health_check.proto"; import "google/protobuf/wrappers.proto"; -import "xds/core/v3/collection_entry.proto"; - -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -136,24 +133,14 @@ message LbEndpoint { google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}]; } -// LbEndpoint list collection. Entries are `LbEndpoint` resources or references. // [#not-implemented-hide:] -message LbEndpointCollection { - xds.core.v3.CollectionEntry entries = 1; -} - -// A configuration for an LEDS collection. +// A configuration for a LEDS collection. message LedsClusterLocalityConfig { // Configuration for the source of LEDS updates for a Locality. core.v3.ConfigSource leds_config = 1; - // The name of the LbEndpoint collection resource. - // - // If the name ends in ``/*``, it indicates an LbEndpoint glob collection, - // which is supported only in the xDS incremental protocol variants. - // Otherwise, it indicates an LbEndpointCollection list collection. - // - // Envoy currently supports only glob collections. + // The xDS transport protocol glob collection resource name. + // The service is only supported in delta xDS (incremental) mode. string leds_collection_name = 2; } @@ -178,20 +165,18 @@ message LocalityLbEndpoints { core.v3.Metadata metadata = 9; // The group of endpoints belonging to the locality specified. - // This is ignored if :ref:`leds_cluster_locality_config - // ` is set. 
+ // [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be + // deprecated and replaced by ``load_balancer_endpoints``.] repeated LbEndpoint lb_endpoints = 2; + // [#not-implemented-hide:] oneof lb_config { - // [#not-implemented-hide:] - // Not implemented and deprecated. - LbEndpointList load_balancer_endpoints = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // The group of endpoints belonging to the locality. + // [#comment:TODO(adisuissa): Once LEDS is implemented the ``lb_endpoints`` field + // needs to be deprecated.] + LbEndpointList load_balancer_endpoints = 7; // LEDS Configuration for the current locality. - // If this is set, the :ref:`lb_endpoints - // ` - // field is ignored. LedsClusterLocalityConfig leds_cluster_locality_config = 8; } diff --git a/src/main/proto/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/src/main/proto/envoy/config/filter/http/jwt_authn/v2alpha/config.proto index 87ed741..e87c947 100644 --- a/src/main/proto/envoy/config/filter/http/jwt_authn/v2alpha/config.proto +++ b/src/main/proto/envoy/config/filter/http/jwt_authn/v2alpha/config.proto @@ -36,7 +36,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // * issuer: the principal that issues the JWT. It has to match the one from the token. // * allowed audiences: the ones in the token have to be listed here. // * how to fetch public key JWKS to verify the token signature. -// * how to extract the JWT in the request. +// * how to extract JWT token in the request. // * how to pass successfully verified token payload. // // Example: @@ -137,7 +137,7 @@ message JwtProvider { // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations // its provider specified or from the default locations. // - // Specify the HTTP headers to extract the JWT. For examples, following config: + // Specify the HTTP headers to extract JWT token. 
For examples, following config: // // .. code-block:: yaml // @@ -209,7 +209,7 @@ message RemoteJwks { google.protobuf.Duration cache_duration = 2; } -// This message specifies a header location to extract JWT. +// This message specifies a header location to extract JWT token. message JwtHeader { // The HTTP header name. string name = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -305,7 +305,7 @@ message JwtRequirement { // The requirement is always satisfied even if JWT is missing or the JWT // verification fails. A typical usage is: this filter is used to only verify // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWTs will be verified. + // will make decision. In this mode, all JWT tokens will be verified. google.protobuf.Empty allow_missing_or_failed = 5; // The requirement is satisfied if JWT is missing, but failed if JWT is @@ -391,7 +391,8 @@ message FilterStateRule { // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the *name* field above. - map requires = 3; + map + requires = 3; } // This is the Envoy HTTP filter config for JWT authentication. diff --git a/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto index 33a3316..82de0bc 100644 --- a/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto +++ b/src/main/proto/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto @@ -163,6 +163,14 @@ message HttpConnectionManager { // Configuration for an external tracing provider. // If not specified, no tracing will be performed. + // + // .. attention:: + // Please be aware that *envoy.tracers.opencensus* provider can only be configured once + // in Envoy lifetime. 
+ // Any attempts to reconfigure it or to use different configurations for different HCM filters + // will be rejected. + // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + // on OpenCensus side. trace.v2.Tracing.Http provider = 9; } diff --git a/src/main/proto/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/src/main/proto/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto index e5f86da..829726a 100644 --- a/src/main/proto/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ b/src/main/proto/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto @@ -17,7 +17,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. // [#extension: envoy.filters.network.kafka_broker] -// [#next-free-field: 6] + message KafkaBroker { // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -38,16 +38,6 @@ message KafkaBroker { // Broker address rewrite rules that match by broker ID. IdBasedBrokerRewriteSpec id_based_broker_address_rewrite_spec = 3; } - - // Optional list of allowed Kafka API keys. Only requests with provided API keys will be - // routed, otherwise the connection will be closed. No effect if empty. - repeated uint32 api_keys_allowed = 4 - [(validate.rules).repeated = {items {uint32 {lte: 32767 gte: 0}}}]; - - // Optional list of denied Kafka API keys. Requests with API keys matching this list will have - // the connection closed. No effect if empty. - repeated uint32 api_keys_denied = 5 - [(validate.rules).repeated = {items {uint32 {lte: 32767 gte: 0}}}]; } // Collection of rules matching by broker ID. 
diff --git a/src/main/proto/envoy/config/grpc_credential/v3/aws_iam.proto b/src/main/proto/envoy/config/grpc_credential/v3/aws_iam.proto index 5137602..923d880 100644 --- a/src/main/proto/envoy/config/grpc_credential/v3/aws_iam.proto +++ b/src/main/proto/envoy/config/grpc_credential/v3/aws_iam.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.config.grpc_credential.v3; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -15,11 +14,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin -// .. warning:: -// -// This extension is deprecated and will be deleted in a future Envoy release, no -// later than Envoy 1.35, but possibly sooner. -// // [#extension: envoy.grpc_credentials.aws_iam] message AwsIamConfig { @@ -31,16 +25,12 @@ message AwsIamConfig { // of the Grpc endpoint. // // Example: appmesh - string service_name = 1 [ - deprecated = true, - (validate.rules).string = {min_len: 1}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; + string service_name = 1 [(validate.rules).string = {min_len: 1}]; // The `region `_ hosting the Grpc // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment // variable. 
// // Example: us-west-2 - string region = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + string region = 2; } diff --git a/src/main/proto/envoy/config/listener/v3/listener.proto b/src/main/proto/envoy/config/listener/v3/listener.proto index 4bcd388..9381d4e 100644 --- a/src/main/proto/envoy/config/listener/v3/listener.proto +++ b/src/main/proto/envoy/config/listener/v3/listener.proto @@ -247,10 +247,10 @@ message Listener { google.protobuf.BoolValue freebind = 11; // Additional socket options that may not be present in Envoy source code or - // precompiled binaries. - // It is not allowed to update the socket options for any existing address if + // precompiled binaries. The socket options can be updated for a listener when // :ref:`enable_reuse_port ` - // is ``false`` to avoid the conflict when creating new sockets for the listener. + // is ``true``. Otherwise, if socket options change during a listener update the update will be rejected + // to make it clear that the options were not updated. repeated core.v3.SocketOption socket_options = 13; // Whether the listener should accept TCP Fast Open (TFO) connections. diff --git a/src/main/proto/envoy/config/listener/v3/listener_components.proto b/src/main/proto/envoy/config/listener/v3/listener_components.proto index 33eb349..2adb8bc 100644 --- a/src/main/proto/envoy/config/listener/v3/listener_components.proto +++ b/src/main/proto/envoy/config/listener/v3/listener_components.proto @@ -201,9 +201,24 @@ message FilterChainMatch { message FilterChain { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain"; - reserved 2, 8; + // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, + // a filter chain will be built on-demand. 
+ // On-demand filter chains help speedup the warming up of listeners since the building and initialization of + // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. + // Filter chains that are not often used can be set as on-demand. + message OnDemandConfiguration { + // The timeout to wait for filter chain placeholders to complete rebuilding. + // 1. If this field is set to 0, timeout is disabled. + // 2. If not specified, a default timeout of 15s is used. + // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. + // Upon failure or timeout, all connections related to this filter chain will be closed. + // Rebuilding will start again on the next new connection. + google.protobuf.Duration rebuild_timeout = 1; + } + + reserved 2; - reserved "tls_context", "on_demand_configuration"; + reserved "tls_context"; // The criteria to use when matching a connection to this filter chain. FilterChainMatch filter_chain_match = 1; @@ -254,6 +269,11 @@ message FilterChain { // ` // requires that filter chains are uniquely named within a listener. string name = 7; + + // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand. + // If this field is not empty, the filter chain will be built on-demand. + // Otherwise, the filter chain will be built normally and block listener warming. + OnDemandConfiguration on_demand_configuration = 8; } // Listener filter chain match configuration. 
This is a recursive structure which allows complex diff --git a/src/main/proto/envoy/config/listener/v3/quic_config.proto b/src/main/proto/envoy/config/listener/v3/quic_config.proto index 6c0a5bd..3ddebe9 100644 --- a/src/main/proto/envoy/config/listener/v3/quic_config.proto +++ b/src/main/proto/envoy/config/listener/v3/quic_config.proto @@ -5,7 +5,6 @@ package envoy.config.listener.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; -import "envoy/config/core/v3/socket_cmsg_headers.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; @@ -25,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener config] // Configuration specific to the UDP QUIC listener. -// [#next-free-field: 14] +// [#next-free-field: 12] message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; @@ -87,16 +86,4 @@ message QuicProtocolOptions { // If not specified, no debug visitor will be attached to connections. // [#extension-category: envoy.quic.connection_debug_visitor] core.v3.TypedExtensionConfig connection_debug_visitor_config = 11; - - // Configure a type of UDP cmsg to pass to listener filters via QuicReceivedPacket. - // Both level and type must be specified for cmsg to be saved. - // Cmsg may be truncated or omitted if expected size is not set. - // If not specified, no cmsg will be saved to QuicReceivedPacket. - repeated core.v3.SocketCmsgHeaders save_cmsg_config = 12 - [(validate.rules).repeated = {max_items: 1}]; - - // If true, the listener will reject connection-establishing packets at the - // QUIC layer by replying with an empty version negotiation packet to the - // client. 
- bool reject_new_connections = 13; } diff --git a/src/main/proto/envoy/config/overload/v3/overload.proto b/src/main/proto/envoy/config/overload/v3/overload.proto index 1f267c1..d3b8b01 100644 --- a/src/main/proto/envoy/config/overload/v3/overload.proto +++ b/src/main/proto/envoy/config/overload/v3/overload.proto @@ -103,12 +103,6 @@ message ScaleTimersOverloadActionConfig { // This affects the value of // :ref:`FilterChain.transport_socket_connect_timeout `. TRANSPORT_SOCKET_CONNECT = 3; - - // Adjusts the max connection duration timer for downstream HTTP connections. - // This affects the value of - // :ref:`HttpConnectionManager.common_http_protocol_options.max_connection_duration - // `. - HTTP_DOWNSTREAM_CONNECTION_MAX = 4; } message ScaleTimer { diff --git a/src/main/proto/envoy/config/rbac/v3/rbac.proto b/src/main/proto/envoy/config/rbac/v3/rbac.proto index 0f17788..8d98fd7 100644 --- a/src/main/proto/envoy/config/rbac/v3/rbac.proto +++ b/src/main/proto/envoy/config/rbac/v3/rbac.proto @@ -28,14 +28,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] -enum MetadataSource { - // Query :ref:`dynamic metadata ` - DYNAMIC = 0; - - // Query :ref:`route metadata ` - ROUTE = 1; -} - // Role Based Access Control (RBAC) provides service-level and method-level access control for a // service. Requests are allowed or denied based on the ``action`` and whether a matching policy is // found. For instance, if the action is ALLOW and a matching policy is found the request should be @@ -201,29 +193,8 @@ message Policy { [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } -// SourcedMetadata enables matching against metadata from different sources in the request processing -// pipeline. It extends the base MetadataMatcher functionality by allowing specification of where the -// metadata should be sourced from, rather than only matching against dynamic metadata. 
-// -// The matcher can be configured to look up metadata from: -// -// * Dynamic metadata: Runtime metadata added by filters during request processing -// * Route metadata: Static metadata configured on the route entry -// -message SourcedMetadata { - // Metadata matcher configuration that defines what metadata to match against. This includes the filter name, - // metadata key path, and expected value. - type.matcher.v3.MetadataMatcher metadata_matcher = 1 - [(validate.rules).message = {required: true}]; - - // Specifies which metadata source should be used for matching. If not set, - // defaults to DYNAMIC (dynamic metadata). Set to ROUTE to match against - // static metadata configured on the route entry. - MetadataSource metadata_source = 2 [(validate.rules).enum = {defined_only: true}]; -} - // Permission defines an action (or actions) that a principal can take. -// [#next-free-field: 15] +// [#next-free-field: 14] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; @@ -266,10 +237,8 @@ message Permission { // A port number range that describes a range of destination ports connecting to. type.v3.Int32Range destination_port_range = 11; - // Metadata that describes additional information about the action. This field is deprecated; please use - // :ref:`sourced_metadata` instead. - type.matcher.v3.MetadataMatcher metadata = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // Metadata that describes additional information about the action. + type.matcher.v3.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of // ``not_rule`` would match, this permission would not match. Conversely, if @@ -305,16 +274,12 @@ message Permission { // URI template path matching. 
// [#extension-category: envoy.path.match] core.v3.TypedExtensionConfig uri_template = 13; - - // Matches against metadata from either dynamic state or route configuration. Preferred over the - // ``metadata`` field as it provides more flexibility in metadata source selection. - SourcedMetadata sourced_metadata = 14; } } // Principal defines an identity or a group of identities for a downstream // subject. -// [#next-free-field: 15] +// [#next-free-field: 13] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; @@ -328,10 +293,6 @@ message Principal { } // Authentication attributes for a downstream. - // It is recommended to NOT use this type, but instead use - // :ref:`MTlsAuthenticated `, - // configured via :ref:`custom `, - // which should be used for most use cases due to its improved security. message Authenticated { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Authenticated"; @@ -340,11 +301,7 @@ message Principal { // The name of the principal. If set, The URI SAN or DNS SAN in that order // is used from the certificate, otherwise the subject field is used. If - // unset, it applies to any user that is allowed by the downstream TLS configuration. - // If :ref:`require_client_certificate ` - // is false or :ref:`trust_chain_verification ` - // is set to :ref:`ACCEPT_UNTRUSTED `, - // then no authentication is required. + // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } @@ -363,10 +320,6 @@ message Principal { bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. - // It is recommended to NOT use this field, but instead use - // :ref:`MTlsAuthenticated `, - // configured via :ref:`custom `, - // which should be used for most use cases due to its improved security. 
Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. @@ -403,10 +356,8 @@ message Principal { // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.v3.PathMatcher url_path = 9; - // Metadata that describes additional information about the principal. This field is deprecated; please use - // :ref:`sourced_metadata` instead. - type.matcher.v3.MetadataMatcher metadata = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // Metadata that describes additional information about the principal. + type.matcher.v3.MetadataMatcher metadata = 7; // Identifies the principal using a filter state object. type.matcher.v3.FilterStateMatcher filter_state = 12; @@ -415,14 +366,6 @@ message Principal { // ``not_id`` would match, this principal would not match. Conversely, if the // value of ``not_id`` would not match, this principal would match. Principal not_id = 8; - - // Matches against metadata from either dynamic state or route configuration. Preferred over the - // ``metadata`` field as it provides more flexibility in metadata source selection. - SourcedMetadata sourced_metadata = 13; - - // Extension for configuring custom principals for RBAC. 
- // [#extension-category: envoy.rbac.principals] - core.v3.TypedExtensionConfig custom = 14; } } diff --git a/src/main/proto/envoy/config/route/v3/route_components.proto b/src/main/proto/envoy/config/route/v3/route_components.proto index b12d510..7e2ff33 100644 --- a/src/main/proto/envoy/config/route/v3/route_components.proto +++ b/src/main/proto/envoy/config/route/v3/route_components.proto @@ -5,7 +5,6 @@ package envoy.config.route.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; -import "envoy/type/matcher/v3/filter_state.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/regex.proto"; import "envoy/type/matcher/v3/string.proto"; @@ -18,6 +17,7 @@ import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; +import "xds/annotations/v3/status.proto"; import "xds/type/matcher/v3/matcher.proto"; import "envoy/annotations/deprecation.proto"; @@ -92,12 +92,13 @@ message VirtualHost { // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. // Only one of this and ``matcher`` can be specified. - repeated Route routes = 3 [(udpa.annotations.field_migrate).oneof_promotion = "route_selection"]; + repeated Route routes = 3; + // [#next-major-version: This should be included in a oneof with routes wrapped in a message.] // The match tree to use when resolving route actions for incoming requests. Only one of this and ``routes`` // can be specified. xds.type.matcher.v3.Matcher matcher = 21 - [(udpa.annotations.field_migrate).oneof_promotion = "route_selection"]; + [(xds.annotations.v3.field_status).work_in_progress = true]; // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. 
@@ -511,7 +512,7 @@ message ClusterSpecifierPlugin { bool is_optional = 2; } -// [#next-free-field: 17] +// [#next-free-field: 16] message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch"; @@ -662,12 +663,6 @@ message RouteMatch { // If the number of specified dynamic metadata matchers is nonzero, they all must match the // dynamic metadata for a match to occur. repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13; - - // Specifies a set of filter state matchers on which the route should match. - // The router will check the filter state against all the specified filter state matchers. - // If the number of specified filter state matchers is nonzero, they all must match the - // filter state for a match to occur. - repeated type.matcher.v3.FilterStateMatcher filter_state = 16; } // Cors policy configuration. @@ -822,10 +817,7 @@ message RouteAction { // value, the request will be mirrored. core.v3.RuntimeFractionalPercent runtime_fraction = 3; - // Specifies whether the trace span for the shadow request should be sampled. If this field is not explicitly set, - // the shadow request will inherit the sampling decision of its parent span. This ensures consistency with the trace - // sampling policy of the original request and prevents oversampling, especially in scenarios where runtime sampling - // is disabled. + // Determines if the trace span should be sampled. Defaults to true. google.protobuf.BoolValue trace_sampled = 4; // Disables appending the ``-shadow`` suffix to the shadowed ``Host`` header. Defaults to ``false``. @@ -1878,11 +1870,10 @@ message VirtualCluster { // Global rate limiting :ref:`architecture overview `. // Also applies to Local rate limiting :ref:`using descriptors `. 
-// [#next-free-field: 7] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 13] + // [#next-free-field: 12] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1939,48 +1930,9 @@ message RateLimit { // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - // Controls the behavior when the specified header is not present in the request. - // - // If set to ``false`` (default): - // - // * Envoy does **NOT** call the rate limiting service for this descriptor. - // * Useful if the header is optional and you prefer to skip rate limiting when it's absent. - // - // If set to ``true``: - // - // * Envoy calls the rate limiting service but omits this descriptor if the header is missing. - // * Useful if you want Envoy to enforce rate limiting even when the header is not present. - // - bool skip_if_absent = 3; - } - - // The following descriptor entry is appended when a query parameter contains a key that matches the - // ``query_parameter_name``: - // - // .. code-block:: cpp - // - // ("", "") - message QueryParameters { - // The name of the query parameter to use for rate limiting. Value of this query parameter is used to populate - // the value of the descriptor entry for the descriptor_key. - string query_parameter_name = 1 [(validate.rules).string = {min_len: 1}]; - - // The key to use when creating the rate limit descriptor entry. his descriptor key will be used to identify the - // rate limit rule in the rate limiting service. - string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; - - // Controls the behavior when the specified query parameter is not present in the request. - // - // If set to ``false`` (default): - // - // * Envoy does **NOT** call the rate limiting service for this descriptor. 
- // * Useful if the query parameter is optional and you prefer to skip rate limiting when it's absent. - // - // If set to ``true``: - // - // * Envoy calls the rate limiting service but omits this descriptor if the query parameter is missing. - // * Useful if you want Envoy to enforce rate limiting even when the query parameter is not present. - // + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. bool skip_if_absent = 3; } @@ -2115,19 +2067,9 @@ message RateLimit { // Source of metadata Source source = 4 [(validate.rules).enum = {defined_only: true}]; - // Controls the behavior when the specified ``metadata_key`` is empty and ``default_value`` is not set. - // - // If set to ``false`` (default): - // - // * Envoy does **NOT** call the rate limiting service for this descriptor. - // * Useful if the metadata is optional and you prefer to skip rate limiting when it's absent. - // - // If set to ``true``: - // - // * Envoy calls the rate limiting service but omits this descriptor if the ``metadata_key`` is empty and - // ``default_value`` is missing. - // * Useful if you want Envoy to enforce rate limiting even when the metadata is not present. - // + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the + // rate limiting service in that case. bool skip_if_absent = 5; } @@ -2170,9 +2112,6 @@ message RateLimit { // Rate limit on request headers. RequestHeaders request_headers = 3; - // Rate limit on query parameters. - QueryParameters query_parameters = 12; - // Rate limit on remote address. RemoteAddress remote_address = 4; @@ -2231,33 +2170,6 @@ message RateLimit { } } - message HitsAddend { - // Fixed number of hits to add to the rate limit descriptor. 
- // - // One of the ``number`` or ``format`` fields should be set but not both. - google.protobuf.UInt64Value number = 1 [(validate.rules).uint64 = {lte: 1000000000}]; - - // Substitution format string to extract the number of hits to add to the rate limit descriptor. - // The same :ref:`format specifier ` as used for - // :ref:`HTTP access logging ` applies here. - // - // .. note:: - // - // The format string must contains only single valid substitution field. If the format string - // not meets the requirement, the configuration will be rejected. - // - // The substitution field should generates a non-negative number or string representation of - // a non-negative number. The value of the non-negative number should be less than or equal - // to 1000000000 like the ``number`` field. If the output of the substitution field not meet - // the requirement, this will be treated as an error and the current descriptor will be ignored. - // - // For example, the ``%BYTES_RECEIVED%`` format string will be replaced with the number of bytes - // received in the request. - // - // One of the ``number`` or ``format`` fields should be set but not both. - string format = 2 [(validate.rules).string = {prefix: "%" suffix: "%" ignore_empty: true}]; - } - // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. @@ -2265,19 +2177,9 @@ message RateLimit { // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. - // - // .. note:: - // This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like - // :ref:`VirtualHost.typed_per_filter_config` or - // :ref:`Route.typed_per_filter_config`, etc. google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; // The key to be set in runtime to disable this rate limit configuration. - // - // .. 
note:: - // This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like - // :ref:`VirtualHost.typed_per_filter_config` or - // :ref:`Route.typed_per_filter_config`, etc. string disable_key = 2; // A list of actions that are to be applied for this rate limit configuration. @@ -2292,38 +2194,7 @@ message RateLimit { // rate limit configuration. If the override value is invalid or cannot be resolved // from metadata, no override is provided. See :ref:`rate limit override // ` for more information. - // - // .. note:: - // This is not supported if the rate limit action is configured in the ``typed_per_filter_config`` like - // :ref:`VirtualHost.typed_per_filter_config` or - // :ref:`Route.typed_per_filter_config`, etc. Override limit = 4; - - // An optional hits addend to be appended to the descriptor produced by this rate limit - // configuration. - // - // .. note:: - // This is only supported if the rate limit action is configured in the ``typed_per_filter_config`` like - // :ref:`VirtualHost.typed_per_filter_config` or - // :ref:`Route.typed_per_filter_config`, etc. - HitsAddend hits_addend = 5; - - // If true, the rate limit request will be applied when the stream completes. The default value is false. - // This is useful when the rate limit budget needs to reflect the response context that is not available - // on the request path. - // - // For example, let's say the upstream service calculates the usage statistics and returns them in the response body - // and we want to utilize these numbers to apply the rate limit action for the subsequent requests. - // Combined with another filter that can set the desired addend based on the response (e.g. Lua filter), - // this can be used to subtract the usage statistics from the rate limit budget. - // - // A rate limit applied on the stream completion is "fire-and-forget" by nature, and rate limit is not enforced by this config. 
- // In other words, the current request won't be blocked when this is true, but the budget will be updated for the subsequent - // requests based on the action with this field set to true. Users should ensure that the rate limit is enforced by the actions - // applied on the request path, i.e. the ones with this field set to false. - // - // Currently, this is only supported by the HTTP global rate filter. - bool apply_on_stream_done = 6; } // .. attention:: diff --git a/src/main/proto/envoy/config/trace/v2/http_tracer.proto b/src/main/proto/envoy/config/trace/v2/http_tracer.proto index 23fec5a..778b9e7 100644 --- a/src/main/proto/envoy/config/trace/v2/http_tracer.proto +++ b/src/main/proto/envoy/config/trace/v2/http_tracer.proto @@ -41,6 +41,7 @@ message Tracing { // - *envoy.tracers.zipkin* // - *envoy.tracers.dynamic_ot* // - *envoy.tracers.datadog* + // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* string name = 1 [(validate.rules).string = {min_bytes: 1}]; @@ -51,6 +52,7 @@ message Tracing { // - :ref:`ZipkinConfig ` // - :ref:`DynamicOtConfig ` // - :ref:`DatadogConfig ` + // - :ref:`OpenCensusConfig ` // - :ref:`AWS X-Ray ` oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; diff --git a/src/main/proto/envoy/config/trace/v2/opencensus.proto b/src/main/proto/envoy/config/trace/v2/opencensus.proto new file mode 100644 index 0000000..595f4fe --- /dev/null +++ b/src/main/proto/envoy/config/trace/v2/opencensus.proto @@ -0,0 +1,93 @@ +syntax = "proto3"; + +package envoy.config.trace.v2; + +import "envoy/api/v2/core/grpc_service.proto"; + +import "opencensus/proto/trace/v1/trace_config.proto"; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v2"; +option java_outer_classname = "OpencensusProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/trace/v2;tracev2"; +option 
(udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: OpenCensus tracer] + +// Configuration for the OpenCensus tracer. +// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +message OpenCensusConfig { + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + + // Configures tracing, e.g. the sampler, max number of annotations, etc. + opencensus.proto.trace.v1.TraceConfig trace_config = 1; + + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + bool stdout_exporter_enabled = 2; + + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + bool stackdriver_exporter_enabled = 3; + + // The Cloud project_id to use for Stackdriver tracing. + string stackdriver_project_id = 4; + + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + string stackdriver_address = 10; + + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. + api.v2.core.GrpcService stackdriver_grpc_service = 13; + + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. + bool zipkin_exporter_enabled = 5; + + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" + string zipkin_url = 6; + + // Enables the OpenCensus Agent exporter if set to true. 
The ocagent_address or + // ocagent_grpc_service must also be set. + bool ocagent_exporter_enabled = 11; + + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + string ocagent_address = 12; + + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + api.v2.core.GrpcService ocagent_grpc_service = 14; + + // List of incoming trace context headers we will accept. First one found + // wins. + repeated TraceContext incoming_trace_context = 8; + + // List of outgoing trace context headers we will produce. + repeated TraceContext outgoing_trace_context = 9; +} diff --git a/src/main/proto/envoy/config/trace/v2/trace.proto b/src/main/proto/envoy/config/trace/v2/trace.proto index 479da8b..02d6fa2 100644 --- a/src/main/proto/envoy/config/trace/v2/trace.proto +++ b/src/main/proto/envoy/config/trace/v2/trace.proto @@ -6,6 +6,7 @@ import public "envoy/config/trace/v2/datadog.proto"; import public "envoy/config/trace/v2/dynamic_ot.proto"; import public "envoy/config/trace/v2/http_tracer.proto"; import public "envoy/config/trace/v2/lightstep.proto"; +import public "envoy/config/trace/v2/opencensus.proto"; import public "envoy/config/trace/v2/service.proto"; import public "envoy/config/trace/v2/zipkin.proto"; diff --git a/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto b/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto index 40fe852..d2664ef 100644 --- a/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto +++ b/src/main/proto/envoy/config/trace/v3/dynamic_ot.proto @@ -20,10 +20,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamically loadable OpenTracing tracer] -// DynamicOtConfig was used to dynamically load a tracer from a shared library +// DynamicOtConfig is used to dynamically 
load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. -// [#not-implemented-hide:] +// [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.DynamicOtConfig"; diff --git a/src/main/proto/envoy/config/trace/v3/opencensus.proto b/src/main/proto/envoy/config/trace/v3/opencensus.proto new file mode 100644 index 0000000..a3399fa --- /dev/null +++ b/src/main/proto/envoy/config/trace/v3/opencensus.proto @@ -0,0 +1,156 @@ +syntax = "proto3"; + +package envoy.config.trace.v3; + +import "envoy/config/core/v3/grpc_service.proto"; + +import "opencensus/proto/trace/v1/trace_config.proto"; + +import "envoy/annotations/deprecation.proto"; +import "udpa/annotations/migrate.proto"; +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; + +option java_package = "io.envoyproxy.envoy.config.trace.v3"; +option java_outer_classname = "OpencensusProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3;tracev3"; +option (udpa.annotations.file_migrate).move_to_package = + "envoy.extensions.tracers.opencensus.v4alpha"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: OpenCensus tracer] + +// Configuration for the OpenCensus tracer. +// [#next-free-field: 15] +// [#extension: envoy.tracers.opencensus] +message OpenCensusConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.trace.v2.OpenCensusConfig"; + + enum TraceContext { + // No-op default, no trace context is utilized. + NONE = 0; + + // W3C Trace-Context format "traceparent:" header. + TRACE_CONTEXT = 1; + + // Binary "grpc-trace-bin:" header. + GRPC_TRACE_BIN = 2; + + // "X-Cloud-Trace-Context:" header. + CLOUD_TRACE_CONTEXT = 3; + + // X-B3-* headers. + B3 = 4; + } + + reserved 7; + + // Configures tracing, e.g. 
the sampler, max number of annotations, etc. + opencensus.proto.trace.v1.TraceConfig trace_config = 1 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // Enables the stdout exporter if set to true. This is intended for debugging + // purposes. + bool stdout_exporter_enabled = 2 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // Enables the Stackdriver exporter if set to true. The project_id must also + // be set. + bool stackdriver_exporter_enabled = 3 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // The Cloud project_id to use for Stackdriver tracing. + string stackdriver_project_id = 4 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // (optional) By default, the Stackdriver exporter will connect to production + // Stackdriver. If stackdriver_address is non-empty, it will instead connect + // to this address, which is in the gRPC format: + // https://github.com/grpc/grpc/blob/master/doc/naming.md + string stackdriver_address = 10 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // (optional) The gRPC server that hosts Stackdriver tracing service. Only + // Google gRPC is supported. If :ref:`target_uri ` + // is not provided, the default production Stackdriver address will be used. + core.v3.GrpcService stackdriver_grpc_service = 13 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // Enables the Zipkin exporter if set to true. The url and service name must + // also be set. 
This is deprecated, prefer to use Envoy's :ref:`native Zipkin + // tracer `. + bool zipkin_exporter_enabled = 5 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is + // deprecated, prefer to use Envoy's :ref:`native Zipkin tracer + // `. + string zipkin_url = 6 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or + // ocagent_grpc_service must also be set. + bool ocagent_exporter_enabled = 11 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC + // format: https://github.com/grpc/grpc/blob/master/doc/naming.md + // [#comment:TODO: deprecate this field] + string ocagent_address = 12 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. + // This is only used if the ocagent_address is left empty. + core.v3.GrpcService ocagent_grpc_service = 14 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // List of incoming trace context headers we will accept. First one found + // wins. + repeated TraceContext incoming_trace_context = 8 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; + + // List of outgoing trace context headers we will produce. 
+ repeated TraceContext outgoing_trace_context = 9 [ + deprecated = true, + (envoy.annotations.deprecated_at_minor_version) = "3.0", + (envoy.annotations.disallowed_by_default) = true + ]; +} diff --git a/src/main/proto/envoy/config/trace/v3/trace.proto b/src/main/proto/envoy/config/trace/v3/trace.proto index 296c6bd..8ca4371 100644 --- a/src/main/proto/envoy/config/trace/v3/trace.proto +++ b/src/main/proto/envoy/config/trace/v3/trace.proto @@ -6,6 +6,7 @@ import public "envoy/config/trace/v3/datadog.proto"; import public "envoy/config/trace/v3/dynamic_ot.proto"; import public "envoy/config/trace/v3/http_tracer.proto"; import public "envoy/config/trace/v3/lightstep.proto"; +import public "envoy/config/trace/v3/opencensus.proto"; import public "envoy/config/trace/v3/opentelemetry.proto"; import public "envoy/config/trace/v3/service.proto"; import public "envoy/config/trace/v3/zipkin.proto"; diff --git a/src/main/proto/envoy/config/trace/v3/xray.proto b/src/main/proto/envoy/config/trace/v3/xray.proto index 3ddd7a6..223aaaa 100644 --- a/src/main/proto/envoy/config/trace/v3/xray.proto +++ b/src/main/proto/envoy/config/trace/v3/xray.proto @@ -21,65 +21,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer -// -// The X-Ray tracer will automatically attach :ref:`custom_tags ` as annotations to the span. (See: `Annotations `_.) -// -// AWS X-Ray trace annotations are also created by the tracing subsystem automatically based on metadata provided during creation of a span. -// -// An example X-Ray trace span that is generated by the envoy trace subsystem is as follows: -// -// .. 
code-block:: json -// -// { -// "Id": "1-6698980d-e829ae270ab34b69b488b098", -// "Duration": 0.016, -// "LimitExceeded": false, -// "Segments": -// [ -// { -// "Id": "15d65e5ced8dfe76", -// "Document": -// { -// "id": "15d65e5ced8dfe76", -// "name": "envoy-example", -// "start_time": 1721276429.410355, -// "trace_id": "1-6698980d-e829ae270ab34b69b488b098", -// "end_time": 1721276429.426068, -// "fault": true, -// "http": -// { -// "request": -// { -// "url": "http://example/path", -// "method": "GET", -// "user_agent": "curl/8.5.0", -// "client_ip": "127.0.0.1", -// "x_forwarded_for": false -// }, -// "response": -// { -// "status": 503, -// "content_length": 216 -// } -// }, -// "aws": {}, -// "annotations": -// { -// "response_flags": "UF", -// "component": "proxy", -// "upstream_cluster": "upstream_cluster", -// "annotation_from_custom_tag": "example", -// "http.protocol": "HTTP/1.1", -// "request_size": "0", -// "downstream_cluster": "-", -// "direction": "ingress", -// "upstream_cluster.name": "upstream_cluster" -// } -// } -// } -// ] -// } -// // [#extension: envoy.tracers.xray] message XRayConfig { diff --git a/src/main/proto/envoy/data/accesslog/v3/accesslog.proto b/src/main/proto/envoy/data/accesslog/v3/accesslog.proto index da029b7..2e02f1e 100644 --- a/src/main/proto/envoy/data/accesslog/v3/accesslog.proto +++ b/src/main/proto/envoy/data/accesslog/v3/accesslog.proto @@ -109,16 +109,14 @@ message AccessLogCommon { double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. - // - // .. note:: - // This may not be the actual peer address. For example, it might be derived from headers like ``x-forwarded-for``, - // the proxy protocol, or similar sources. + // Note: This may not be the physical peer. E.g, if the remote address is inferred from for + // example the x-forwarder-for header, proxy protocol, etc. 
config.core.v3.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. config.core.v3.Address downstream_local_address = 3; - // If the connection is secure, this field will contain TLS properties. + // If the connection is secure, this field will contain TLS properties. TLSProperties tls_properties = 4; // The time that Envoy started servicing this request. This is effectively the time that the first @@ -130,7 +128,7 @@ message AccessLogCommon { google.protobuf.Duration time_to_last_rx_byte = 6; // Interval between the first downstream byte received and the first upstream byte sent. There may - // be considerable delta between ``time_to_last_rx_byte`` and this value due to filters. + // be considerable delta between ``time_to_last_rx_byte`` and this value due to filters. // Additionally, the same caveats apply as documented in ``time_to_last_downstream_tx_byte`` about // not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_first_upstream_tx_byte = 7; @@ -189,7 +187,7 @@ message AccessLogCommon { // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured // upstream transport socket. Common TLS failures are in - // :ref:`TLS troubleshooting `. + // :ref:`TLS troubleshooting `. string upstream_transport_failure_reason = 18; // The name of the route @@ -206,7 +204,7 @@ message AccessLogCommon { map filter_state_objects = 21; // A list of custom tags, which annotate logs with additional information. - // To configure this value, see the documentation for + // To configure this value, users should configure // :ref:`custom_tags `. map custom_tags = 22; @@ -227,41 +225,40 @@ message AccessLogCommon { // This could be any format string that could be used to identify one stream.
string stream_id = 26; - // Indicates whether this log entry is the final entry (flushed after the stream completed) or an intermediate entry - // (flushed periodically during the stream). - // - // For long-lived streams (e.g., TCP connections or long-lived HTTP/2 streams), there may be multiple intermediate - // entries and only one final entry. - // - // If needed, a unique identifier (see :ref:`stream_id `) - // can be used to correlate all intermediate and final log entries for the same stream. + // If this log entry is final log entry that flushed after the stream completed or + // intermediate log entry that flushed periodically during the stream. + // There may be multiple intermediate log entries and only one final log entry for each + // long-live stream (TCP connection, long-live HTTP2 stream). + // And if it is necessary, unique ID or identifier can be added to the log entry + // :ref:`stream_id ` to + // correlate all these intermediate log entries and final log entry. // // .. attention:: // - // This field is deprecated in favor of ``access_log_type``, which provides a clearer indication of the log entry - // type. + // This field is deprecated in favor of ``access_log_type`` for better indication of the + // type of the access log record. bool intermediate_log_entry = 27 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured downstream - // transport socket. Common TLS failures are in :ref:`TLS troubleshooting `. + // transport socket. Common TLS failures are in :ref:`TLS trouble shooting `. string downstream_transport_failure_reason = 28; // For HTTP: Total number of bytes sent to the downstream by the http stream. - // For TCP: Total number of bytes sent to the downstream by the :ref:`TCP Proxy `. 
+ // For TCP: Total number of bytes sent to the downstream by the tcp proxy. uint64 downstream_wire_bytes_sent = 29; // For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed. - // For TCP: Total number of bytes received from the downstream by the :ref:`TCP Proxy `. + // For TCP: Total number of bytes received from the downstream by the tcp proxy. uint64 downstream_wire_bytes_received = 30; // For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries. - // For TCP: Total number of bytes sent to the upstream by the :ref:`TCP Proxy `. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. uint64 upstream_wire_bytes_sent = 31; // For HTTP: Total number of bytes received from the upstream by the http stream. - // For TCP: Total number of bytes sent to the upstream by the :ref:`TCP Proxy `. + // For TCP: Total number of bytes sent to the upstream by the tcp proxy. uint64 upstream_wire_bytes_received = 32; // The type of the access log, which indicates when the log was recorded. @@ -300,7 +297,7 @@ message ResponseFlags { // Indicates there was no healthy upstream. bool no_healthy_upstream = 2; - // Indicates there was an upstream request timeout. + // Indicates there was an upstream request timeout. bool upstream_request_timeout = 3; // Indicates local codec level reset was sent on the stream. @@ -361,7 +358,7 @@ message ResponseFlags { // Indicates that a filter configuration is not available. bool no_filter_config_found = 22; - // Indicates that the request or connection exceeded the downstream connection duration. + // Indicates that the request or connection exceeded the downstream connection duration. bool duration_timeout = 23; // Indicates there was an HTTP protocol error in the upstream response.
@@ -483,7 +480,7 @@ message HTTPRequestProperties { // do not already have a request ID. string request_id = 9; - // Value of the ``x-envoy-original-path`` request header. + // Value of the ``X-Envoy-Original-Path`` request header. string original_path = 10; // Size of the HTTP request headers in bytes. diff --git a/src/main/proto/envoy/data/tap/v3/transport.proto b/src/main/proto/envoy/data/tap/v3/transport.proto index a89e15d..9338165 100644 --- a/src/main/proto/envoy/data/tap/v3/transport.proto +++ b/src/main/proto/envoy/data/tap/v3/transport.proto @@ -20,7 +20,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // sequences on a socket. // Event in a socket trace. -// [#next-free-field: 6] message SocketEvent { option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent"; @@ -66,9 +65,6 @@ message SocketEvent { Closed closed = 4; } - - // Connection information per event - Connection connection = 5; } // Sequence of read/write events that constitute a buffered trace on a socket. 
diff --git a/src/main/proto/envoy/extensions/clusters/common/dns/v3/dns.proto b/src/main/proto/envoy/extensions/clusters/common/dns/v3/dns.proto deleted file mode 100644 index db4e31f..0000000 --- a/src/main/proto/envoy/extensions/clusters/common/dns/v3/dns.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.common.dns.v3; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.common.dns.v3"; -option java_outer_classname = "DnsProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/common/dns/v3;dnsv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS configuration for clusters] - -enum DnsLookupFamily { - UNSPECIFIED = 0; - AUTO = 1; - V4_ONLY = 2; - V6_ONLY = 3; - V4_PREFERRED = 4; - ALL = 5; -} diff --git a/src/main/proto/envoy/extensions/clusters/dns/v3/dns_cluster.proto b/src/main/proto/envoy/extensions/clusters/dns/v3/dns_cluster.proto deleted file mode 100644 index 4266541..0000000 --- a/src/main/proto/envoy/extensions/clusters/dns/v3/dns_cluster.proto +++ /dev/null @@ -1,92 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.clusters.dns.v3; - -import "envoy/config/core/v3/extension.proto"; -import "envoy/extensions/clusters/common/dns/v3/dns.proto"; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.clusters.dns.v3"; -option java_outer_classname = "DnsClusterProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/dns/v3;dnsv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: DNS cluster configuration] - -// Configuration for DNS discovery clusters. 
-// [#extension: envoy.clusters.dns] - -// [#next-free-field: 10] -message DnsCluster { - message RefreshRate { - // Specifies the base interval between refreshes. This parameter is required and must be greater - // than zero and less than - // :ref:`max_interval `. - google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { - required: true - gt {nanos: 1000000} - }]; - - // Specifies the maximum interval between refreshes. This parameter is optional, but must be - // greater than or equal to the - // :ref:`base_interval ` if set. The default - // is 10 times the :ref:`base_interval `. - google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; - } - - // This value is the cluster’s DNS refresh rate. The value configured must be at least 1ms. - // If this setting is not specified, the - // value defaults to 5000ms. - google.protobuf.Duration dns_refresh_rate = 3 [(validate.rules).duration = {gt {nanos: 1000000}}]; - - // This is the cluster’s DNS refresh rate when requests are failing. If this setting is - // not specified, the failure refresh rate defaults to the DNS refresh rate. - RefreshRate dns_failure_refresh_rate = 4; - - // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, - // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS - // resolution. - bool respect_dns_ttl = 5; - - // DNS jitter causes the cluster to refresh DNS entries later by a random amount of time to avoid a - // stampede of DNS requests. This value sets the upper bound (exclusive) for the random amount. - // There will be no jitter if this value is omitted. - google.protobuf.Duration dns_jitter = 6 [(validate.rules).duration = {gte {}}]; - - // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple, - // or any other DNS resolver types and the related parameters. 
- // For example, an object of - // :ref:`CaresDnsResolverConfig` - // can be packed into this ``typed_dns_resolver_config``. This configuration replaces the - // :ref:`Cluster.typed_dns_resolver_config` - // configuration which replaces :ref:`Cluster.dns_resolution_config`. - // During the transition period when - // :ref:`DnsCluster.typed_dns_resolver_config`, - // :ref:`Cluster.typed_dns_resolver_config`, - // and :ref:`Cluster.dns_resolution_config` - // exist, Envoy will use - // :ref:`DnsCluster.typed_dns_resolver_config` - // and ignore - // DNS resolver-related fields in :ref:`Cluster` if the cluster is configured via the - // :ref:`Cluster.cluster_type` extension point with the - // :ref:`DnsCluster` extension type. - // Otherwise, see :ref:`Cluster.typed_dns_resolver_config`. - // [#extension-category: envoy.network.dns_resolver] - config.core.v3.TypedExtensionConfig typed_dns_resolver_config = 7; - - // The DNS IP address resolution policy. If this setting is not specified, the - // value defaults to - // :ref:`AUTO`. - common.dns.v3.DnsLookupFamily dns_lookup_family = 8; - - // If true, all returned addresses are considered to be associated with a single endpoint, - // which maps to :ref:`logical DNS discovery ` - // semantics. Otherwise, each address is considered to be a separate endpoint, which maps to - // :ref:`strict DNS discovery ` semantics. 
- bool all_addresses_in_single_endpoint = 9; -} diff --git a/src/main/proto/envoy/extensions/clusters/redis/v3/redis_cluster.proto b/src/main/proto/envoy/extensions/clusters/redis/v3/redis_cluster.proto index ba8f434..88e4e89 100644 --- a/src/main/proto/envoy/extensions/clusters/redis/v3/redis_cluster.proto +++ b/src/main/proto/envoy/extensions/clusters/redis/v3/redis_cluster.proto @@ -44,14 +44,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // address: foo.bar.com // port_value: 22120 // cluster_type: -// name: envoy.clusters.redis -// typed_config: -// "@type": type.googleapis.com/google.protobuf.Struct -// value: -// cluster_refresh_rate: 30s -// cluster_refresh_timeout: 0.5s -// redirect_refresh_interval: 10s -// redirect_refresh_threshold: 10 +// name: envoy.clusters.redis +// typed_config: +// "@type": type.googleapis.com/google.protobuf.Struct +// value: +// cluster_refresh_rate: 30s +// cluster_refresh_timeout: 0.5s +// redirect_refresh_interval: 10s +// redirect_refresh_threshold: 10 // [#extension: envoy.clusters.redis] // [#next-free-field: 7] diff --git a/src/main/proto/envoy/extensions/common/aws/v3/credential_provider.proto b/src/main/proto/envoy/extensions/common/aws/v3/credential_provider.proto deleted file mode 100644 index 722e9b3..0000000 --- a/src/main/proto/envoy/extensions/common/aws/v3/credential_provider.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.common.aws.v3; - -import "envoy/config/core/v3/base.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.common.aws.v3"; -option java_outer_classname = "CredentialProviderProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/common/aws/v3;awsv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// 
[#protodoc-title: AWS common configuration] - -// Configuration for AWS credential provider. This is optional and the credentials are normally -// retrieved from the environment or AWS configuration files by following the default credential -// provider chain. However, this configuration can be used to override the default behavior. -message AwsCredentialProvider { - // The option to use `AssumeRoleWithWebIdentity `_. - AssumeRoleWithWebIdentityCredentialProvider assume_role_with_web_identity_provider = 1; - - // The option to use an inline credential. If inline credential is provided, no chain will be created and only the inline credential will be used. - InlineCredentialProvider inline_credential = 2; - - // The option to specify parameters for credential retrieval from an envoy data source, such as a file in AWS credential format. - CredentialsFileCredentialProvider credentials_file_provider = 3; - - // Create a custom credential provider chain instead of the default credential provider chain. - // If set to TRUE, the credential provider chain that is created contains only those set in this credential provider message. - // If set to FALSE, the settings provided here will act as modifiers to the default credential provider chain. - // Defaults to FALSE. - // - // This has no effect if inline_credential is provided. - bool custom_credential_provider_chain = 4; -} - -// Configuration to use an inline AWS credential. This is an equivalent to setting the well-known -// environment variables ``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and the optional ``AWS_SESSION_TOKEN``. -message InlineCredentialProvider { - // The AWS access key ID. - string access_key_id = 1 [(validate.rules).string = {min_len: 1}]; - - // The AWS secret access key. - string secret_access_key = 2 - [(validate.rules).string = {min_len: 1}, (udpa.annotations.sensitive) = true]; - - // The AWS session token. This is optional. 
- string session_token = 3 [(udpa.annotations.sensitive) = true]; -} - -// Configuration to use `AssumeRoleWithWebIdentity `_ -// to retrieve AWS credentials. -message AssumeRoleWithWebIdentityCredentialProvider { - // Data source for a web identity token that is provided by the identity provider to assume the role. - // When using this data source, even if a ``watched_directory`` is provided, the token file will only be re-read when the credentials - // returned from AssumeRoleWithWebIdentity expire. - config.core.v3.DataSource web_identity_token_data_source = 1 - [(udpa.annotations.sensitive) = true]; - - // The ARN of the role to assume. - string role_arn = 2 [(validate.rules).string = {min_len: 1}]; - - // Optional role session name to use in AssumeRoleWithWebIdentity API call. - string role_session_name = 3; -} - -message CredentialsFileCredentialProvider { - // Data source from which to retrieve AWS credentials - // When using this data source, if a ``watched_directory`` is provided, the credential file will be re-read when a file move is detected. - // See :ref:`watched_directory ` for more information about the ``watched_directory`` field. - config.core.v3.DataSource credentials_data_source = 1 [(udpa.annotations.sensitive) = true]; - - // The profile within the credentials_file data source. If not provided, the default profile will be used. - string profile = 2; -} diff --git a/src/main/proto/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto b/src/main/proto/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto index cbd2309..eae3b8f 100644 --- a/src/main/proto/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto +++ b/src/main/proto/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto @@ -33,7 +33,7 @@ message DnsCacheCircuitBreakers { // Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview // ` for more information. 
-// [#next-free-field: 16] +// [#next-free-field: 15] message DnsCacheConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig"; @@ -94,12 +94,6 @@ message DnsCacheConfig { // value depending on timing. This is similar to how other circuit breakers work. google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}]; - // Disable the DNS refresh on failure. If this field is set to true, it will ignore the - // :ref:`typed_dns_resolver_config `. - // If not specified, it defaults to false. By enabling this feature, the failed hosts will now be treated as a cache miss, - // allowing the failed hosts to be resolved on demand. - bool disable_dns_refresh_on_failure = 15; - // If the DNS failure refresh rate is specified, // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is // not specified, the failure refresh rate defaults to the dns_refresh_rate. @@ -142,9 +136,8 @@ message DnsCacheConfig { // The timeout used for DNS queries. This timeout is independent of any timeout and retry policy // used by the underlying DNS implementation (e.g., c-areas and Apple DNS) which are opaque. // Setting this timeout will ensure that queries succeed or fail within the specified time frame - // and are then retried using the standard refresh rates. Setting it to 0 will disable the Envoy DNS - // query timeout and use the underlying DNS implementation timeout. Defaults to 5s if not set. - google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gte {}}]; + // and are then retried using the standard refresh rates. Defaults to 5s if not set. + google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}]; // Configuration to flush the DNS cache to long term storage. 
config.common.key_value.v3.KeyValueStoreConfig key_value_config = 13; diff --git a/src/main/proto/envoy/extensions/common/ratelimit/v3/ratelimit.proto b/src/main/proto/envoy/extensions/common/ratelimit/v3/ratelimit.proto index f9cba6d..73d729a 100644 --- a/src/main/proto/envoy/extensions/common/ratelimit/v3/ratelimit.proto +++ b/src/main/proto/envoy/extensions/common/ratelimit/v3/ratelimit.proto @@ -5,8 +5,6 @@ package envoy.extensions.common.ratelimit.v3; import "envoy/type/v3/ratelimit_unit.proto"; import "envoy/type/v3/token_bucket.proto"; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -103,10 +101,8 @@ message RateLimitDescriptor { // Descriptor key. string key = 1 [(validate.rules).string = {min_len: 1}]; - // Descriptor value. Blank value is treated as wildcard to create dynamic token buckets for each unique value. - // Blank Values as wild card is currently supported only with envoy server instance level HTTP local rate limiting - // and will not work if HTTP local rate limiting is enabled per connection level. - string value = 2 [(validate.rules).string = {min_len: 0}]; + // Descriptor value. + string value = 2 [(validate.rules).string = {min_len: 1}]; } // Override rate limit to apply to this descriptor instead of the limit @@ -125,19 +121,8 @@ message RateLimitDescriptor { // Optional rate limit override to supply to the ratelimit service. RateLimitOverride limit = 2; - - // Optional hits_addend for the rate limit descriptor. If set the value will override the - // request level hits_addend. - google.protobuf.UInt64Value hits_addend = 3; } -// Configuration used to enable local rate limiting. -// -// .. note:: -// The ``LocalRateLimitDescriptor`` is used to configure a local rate limit rule with a token -// bucket algorithm. 
The ``RateLimitDescriptor`` is used to represent a list of symbols that -// are used to match against the rate limit rule. -// message LocalRateLimitDescriptor { // Descriptor entries. repeated v3.RateLimitDescriptor.Entry entries = 1 [(validate.rules).repeated = {min_items: 1}]; diff --git a/src/main/proto/envoy/extensions/dynamic_modules/v3/dynamic_modules.proto b/src/main/proto/envoy/extensions/dynamic_modules/v3/dynamic_modules.proto deleted file mode 100644 index 3f3ec41..0000000 --- a/src/main/proto/envoy/extensions/dynamic_modules/v3/dynamic_modules.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.dynamic_modules.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.dynamic_modules.v3"; -option java_outer_classname = "DynamicModulesProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/dynamic_modules/v3;dynamic_modulesv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Dynamic Modules common configuration] - -// Configuration of a dynamic module. A dynamic module is a shared object file that can be loaded via dlopen -// by various Envoy extension points. Currently, only HTTP filter (envoy.filters.http.dynamic_modules) is supported. -// -// How a module is loaded is determined by the extension point that uses it. For example, the HTTP filter -// loads the module with dlopen when Envoy receives a configuration that references the module at load time. -// If loading the module fails, the configuration will be rejected. -// -// Whether or not the shared object is the same is determined by the file path as well as the file's inode depending -// on the platform. Notably, if the file path and the content of the file are the same, the shared object will be reused. 
-// -// A module must be compatible with the ABI specified in :repo:`abi.h `. -// Currently, compatibility is only guaranteed by an exact version match between the Envoy -// codebase and the dynamic module SDKs. In the future, after the ABI is stabilized, we will revisit -// this restriction and hopefully provide a wider compatibility guarantee. Until then, Envoy -// checks the hash of the ABI header files to ensure that the dynamic modules are built against the -// same version of the ABI. -message DynamicModuleConfig { - // The name of the dynamic module. The client is expected to have some configuration indicating where to search for the module. - // In Envoy, the search path can only be configured via the environment variable ``ENVOY_DYNAMIC_MODULES_SEARCH_PATH``. - // The actual search path is ``${ENVOY_DYNAMIC_MODULES_SEARCH_PATH}/lib${name}.so``. TODO: make the search path configurable via - // command line options. - string name = 1 [(validate.rules).string = {min_len: 1}]; - - // Set true to prevent the module from being unloaded with dlclose. - // This is useful for modules that have global state that should not be unloaded. - // A module is closed when no more references to it exist in the process. For example, - // no HTTP filters are using the module (e.g. after configuration update). - bool do_not_close = 3; -} diff --git a/src/main/proto/envoy/extensions/filters/common/dependency/v3/dependency.proto b/src/main/proto/envoy/extensions/filters/common/dependency/v3/dependency.proto index 6fd390b..e1cda98 100644 --- a/src/main/proto/envoy/extensions/filters/common/dependency/v3/dependency.proto +++ b/src/main/proto/envoy/extensions/filters/common/dependency/v3/dependency.proto @@ -34,10 +34,10 @@ message FilterDependencies { // A list of dependencies required on the decode path. repeated Dependency decode_required = 1; - // A list of dependencies provided on the decode path. + // A list of dependencies provided on the encode path. 
repeated Dependency decode_provided = 2; - // A list of dependencies required on the encode path. + // A list of dependencies required on the encode path. repeated Dependency encode_required = 3; // A list of dependencies provided on the encode path. diff --git a/src/main/proto/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto b/src/main/proto/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto index 6ba7477..68f8294 100644 --- a/src/main/proto/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto +++ b/src/main/proto/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto @@ -47,19 +47,18 @@ message GradientControllerConfig { } // Parameters controlling the periodic minRTT recalculation. - // [#next-free-field: 7] + // [#next-free-field: 6] message MinimumRTTCalculationParams { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig." "MinimumRTTCalculationParams"; // The time interval between recalculating the minimum request round-trip time. Has to be - // positive. If set to zero, dynamic sampling of the minRTT is disabled. - google.protobuf.Duration interval = 1 [(validate.rules).duration = {gte {nanos: 1000000}}]; - - // The fixed value for the minRTT. This value is used when minRTT is not sampled dynamically. - // If dynamic sampling of the minRTT is disabled, this field must be set. - google.protobuf.Duration fixed_value = 6 [(validate.rules).duration = {gt {}}]; + // positive. + google.protobuf.Duration interval = 1 [(validate.rules).duration = { + required: true + gte {nanos: 1000000} + }]; // The number of requests to aggregate/sample during the minRTT recalculation window before // updating. Defaults to 50.
diff --git a/src/main/proto/envoy/extensions/filters/http/api_key_auth/v3/api_key_auth.proto b/src/main/proto/envoy/extensions/filters/http/api_key_auth/v3/api_key_auth.proto deleted file mode 100644 index a75b803..0000000 --- a/src/main/proto/envoy/extensions/filters/http/api_key_auth/v3/api_key_auth.proto +++ /dev/null @@ -1,103 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.api_key_auth.v3; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/sensitive.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.api_key_auth.v3"; -option java_outer_classname = "ApiKeyAuthProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/api_key_auth/v3;api_key_authv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; -option (xds.annotations.v3.file_status).work_in_progress = true; - -// [#protodoc-title: APIKey Auth] -// APIKey Auth :ref:`configuration overview `. -// [#extension: envoy.filters.http.api_key_auth] - -// API Key HTTP authentication. -// -// For example, the following configuration configures the filter to authenticate the clients using -// the API key from the header ``X-API-KEY``. And only the clients with the key ``real-key`` are -// considered as authenticated. -// -// .. code-block:: yaml -// -// credentials: -// - key: real-key -// client: user -// key_sources: -// - header: "X-API-KEY" -// -message ApiKeyAuth { - // The credentials that are used to authenticate the clients. - repeated Credential credentials = 1 [(udpa.annotations.sensitive) = true]; - - // The key sources to fetch the key from the coming request. - repeated KeySource key_sources = 2; -} - -// API key auth configuration of per route or per virtual host or per route configuration. 
-message ApiKeyAuthPerRoute { - // The credentials that are used to authenticate the clients. If this field is non-empty, then the - // credentials in the filter level configuration will be ignored and the credentials in this - // configuration will be used. - repeated Credential credentials = 1 [(udpa.annotations.sensitive) = true]; - - // The key sources to fetch the key from the coming request. If this field is non-empty, then the - // key sources in the filter level configuration will be ignored and the key sources in this - // configuration will be used. - repeated KeySource key_sources = 2; - - // A list of clients that are allowed to access the route or vhost. The clients listed here - // should be subset of the clients listed in the ``credentials`` to provide authorization control - // after the authentication is successful. If the list is empty, then all authenticated clients - // are allowed. This provides very limited but simple authorization. If more complex authorization - // is required, then use the :ref:`HTTP RBAC filter ` instead. - // - // .. note:: - // Setting this field and ``credentials`` at the same configuration entry is not an error but - // also makes no much sense because they provide similar functionality. Please only use - // one of them at same configuration entry except for the case that you want to share the same - // credentials list across multiple routes but still use different allowed clients for each - // route. - // - repeated string allowed_clients = 3; -} - -// Single credential entry that contains the API key and the related client id. -message Credential { - // The value of the unique API key. - string key = 1 [(validate.rules).string = {min_len: 1}]; - - // The unique id or identity that used to identify the client or consumer. - string client = 2 [(validate.rules).string = {min_len: 1}]; -} - -message KeySource { - // The header name to fetch the key. If multiple header values are present, the first one will be - // used. 
If the header value starts with 'Bearer ', this prefix will be stripped to get the - // key value. - // - // If set, takes precedence over ``query`` and ``cookie``. - string header = 1 - [(validate.rules).string = - {max_len: 1024 well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true}]; - - // The query parameter name to fetch the key. If multiple query values are present, the first one - // will be used. - // - // The field will be used if ``header`` is not set. If set, takes precedence over ``cookie``. - string query = 2 [(validate.rules).string = {max_len: 1024}]; - - // The cookie name to fetch the key. - // - // The field will be used if the ``header`` and ``query`` are not set. - string cookie = 3 - [(validate.rules).string = - {max_len: 1024 well_known_regex: HTTP_HEADER_NAME strict: false ignore_empty: true}]; -} diff --git a/src/main/proto/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto b/src/main/proto/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto index 254352e..5729d7f 100644 --- a/src/main/proto/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto +++ b/src/main/proto/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.filters.http.aws_request_signing.v3; -import "envoy/extensions/common/aws/v3/credential_provider.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/duration.proto"; @@ -22,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.filters.http.aws_request_signing] // Top level configuration for the AWS request signing filter. 
-// [#next-free-field: 9] +// [#next-free-field: 8] message AwsRequestSigning { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning"; @@ -108,10 +107,6 @@ message AwsRequestSigning { // query_string: {} // QueryString query_string = 7; - - // The credential provider for signing the request. This is optional and if not set, - // it will be retrieved from the procedure described in :ref:`config_http_filters_aws_request_signing`. - common.aws.v3.AwsCredentialProvider credential_provider = 8; } message AwsRequestSigningPerRoute { diff --git a/src/main/proto/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto b/src/main/proto/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto index af3c442..995d2c3 100644 --- a/src/main/proto/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto +++ b/src/main/proto/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto @@ -41,12 +41,6 @@ message BasicAuth { // If it is not specified, the username will not be forwarded. string forward_username_header = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // This field specifies the request header to load the basic credential from. - // - // If it is not specified, the filter loads the credential from the "Authorization" header. 
- string authentication_header = 3 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; } // Extra settings that may be added to per-route configuration for diff --git a/src/main/proto/envoy/extensions/filters/http/compressor/v3/compressor.proto b/src/main/proto/envoy/extensions/filters/http/compressor/v3/compressor.proto index c49ccfe..6fe4b13 100644 --- a/src/main/proto/envoy/extensions/filters/http/compressor/v3/compressor.proto +++ b/src/main/proto/envoy/extensions/filters/http/compressor/v3/compressor.proto @@ -67,13 +67,6 @@ message Compressor { // To avoid interfering with other compression filters in the same chain use this option in // the filter closest to the upstream. bool remove_accept_encoding_header = 3; - - // Set of response codes for which compression is disabled, e.g. 206 Partial Content should not - // be compressed. - repeated uint32 uncompressible_response_codes = 4 [(validate.rules).repeated = { - unique: true - items {uint32 {lt: 600 gte: 200}} - }]; } // Minimum response length, in bytes, which will trigger compression. The default value is 30. diff --git a/src/main/proto/envoy/extensions/filters/http/cors/v3/cors.proto b/src/main/proto/envoy/extensions/filters/http/cors/v3/cors.proto index ba960d6..11ce910 100644 --- a/src/main/proto/envoy/extensions/filters/http/cors/v3/cors.proto +++ b/src/main/proto/envoy/extensions/filters/http/cors/v3/cors.proto @@ -21,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.filters.http.cors] // Cors filter config. Set this in -// :ref:`http_filters ` +// :ref:`http_filters ` // to enable the CORS filter.
// // Please note that the :ref:`CorsPolicy ` diff --git a/src/main/proto/envoy/extensions/filters/http/dynamic_modules/v3/dynamic_modules.proto b/src/main/proto/envoy/extensions/filters/http/dynamic_modules/v3/dynamic_modules.proto deleted file mode 100644 index d713bf8..0000000 --- a/src/main/proto/envoy/extensions/filters/http/dynamic_modules/v3/dynamic_modules.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.dynamic_modules.v3; - -import "envoy/extensions/dynamic_modules/v3/dynamic_modules.proto"; - -import "google/protobuf/any.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_modules.v3"; -option java_outer_classname = "DynamicModulesProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/dynamic_modules/v3;dynamic_modulesv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: HTTP filter for dynamic modules] -// [#extension: envoy.filters.http.dynamic_modules] - -// Configuration of the HTTP filter for dynamic modules. This filter allows loading shared object files -// that can be loaded via dlopen by the HTTP filter. -// -// A module can be loaded by multiple HTTP filters, hence the program can be structured in a way that -// the module is loaded only once and shared across multiple filters providing multiple functionalities. -message DynamicModuleFilter { - // Specifies the shared-object level configuration. - envoy.extensions.dynamic_modules.v3.DynamicModuleConfig dynamic_module_config = 1; - - // The name for this filter configuration. This can be used to distinguish between different filter implementations - // inside a dynamic module. For example, a module can have completely different filter implementations. 
- // When Envoy receives this configuration, it passes the filter_name to the dynamic module's HTTP filter config init function - // together with the filter_config. - // That way a module can decide which in-module filter implementation to use based on the name at load time. - string filter_name = 2; - - // The configuration for the filter chosen by filter_name. This is passed to the module's HTTP filter initialization function. - // Together with the filter_name, the module can decide which in-module filter implementation to use and - // fine-tune the behavior of the filter. - // - // For example, if a module has two filter implementations, one for logging and one for header manipulation, - // filter_name is used to choose either logging or header manipulation. The filter_config can be used to - // configure the logging level or the header manipulation behavior. - // - // ``google.protobuf.Struct`` is serialized as JSON before - // passing it to the plugin. ``google.protobuf.BytesValue`` and - // ``google.protobuf.StringValue`` are passed directly without the wrapper. - // - // .. 
code-block:: yaml - // - // # Passing in a string - // filter_config: - // "@type": "type.googleapis.com/google.protobuf.StringValue" - // value: hello - // - // # Passing in raw bytes - // filter_config: - // "@type": "type.googleapis.com/google.protobuf.BytesValue" - // value: aGVsbG8= # echo -n "hello" | base64 - // - google.protobuf.Any filter_config = 3; -} diff --git a/src/main/proto/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/src/main/proto/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index 0a2492b..4b4e79e 100644 --- a/src/main/proto/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/src/main/proto/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -11,7 +11,6 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; -import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; @@ -30,7 +29,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 30] +// [#next-free-field: 28] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v3.ExtAuthz"; @@ -53,7 +52,7 @@ message ExtAuthz { config.core.v3.ApiVersion transport_api_version = 12 [(validate.rules).enum = {defined_only: true}]; - // Changes filter's behavior on errors: + // Changes filter's behaviour on errors: // // 1. When set to true, the filter will ``accept`` client request even if the communication with // the authorization service has failed, or if the authorization service has returned a HTTP 5xx @@ -210,12 +209,12 @@ message ExtAuthz { // // .. note:: // - // 1. For requests to an HTTP authorization server: in addition to the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // 1. 
For requests to an HTTP authorization server: in addition to the user's supplied matchers, ``Host``, ``Method``, ``Path``, // ``Content-Length``, and ``Authorization`` are **additionally included** in the list. // // .. note:: // - // 2. For requests to an HTTP authorization server: value of ``Content-Length`` will be set to 0 and the request to the + // 2. For requests to an HTTP authorization server: *Content-Length* will be set to 0 and the request to the // authorization server will not have a message body. However, the check request can include the buffered // client request body (controlled by :ref:`with_request_body // ` setting), @@ -243,11 +242,11 @@ message ExtAuthz { google.protobuf.BoolValue charge_cluster_response_stats = 20; // Whether to encode the raw headers (i.e. unsanitized values & unconcatenated multi-line headers) - // in authentication request. Works with both HTTP and gRPC clients. + // in authentication request. Works with both HTTP and GRPC clients. // // When this is set to true, header values are not sanitized. Headers with the same key will also // not be combined into a single, comma-separated header. - // Requests to gRPC services will populate the field + // Requests to GRPC services will populate the field // :ref:`header_map`. // Requests to HTTP services will be constructed with the unsanitized header values and preserved // multi-line headers with the same key. @@ -255,7 +254,7 @@ message ExtAuthz { // If this field is set to false, header values will be sanitized, with any non-UTF-8-compliant // bytes replaced with '!'. Headers with the same key will have their values concatenated into a // single comma-separated header value. - // Requests to gRPC services will populate the field + // Requests to GRPC services will populate the field // :ref:`headers`. // Requests to HTTP services will have their header values sanitized and will not preserve // multi-line headers with the same key.
@@ -291,25 +290,6 @@ message ExtAuthz { // // If unset, defaults to true. google.protobuf.BoolValue enable_dynamic_metadata_ingestion = 27; - - // Additional metadata to be added to the filter state for logging purposes. The metadata will be - // added to StreamInfo's filter state under the namespace corresponding to the ext_authz filter - // name. - google.protobuf.Struct filter_metadata = 28; - - // When set to true, the filter will emit per-stream stats for access logging. The filter state - // key will be the same as the filter name. - // - // If using Envoy gRPC, emits latency, bytes sent / received, upstream info, and upstream cluster - // info. If not using Envoy gRPC, emits only latency. Note that stats are ONLY added to filter - // state if a check request is actually made to an ext_authz service. - // - // If this is false the filter will not emit stats, but filter_metadata will still be respected if - // it has a value. - // - // Field ``latency_us`` is exposed for CEL and logging when using gRPC or HTTP service. - // Fields ``bytesSent`` and ``bytesReceived`` are exposed for CEL and logging only when using gRPC service. - bool emit_filter_state_stats = 29; } // Configuration for buffering the request data. @@ -399,8 +379,8 @@ message AuthorizationRequest { // // .. note:: // - // In addition to the user's supplied matchers, ``Host``, ``Method``, ``Path``, - // ``Content-Length``, and ``Authorization`` are **automatically included** in the list. + // In addition to the user's supplied matchers, ``Host``, ``Method``, ``Path``, + // ``Content-Length``, and ``Authorization`` are **automatically included** in the list. // // .. note:: // @@ -466,9 +446,7 @@ message ExtAuthzPerRoute { // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used.
- // If the filter is disabled by default and this is set to false, the filter will be enabled - // for this vhost or route. - bool disabled = 1; + bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; @@ -509,7 +487,7 @@ message CheckSettings { // :ref:`with_request_body ` // option for a specific route. // - // Please note that only one of ``with_request_body`` or + // Please note that only one of *with_request_body* or // :ref:`disable_request_body_buffering ` // may be specified. BufferSettings with_request_body = 3; diff --git a/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto b/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto index f7c1d84..aeaed7a 100644 --- a/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto +++ b/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/ext_proc.proto @@ -4,17 +4,13 @@ package envoy.extensions.filters.http.ext_proc.v3; import "envoy/config/common/mutation_rules/v3/mutation_rules.proto"; import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/grpc_service.proto"; -import "envoy/config/core/v3/http_service.proto"; import "envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; -import "xds/annotations/v3/status.proto"; - import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -31,6 +27,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The External Processing filter allows an external service to act on HTTP traffic in a flexible way.
+// **Current Implementation Status:** +// All options and processing modes are implemented except for the following: +// +// * "async mode" is not implemented. + // The filter communicates with an external gRPC service called an "external processor" // that can do a variety of things with the request and response: // @@ -97,7 +98,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` object in a namespace matching the filter // name. // -// [#next-free-field: 24] +// [#next-free-field: 20] message ExternalProcessor { // Describes the route cache action to be taken when an external processor response // is received in response to request headers. @@ -124,48 +125,7 @@ message ExternalProcessor { // Configuration for the gRPC service that the filter will communicate with. // The filter supports both the "Envoy" and "Google" gRPC clients. - // Only one of ``grpc_service`` or ``http_service`` can be set. - // It is required that one of them must be set. - config.core.v3.GrpcService grpc_service = 1 - [(udpa.annotations.field_migrate).oneof_promotion = "ext_proc_service_type"]; - - // Configuration for the HTTP service that the filter will communicate with. - // Only one of ``http_service`` or - // :ref:`grpc_service `. - // can be set. It is required that one of them must be set. - // - // If ``http_service`` is set, the - // :ref:`processing_mode ` - // can not be configured to send any body or trailers. i.e, http_service only supports - // sending request or response headers to the side stream server. - // - // With this configuration, Envoy behavior: - // - // 1. The headers are first put in a proto message - // :ref:`ProcessingRequest `. - // - // 2. This proto message is then transcoded into a JSON text. - // - // 3. Envoy then sends a HTTP POST message with content-type as "application/json", - // and this JSON text as body to the side stream server. 
- // - // After the side-stream receives this HTTP request message, it is expected to do as follows: - // - // 1. It converts the body, which is a JSON string, into a ``ProcessingRequest`` - // proto message to examine and mutate the headers. - // - // 2. It then sets the mutated headers into a new proto message - // :ref:`ProcessingResponse `. - // - // 3. It converts ``ProcessingResponse`` proto message into a JSON text. - // - // 4. It then sends a HTTP response back to Envoy with status code as "200", - // content-type as "application/json" and sets the JSON text as the body. - // - ExtProcHttpService http_service = 20 [ - (udpa.annotations.field_migrate).oneof_promotion = "ext_proc_service_type", - (xds.annotations.v3.field_status).work_in_progress = true - ]; + config.core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; // By default, if the gRPC stream cannot be established, or if it is closed // prematurely with an error, the filter will fail. Specifically, if the @@ -303,48 +263,6 @@ message ExternalProcessor { // backend stream lifetime. In this case, Envoy will eventually timeout the external processor stream according to this time limit. // The default value is 5000 milliseconds (5 seconds) if not specified. google.protobuf.Duration deferred_close_timeout = 19; - - // Send body to the side stream server once it arrives without waiting for the header response from that server. - // It only works for STREAMED body processing mode. For any other body processing modes, it is ignored. - // The server has two options upon receiving a header request: - // - // 1. Instant Response: send the header response as soon as the header request is received. - // - // 2. Delayed Response: wait for the body before sending any response. - // - // In all scenarios, the header-body ordering must always be maintained. - // - // If enabled Envoy will ignore the - // :ref:`mode_override ` - // value that the server sends in the header response. 
This is because Envoy may have already - // sent the body to the server, prior to processing the header response. - bool send_body_without_waiting_for_header_response = 21; - - // When :ref:`allow_mode_override - // ` is enabled and - // ``allowed_override_modes`` is configured, the filter config :ref:`processing_mode - // ` - // can only be overridden by the response message from the external processing server iff the - // :ref:`mode_override ` is allowed by - // the ``allowed_override_modes`` allow-list below. - // Since request_header_mode is not applicable in any way, it's ignored in comparison. - repeated ProcessingMode allowed_override_modes = 22; - - // Decorator to introduce custom logic that runs after a message received from - // the External Processor is processed, but before continuing filter chain iteration. - // - // .. note:: - // Response processors are currently in alpha. - // - // [#extension-category: envoy.http.ext_proc.response_processors] - config.core.v3.TypedExtensionConfig on_processing_response = 23 - [(xds.annotations.v3.field_status).work_in_progress = true]; -} - -// ExtProcHttpService is used for HTTP communication between the filter and the external processing service. -message ExtProcHttpService { - // Sets the HTTP service which the external processing requests must be sent to. 
- config.core.v3.HttpService http_service = 1; } // The MetadataOptions structure defines options for the sending and receiving of diff --git a/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto b/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto index 467320d..66c04ac 100644 --- a/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto +++ b/src/main/proto/envoy/extensions/filters/http/ext_proc/v3/processing_mode.proto @@ -22,18 +22,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message ProcessingMode { // Control how headers and trailers are handled enum HeaderSendMode { - // When used to configure the ext_proc filter :ref:`processing_mode - // `, - // the default HeaderSendMode depends on which part of the message is being processed. By - // default, request and response headers are sent, while trailers are skipped. - // - // When used in :ref:`mode_override - // ` or - // :ref:`allowed_override_modes - // `, - // a value of DEFAULT indicates that there is no change from the behavior that is configured for - // the filter in :ref:`processing_mode - // `. + // The default HeaderSendMode depends on which part of the message is being + // processed. By default, request and response headers are sent, + // while trailers are skipped. DEFAULT = 0; // Send the header or trailer. @@ -45,12 +36,11 @@ message ProcessingMode { // Control how the request and response bodies are handled // When body mutation by external processor is enabled, ext_proc filter will always remove - // the content length header in four cases below because content length can not be guaranteed + // the content length header in three cases below because content length can not be guaranteed // to be set correctly: // 1) STREAMED BodySendMode: header processing completes before body mutation comes back. 
// 2) BUFFERED_PARTIAL BodySendMode: body is buffered and could be injected in different phases. // 3) BUFFERED BodySendMode + SKIP HeaderSendMode: header processing (e.g., update content-length) is skipped. - // 4) FULL_DUPLEX_STREAMED BodySendMode: header processing completes before body mutation comes back. // // In Envoy's http1 codec implementation, removing content length will enable chunked transfer // encoding whenever feasible. The recipient (either client or server) must be able @@ -78,43 +68,9 @@ message ProcessingMode { // chunk. If the body exceeds the configured buffer limit, then the body contents // up to the buffer limit will be sent. BUFFERED_PARTIAL = 3; - - // Envoy streams the body to the server in pieces as they arrive. - // - // 1) The server may choose to buffer any number chunks of data before processing them. - // After it finishes buffering, the server processes the buffered data. Then it splits the processed - // data into any number of chunks, and streams them back to Envoy one by one. - // The server may continuously do so until the complete body is processed. - // The individual response chunk size is recommended to be no greater than 64K bytes, or - // :ref:`max_receive_message_length ` - // if EnvoyGrpc is used. - // - // 2) The server may also choose to buffer the entire message, including the headers (if header mode is - // ``SEND``), the entire body, and the trailers (if present), before sending back any response. - // The server response has to maintain the headers-body-trailers ordering. - // - // 3) Note that the server might also choose not to buffer data. That is, upon receiving a - // body request, it could process the data and send back a body response immediately. - // - // In this body mode: - // * The corresponding trailer mode has to be set to ``SEND``. - // * Envoy will send body and trailers (if present) to the server as they arrive. 
- // Sending the trailers (if present) is to inform the server the complete body arrives. - // In case there are no trailers, then Envoy will set - // :ref:`end_of_stream ` - // to true as part of the last body chunk request to notify the server that no other data is to be sent. - // * The server needs to send - // :ref:`StreamedBodyResponse ` - // to Envoy in the body response. - // * Envoy will stream the body chunks in the responses from the server to the upstream/downstream as they arrive. - - FULL_DUPLEX_STREAMED = 4; } // How to handle the request header. Default is "SEND". - // Note this field is ignored in :ref:`mode_override - // `, since mode - // overrides can only affect messages exchanged after the request header is processed. HeaderSendMode request_header_mode = 1 [(validate.rules).enum = {defined_only: true}]; // How to handle the response header. Default is "SEND". diff --git a/src/main/proto/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto b/src/main/proto/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto index 6eb2259..3684f99 100644 --- a/src/main/proto/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto +++ b/src/main/proto/envoy/extensions/filters/http/grpc_field_extraction/v3/config.proto @@ -52,9 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // Here are config requirements // -// 1. the target field should be among the following primitive types: `string`, -// `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, `fixed32`, -// `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`, `map`. +// 1. the target field should be among the following primitive types: `string`, `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, `fixed32`, `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`. // // 2. the target field could be repeated. 
// @@ -63,10 +61,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Output Format // ------------- // -// 1. the extracted field names/values will be wrapped in be ``field`` -> ``values``, which will be added in the dynamic ``metadata``. +// 1. the extracted field names/values will be wrapped in be ``field`` -> ``values``, which will be added in the dynamic ``metadata``. // -// 2. if the field value is empty, an empty ``Value`` will be set. +// 2. if the field value is empty, a empty ```` will be set. // // Performance // ----------- diff --git a/src/main/proto/envoy/extensions/filters/http/grpc_json_reverse_transcoder/v3/transcoder.proto b/src/main/proto/envoy/extensions/filters/http/grpc_json_reverse_transcoder/v3/transcoder.proto deleted file mode 100644 index ddcae1a..0000000 --- a/src/main/proto/envoy/extensions/filters/http/grpc_json_reverse_transcoder/v3/transcoder.proto +++ /dev/null @@ -1,61 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.http.grpc_json_reverse_transcoder.v3; - -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_reverse_transcoder.v3"; -option java_outer_classname = "TranscoderProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_json_reverse_transcoder/v3;grpc_json_reverse_transcoderv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: gRPC-JSON reverse transcoder] -// gRPC-JSON reverse transcoder :ref:`configuration overview `. -// [#extension: envoy.filters.http.grpc_json_reverse_transcoder] - -// [#next-free-field: 6] -// ``GrpcJsonReverseTranscoder`` is the filter configuration for the gRPC JSON -// reverse transcoder. 
The reverse transcoder acts as a bridge between a gRPC -// client and an HTTP/JSON server, converting the gRPC request into HTTP/JSON -// for the HTTP backend and the HTTP/JSON response back to gRPC for the gRPC -// client. This effectively reverses the behavior of the -// :ref:`grpc_json_transcoder filter `, -// allowing a gRPC client to communicate with an HTTP/JSON server. -message GrpcJsonReverseTranscoder { - // Supplies the filename of - // :ref:`the proto descriptor set - // ` for the gRPC services. - // If set, takes precedence over the ``descriptor_binary`` field. - string descriptor_path = 1; - - // Supplies the binary content of - // :ref:`the proto descriptor set - // ` for the gRPC services. - // If ``descriptor_path`` is set, this field is not used. - bytes descriptor_binary = 2; - - // The maximum size of a request body to be transcoded, in bytes. A body exceeding this size will - // provoke a ``gRPC status: ResourceExhausted`` response. - // - // Large values may cause envoy to use a lot of memory if there are many - // concurrent requests. - // - // If unset, the current stream buffer size is used. - google.protobuf.UInt32Value max_request_body_size = 3 [(validate.rules).uint32 = {gt: 0}]; - - // The maximum size of a response body to be transcoded, in bytes. A body exceeding this size will - // provoke a ``gRPC status: Internal`` response. - // - // Large values may cause envoy to use a lot of memory if there are many - // concurrent requests. - // - // If unset, the current stream buffer size is used. - google.protobuf.UInt32Value max_response_body_size = 4 [(validate.rules).uint32 = {gt: 0}]; - - // The name of the header field that has the API version of the request. 
- string api_version_header = 5; -} diff --git a/src/main/proto/envoy/extensions/filters/http/header_mutation/v3/header_mutation.proto b/src/main/proto/envoy/extensions/filters/http/header_mutation/v3/header_mutation.proto index ca951db..db267d2 100644 --- a/src/main/proto/envoy/extensions/filters/http/header_mutation/v3/header_mutation.proto +++ b/src/main/proto/envoy/extensions/filters/http/header_mutation/v3/header_mutation.proto @@ -3,7 +3,6 @@ syntax = "proto3"; package envoy.extensions.filters.http.header_mutation.v3; import "envoy/config/common/mutation_rules/v3/mutation_rules.proto"; -import "envoy/config/core/v3/base.proto"; import "udpa/annotations/status.proto"; @@ -20,10 +19,6 @@ message Mutations { // The request mutations are applied before the request is forwarded to the upstream cluster. repeated config.common.mutation_rules.v3.HeaderMutation request_mutations = 1; - // The ``path`` header query parameter mutations are applied after ``request_mutations`` and before the request - // is forwarded to the next filter in the filter chain. - repeated config.core.v3.KeyValueMutation query_parameter_mutations = 3; - // The response mutations are applied before the response is sent to the downstream client. repeated config.common.mutation_rules.v3.HeaderMutation response_mutations = 2; } diff --git a/src/main/proto/envoy/extensions/filters/http/health_check/v3/health_check.proto b/src/main/proto/envoy/extensions/filters/http/health_check/v3/health_check.proto index 49b465c..146ea3c 100644 --- a/src/main/proto/envoy/extensions/filters/http/health_check/v3/health_check.proto +++ b/src/main/proto/envoy/extensions/filters/http/health_check/v3/health_check.proto @@ -38,8 +38,7 @@ message HealthCheck { // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that - // must be healthy or degraded in order for the filter to return a 200. 
If any of - // the clusters configured here does not exist, the filter will not return a 200. + // must be healthy or degraded in order for the filter to return a 200. // // .. note:: // diff --git a/src/main/proto/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto b/src/main/proto/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto index 87f725e..edc9ef1 100644 --- a/src/main/proto/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto +++ b/src/main/proto/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto @@ -18,7 +18,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // IP tagging :ref:`configuration overview `. // [#extension: envoy.filters.http.ip_tagging] -// [#next-free-field: 6] message IPTagging { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ip_tagging.v2.IPTagging"; @@ -53,38 +52,6 @@ message IPTagging { repeated config.core.v3.CidrRange ip_list = 2; } - // Specify to which header the tags will be written. - message IpTagHeader { - // Describes how to apply the tags to the headers. - enum HeaderAction { - // (DEFAULT) The header specified in :ref:`ip_tag_header ` - // will be dropped, before the tags are applied. The incoming header will be "sanitized" regardless of whether the request is internal or external. - // - // Note that the header will be visible unsanitized to any filters that are invoked before the ip-tag-header filter, unless it has an *x-envoy* prefix. - SANITIZE = 0; - - // Tags will be appended to the header specified in - // :ref:`ip_tag_header `. - // - // Please note that this could cause the header to retain values set by the http client regardless of whether the request is internal or external. - APPEND_IF_EXISTS_OR_ADD = 1; - } - - // Header to use for ip-tagging. - // - // This header will be sanitized based on the config in - // :ref:`action ` - // rather than the defaults for x-envoy prefixed headers. 
- string header = 1 - [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; - - // Control if the :ref:`header ` - // will be sanitized, or be appended to. - // - // Default: *SANITIZE*. - HeaderAction action = 2; - } - // The type of request the filter should apply to. RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; @@ -92,9 +59,4 @@ message IPTagging { // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; - - // Specify to which header the tags will be written. - // - // If left unspecified, the tags will be appended to the ``x-envoy-ip-tags`` header. - IpTagHeader ip_tag_header = 5; } diff --git a/src/main/proto/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/src/main/proto/envoy/extensions/filters/http/jwt_authn/v3/config.proto index 02ab21d..41d694a 100644 --- a/src/main/proto/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/src/main/proto/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -35,7 +35,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * issuer: the principal that issues the JWT. If specified, it has to match the ``iss`` field in JWT. // * allowed audiences: the ones in the token have to be listed here. // * how to fetch public key JWKS to verify the token signature. -// * how to extract the JWT in the request. +// * how to extract JWT token in the request. // * how to pass successfully verified token payload. // // Example: @@ -63,9 +63,9 @@ message JwtProvider { message NormalizePayload { // Each claim in this list will be interpreted as a space-delimited string // and converted to a list of strings based on the delimited values. 
- // Example: a token with a claim ``scope: "email profile"`` is translated - // to dynamic metadata ``scope: ["email", "profile"]`` if this field is - // set value ``["scope"]``. This special handling of ``scope`` is + // Example: a token with a claim ``scopes: "email profile"`` is translated + // to dynamic metadata ``scopes: ["email", "profile"]`` if this field is + // set value ``["scopes"]``. This special handling of ``scopes`` is // recommended by `RFC8693 // `_. repeated string space_delimited_claims = 1; @@ -208,7 +208,7 @@ message JwtProvider { // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations // its provider specified or from the default locations. // - // Specify the HTTP headers to extract the JWT. For examples, following config: + // Specify the HTTP headers to extract JWT token. For examples, following config: // // .. code-block:: yaml // @@ -348,24 +348,23 @@ message JwtProvider { uint32 clock_skew_seconds = 10; // Enables JWT cache, its size is specified by ``jwt_cache_size``. - // Only valid JWTs are cached. + // Only valid JWT tokens are cached. JwtCacheConfig jwt_cache_config = 12; // Add JWT claim to HTTP Header // Specify the claim name you want to copy in which HTTP header. For examples, following config: // The claim must be of type; string, int, double, bool. Array type claims are not supported // - // .. literalinclude:: /_configs/repo/jwt_authn.yaml - // :language: yaml - // :lines: 44-48 - // :linenos: - // :lineno-start: 44 - // :caption: :download:`jwt_authn.yaml ` + // .. code-block:: yaml + // + // claim_to_headers: + // - name: x-jwt-claim-nested-claim + // claim: claim.nested.key // // This header is only reserved for jwt claim; any other value will be overwritten. repeated JwtClaimToHeader claim_to_headers = 15; - // Clears route cache in order to allow the JWT to correctly affect + // Clears route cache in order to allow JWT token to correctly affect // routing decisions. 
Filter clears all cached routes when: // // 1. The field is set to ``true``. @@ -378,13 +377,8 @@ message JwtProvider { // This message specifies JWT Cache configuration. message JwtCacheConfig { - // The unit is number of JWTs, default to 100. + // The unit is number of JWT tokens, default to 100. uint32 jwt_cache_size = 1; - - // The maximum size of a single cached token in bytes. - // If this field is not set or is set to 0, then the default value 4096 bytes is used. - // The maximum value for a token is inclusive. - uint32 jwt_max_token_size = 2; } // This message specifies how to fetch JWKS from remote and how to cache it. @@ -474,7 +468,7 @@ message JwksAsyncFetch { google.protobuf.Duration failed_refetch_duration = 2; } -// This message specifies a header location to extract the JWT. +// This message specifies a header location to extract JWT token. message JwtHeader { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader"; @@ -581,7 +575,7 @@ message JwtRequirement { // The requirement is always satisfied even if JWT is missing or the JWT // verification fails. A typical usage is: this filter is used to only verify // JWTs and pass the verified JWT payloads to another filter, the other filter - // will make decision. In this mode, all JWTs will be verified. + // will make decision. In this mode, all JWT tokens will be verified. google.protobuf.Empty allow_missing_or_failed = 5; // The requirement is satisfied if JWT is missing, but failed if JWT is @@ -689,7 +683,8 @@ message FilterStateRule { // A map of string keys to requirements. The string key is the string value // in the FilterState with the name specified in the ``name`` field above. - map requires = 3; + map + requires = 3; } // This is the Envoy HTTP filter config for JWT authentication. 
@@ -734,7 +729,7 @@ message FilterStateRule { // - provider_name: provider1 // - provider_name: provider2 // -// [#next-free-field: 8] +// [#next-free-field: 7] message JwtAuthentication { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication"; @@ -812,9 +807,6 @@ message JwtAuthentication { // in the body along with WWWAuthenticate header value set with "invalid token". If this value is set to true, // the response details will be stripped and only a 401 response code will be returned. Default value is false bool strip_failure_response = 6; - - // Optional additional prefix to use when emitting statistics. - string stat_prefix = 7; } // Specify per-route config. diff --git a/src/main/proto/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto b/src/main/proto/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto index b0199c0..a32475f 100644 --- a/src/main/proto/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto +++ b/src/main/proto/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto @@ -3,7 +3,6 @@ syntax = "proto3"; package envoy.extensions.filters.http.local_ratelimit.v3; import "envoy/config/core/v3/base.proto"; -import "envoy/config/route/v3/route_components.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; import "envoy/type/v3/http_status.proto"; import "envoy/type/v3/token_bucket.proto"; @@ -23,7 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Local Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.local_ratelimit] -// [#next-free-field: 19] +// [#next-free-field: 17] message LocalRateLimit { // The human readable prefix to use when emitting stats. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; @@ -148,32 +147,4 @@ message LocalRateLimit { // of the default ``UNAVAILABLE`` gRPC code for a rate limited gRPC call. 
The // HTTP code will be 200 for a gRPC response. bool rate_limited_as_resource_exhausted = 15; - - // Rate limit configuration that is used to generate a list of descriptor entries based on - // the request context. The generated entries will be used to find one or multiple matched rate - // limit rule from the ``descriptors``. - // If this is set, then - // :ref:`VirtualHost.rate_limits` or - // :ref:`RouteAction.rate_limits` fields - // will be ignored. - // - // .. note:: - // Not all configuration fields of - // :ref:`rate limit config ` is supported at here. - // Following fields are not supported: - // - // 1. :ref:`rate limit stage `. - // 2. :ref:`dynamic metadata `. - // 3. :ref:`disable_key `. - // 4. :ref:`override limit `. - repeated config.route.v3.RateLimit rate_limits = 17; - - // Specifies the max dynamic descriptors kept in the cache for a particular wildcard descriptor - // configured in the global :ref:`descriptors`. - // Wildcard descriptor means descriptor has one or more entries with just key and value omitted. For example if user has configured two descriptors - // with blank value entries, then max dynamic descriptors stored in the LRU cache will be 2 * max_dynamic_descriptors. - // Actual number of dynamic descriptors will depend on the cardinality of unique values received from the http request for the omitted - // values. - // Minimum is 1. Default is 20. 
- google.protobuf.UInt32Value max_dynamic_descriptors = 18 [(validate.rules).uint32 = {gte: 1}]; } diff --git a/src/main/proto/envoy/extensions/filters/http/lua/v3/lua.proto b/src/main/proto/envoy/extensions/filters/http/lua/v3/lua.proto index c8b9149..4092769 100644 --- a/src/main/proto/envoy/extensions/filters/http/lua/v3/lua.proto +++ b/src/main/proto/envoy/extensions/filters/http/lua/v3/lua.proto @@ -4,8 +4,6 @@ package envoy.extensions.filters.http.lua.v3; import "envoy/config/core/v3/base.proto"; -import "google/protobuf/wrappers.proto"; - import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; @@ -21,7 +19,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Lua :ref:`configuration overview `. // [#extension: envoy.filters.http.lua] -// [#next-free-field: 6] message Lua { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.lua.v2.Lua"; @@ -81,12 +78,6 @@ message Lua { // stat_prefix: bar_script # This emits lua.bar_script.errors etc. // string stat_prefix = 4; - - // If set to true, the Lua filter will clear the route cache automatically if the request - // headers are modified by the Lua script. If set to false, the Lua filter will not clear the - // route cache automatically. - // Default is true for backward compatibility. 
- google.protobuf.BoolValue clear_route_cache = 5; } message LuaPerRoute { diff --git a/src/main/proto/envoy/extensions/filters/http/oauth2/v3/oauth.proto b/src/main/proto/envoy/extensions/filters/http/oauth2/v3/oauth.proto index 90f4401..703e342 100644 --- a/src/main/proto/envoy/extensions/filters/http/oauth2/v3/oauth.proto +++ b/src/main/proto/envoy/extensions/filters/http/oauth2/v3/oauth.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.filters.http.oauth2.v3; -import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/config/route/v3/route_components.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; @@ -25,47 +24,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.filters.http.oauth2] // -// OAuth cookie configuration attributes. -// -message CookieConfig { - enum SameSite { - DISABLED = 0; - STRICT = 1; - LAX = 2; - NONE = 3; - } - - // The value used for the SameSite cookie attribute. - SameSite same_site = 1 [(validate.rules).enum = {defined_only: true}]; -} - -// [#next-free-field: 8] -message CookieConfigs { - // Configuration for the bearer token cookie. - CookieConfig bearer_token_cookie_config = 1; - - // Configuration for the OAuth HMAC cookie. - CookieConfig oauth_hmac_cookie_config = 2; - - // Configuration for the OAuth expires cookie. - CookieConfig oauth_expires_cookie_config = 3; - - // Configuration for the ID token cookie. - CookieConfig id_token_cookie_config = 4; - - // Configuration for the refresh token cookie. - CookieConfig refresh_token_cookie_config = 5; - - // Configuration for the OAuth nonce cookie. - CookieConfig oauth_nonce_cookie_config = 6; - - // Configuration for the code verifier cookie. 
- CookieConfig code_verifier_cookie_config = 7; -} - -// [#next-free-field: 6] message OAuth2Credentials { - // [#next-free-field: 8] + // [#next-free-field: 6] message CookieNames { // Cookie name to hold OAuth bearer token value. When the authentication server validates the // client and returns an authorization token back to the OAuth filter, no matter what format @@ -90,14 +50,6 @@ message OAuth2Credentials { // Cookie name to hold the refresh token. Defaults to ``RefreshToken``. string refresh_token = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; - - // Cookie name to hold the nonce value. Defaults to ``OauthNonce``. - string oauth_nonce = 6 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; - - // Cookie name to hold the PKCE code verifier. Defaults to ``OauthCodeVerifier``. - string code_verifier = 7 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; } // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. @@ -118,15 +70,11 @@ message OAuth2Credentials { // The cookie names used in OAuth filters flow. CookieNames cookie_names = 4; - - // The domain to set the cookie on. If not set, the cookie will default to the host of the request, not including the subdomains. - // This is useful when token cookies need to be shared across multiple subdomains. - string cookie_domain = 5; } // OAuth config // -// [#next-free-field: 23] +// [#next-free-field: 18] message OAuth2Config { enum AuthType { // The ``client_id`` and ``client_secret`` will be sent in the URL encoded request body. @@ -140,9 +88,6 @@ message OAuth2Config { // Endpoint on the authorization server to retrieve the access token from. config.core.v3.HttpUri token_endpoint = 1; - // Specifies the retry policy for requests to the OAuth server. If not specified, then no retries will be performed. 
- config.core.v3.RetryPolicy retry_policy = 18; - // The endpoint redirect to for authorization in response to unauthorized requests. string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; @@ -167,7 +112,7 @@ message OAuth2Config { bool forward_bearer_token = 7; // If set to true, preserve the existing authorization header. - // By default the client strips the existing authorization header before forwarding upstream. + // By default Envoy strips the existing authorization header before forwarding upstream. // Can not be set to true if forward_bearer_token is already set to true. // Default value is false. bool preserve_authorization_header = 16; @@ -190,7 +135,7 @@ message OAuth2Config { // If set to true, allows automatic access token refresh using the associated refresh token (see // `RFC 6749 section 6 `_), provided that the OAuth server supports that. - // Default value is true. + // Default value is false. google.protobuf.BoolValue use_refresh_token = 12; // The default lifetime in seconds of the access token, if omitted by the authorization server. @@ -208,32 +153,14 @@ message OAuth2Config { // // If this value is not set, it will default to ``604800s``. In this case, the cookie with the refresh token will be expired // in a week. - // This setting is only considered if ``use_refresh_token`` is set to true, otherwise the authorization server expiration or ``default_expires_in`` is used. + // This setting is only considered if ``use_refresh_token`` is set to true, otherwise the authorization server expiration or ``defaul_expires_in`` is used. google.protobuf.Duration default_refresh_token_expires_in = 15; - // If set to true, the client will not set a cookie for ID Token even if one is received from the Identity Provider. This may be useful in cases where the ID + // If set to true, Envoy will not set a cookie for ID Token even if one is received from the Identity Provider. 
This may be useful in cases where the ID // Token is too large for HTTP cookies (longer than 4096 characters). Enabling this option will only disable setting the cookie response header, the filter // will still process incoming ID Tokens as part of the HMAC if they are there. This is to ensure compatibility while switching this setting on. Future // sessions would not set the IdToken cookie header. bool disable_id_token_set_cookie = 17; - - // If set to true, the client will not set a cookie for Access Token even if one is received from the Identity Provider. - // Enabling this option will only disable setting the cookie response header, the filter - // will still process incoming Access Tokens as part of the HMAC if they are there. This is to ensure compatibility while switching this setting on. Future - // sessions would not set the Access Token cookie header. - bool disable_access_token_set_cookie = 19; - - // If set to true, the client will not set a cookie for Refresh Token even if one is received from the Identity Provider. - // Enabling this option will only disable setting the cookie response header, the filter - // will still process incoming Refresh Tokens as part of the HMAC if they are there. This is to ensure compatibility while switching this setting on. Future - // sessions would not set the Refresh Token cookie header. - bool disable_refresh_token_set_cookie = 20; - - // Controls for attributes that can be set on the cookies. - CookieConfigs cookie_configs = 21; - - // Optional additional prefix to use when emitting statistics. - string stat_prefix = 22; } // Filter config. 
diff --git a/src/main/proto/envoy/extensions/filters/http/proto_message_extraction/v3/config.proto b/src/main/proto/envoy/extensions/filters/http/proto_message_logging/v3/config.proto similarity index 58% rename from src/main/proto/envoy/extensions/filters/http/proto_message_extraction/v3/config.proto rename to src/main/proto/envoy/extensions/filters/http/proto_message_logging/v3/config.proto index dc51f9d..5b57e9c 100644 --- a/src/main/proto/envoy/extensions/filters/http/proto_message_extraction/v3/config.proto +++ b/src/main/proto/envoy/extensions/filters/http/proto_message_logging/v3/config.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package envoy.extensions.filters.http.proto_message_extraction.v3; +package envoy.extensions.filters.http.proto_message_logging.v3; import "envoy/config/core/v3/base.proto"; @@ -8,39 +8,33 @@ import "xds/annotations/v3/status.proto"; import "udpa/annotations/status.proto"; -option java_package = "io.envoyproxy.envoy.extensions.filters.http.proto_message_extraction.v3"; +option java_package = "io.envoyproxy.envoy.extensions.filters.http.proto_message_logging.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/proto_message_extraction/v3;proto_message_extractionv3"; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/proto_message_logging/v3;proto_message_loggingv3"; option (udpa.annotations.file_status).package_version_status = ACTIVE; option (xds.annotations.v3.file_status).work_in_progress = true; -// [#protodoc-title: Proto Message Extraction] +// [#not-implemented-hide:] +// [#protodoc-title: Proto Message Logging] +// Proto Message Logging :ref:`configuration overview +// `. 
+// [#extension: envoy.filters.http.proto_message_logging] // -// [#extension: envoy.filters.http.proto_message_extraction] -// -// Overview -// -------- -// -// ProtoMessageExtraction filter supports extracting gRPC -// requests/responses(proto messages) into google.protobuf.Struct and storing -// results in the dynamic metadata `envoy.filters.http.proto_message_extraction` -// for later access. -// -// Assumptions -// ----------- +// ProtoMessageLogging filter supports logging scrubbed gRPC requests/responses(proto messages) +// as google.protobuf.Struct and storing results +// in the dynamic metadata `envoy.filters.http.proto_message_logging` for later access. // +// # Assumptions // This filter assumes it is only applicable for gRPC with Protobuf as payload. // -// Process Flow -// ------------ -// +// # Process Flow // On the request path, it will check // // 1. if the incoming gRPC request is configured, the filter tries to: // // a. buffer the incoming data to complete protobuf messages -// b. extract individual protobuf messages according to directives +// b. log individual protobuf messages according to directives // c. write the result into the dynamic metadata. // d. pass through the request data // @@ -51,57 +45,50 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // 1. if the incoming gRPC request is configured, the filter tries to: // // a. buffer the incoming data to complete protobuf messages -// b. extract individual protobuf messages according to directives +// b. log individual protobuf messages according to directives // c. write the result into the dynamic metadata. // d. pass through the response data // // 2. otherwise, pass through the response. // -// Config Requirements -// ------------------- - +// # Config Requirements // Here are config requirements - -// 1. 
the extracted target field should be among the following primitive types: -// `string`, `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, -// `fixed32`, `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`. - +// +// 1. the log target field should be among the following primitive types: `string`, `uint32`, `uint64`, `int32`, `int64`, `sint32`, `sint64`, `fixed32`, `fixed64`, `sfixed32`, `sfixed64`, `float`, `double`. +// // 2. the target field could be repeated. - +// // 3. the intermediate type could also be repeated. - -// Output Format -// ------------- - -// The extracted requests and responses will be will be added in the dynamic -// ``metadata`` with the same layout of the message. - +// +// # Output Format +// The logged requests and responses will be will be added in the dynamic ``metadata`` with the same layout of the message. +// // For the default `FIRST_AND_LAST` mode, it will be like: - -// .. code-block:: json - // { // "requests":{ // "first":{ // "foo": "val_foo1", +// ... // } // "last":{ // "foo": "val_foo3", +// ... // } // }, // "responses":{ // "first":{ // "baz": "val_baz1", +// ... // } // "last":{ // "baz": "val_foo3", +// ... // } // } // } - -// Example for `FIRST_AND_LAST` mode -// --------------------------------- // +// +// # Example for `FIRST_AND_LAST` mode // Let's say we have the following definition for the bi-streaming request // `pkg.svc.Method`. // @@ -124,7 +111,7 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // // message Msg { // string bar = 1; -// string not_extracted = 2; +// string not_logged = 2; // } // // This is the filter config in JSON. 
@@ -134,15 +121,15 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // { // "descriptor_set":{}, // "mode": "FIRST_AND_LAST", -// "extraction_by_method":{ +// "logging_by_method":{ // "pkg.svc.Method":{ -// "request_extraction_by_field":{ -// "foo":"EXTRACT", -// "nested.doubled_nested.bar":"EXTRACT", -// "redacted":"EXTRACT_REDACT" +// "request_logging_by_field":{ +// "foo":"LOG", +// "nested.doubled_nested.bar":"LOG", +// "redacted":"LOG_REDACT" // }, -// "response_extraction_by_field":{ -// "bar":"EXTRACT", +// "response_logging_by_field":{ +// "bar":"LOG", // } // } // } @@ -155,18 +142,18 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // // { // "foo": "val_foo1", -// "nested": { "double_nested": {"bar": "val_bar1", "not_extracted": -// "val_not_extracted1"}, "redacted": { "bar": "val_redacted_bar1"} +// "nested": { "double_nested": {"bar": "val_bar1", "not_logged": "val_not_logged1"}, +// "redacted": { "bar": "val_redacted_bar1"} // } // { // "foo": "val_foo2", -// "nested": { "double_nested": {"bar": "val_bar2", "not_extracted": -// "val_not_extracted2"}, "redacted": { "bar": "val_redacted_bar2"} +// "nested": { "double_nested": {"bar": "val_bar2", "not_logged": "val_not_logged2"}, +// "redacted": { "bar": "val_redacted_bar2"} // } // { // "foo": "val_foo3", -// "nested": { "double_nested": {"bar": "val_bar3", "not_extracted": -// "val_not_extracted3"}, "redacted": { "bar": "val_redacted_bar3"} +// "nested": { "double_nested": {"bar": "val_bar3", "not_logged": "val_not_logged3"}, +// "redacted": { "bar": "val_redacted_bar3"} // } // // the filter receives the following `MethodResponse` message in @@ -185,7 +172,7 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // } // // The filter will write the following dynamic -// metadata(`envoy.filters.http.proto_message_extraction`) in JSON. +// metadata(`envoy.filters.http.proto_message_logging`) in JSON. // // .. 
code-block:: json // @@ -212,11 +199,11 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // } // } -message ProtoMessageExtractionConfig { - enum ExtractMode { - ExtractMode_UNSPECIFIED = 0; +message ProtoMessageLoggingConfig { + enum LogMode { + LogMode_UNSPECIFIED = 0; - // The filter will extract the first and the last message for + // The filter will log the first and the last message for // for streaming cases, containing // client-side streaming, server-side streaming or bi-directional streaming. FIRST_AND_LAST = 1; @@ -229,41 +216,40 @@ message ProtoMessageExtractionConfig { config.core.v3.DataSource data_source = 1; // Unimplemented, the key of proto descriptor TypedMetadata. - // Among filters depending on the proto descriptor, we can have a - // TypedMetadata for proto descriptors, so that these filters can share one - // copy of proto descriptor in memory. + // Among filters depending on the proto descriptor, we can have a TypedMetadata + // for proto descriptors, so that these filters can share one copy of proto + // descriptor in memory. string proto_descriptor_typed_metadata = 2; } - ExtractMode mode = 3; + LogMode mode = 3; - // Specify the message extraction info. + // Specify the message logging info. // The key is the fully qualified gRPC method name. // ``${package}.${Service}.${Method}``, like // ``endpoints.examples.bookstore.BookStore.GetShelf`` // - // The value is the message extraction information for individual gRPC - // methods. - map extraction_by_method = 4; + // The value is the message logging information for individual gRPC methods. + map logging_by_method = 4; } // This message can be used to support per route config approach later even // though the Istio doesn't support that so far. -message MethodExtraction { - enum ExtractDirective { - ExtractDirective_UNSPECIFIED = 0; +message MethodLogging { + enum LogDirective { + LogDirective_UNSPECIFIED = 0; - // The value of this field will be extracted. 
- EXTRACT = 1; + // The value of this field will be logged. + LOG = 1; // It should be only annotated on Message type fields so if the field isn't - // empty, an empty Struct will be extracted. - EXTRACT_REDACT = 2; + // empty, an empty Struct will be logged. + LOG_REDACT = 2; } - // The mapping of field path to its ExtractDirective for request messages - map request_extraction_by_field = 2; + // The mapping of field path to its LogDirective for request messages + map request_logging_by_field = 2; - // The mapping of field path to its ExtractDirective for response messages - map response_extraction_by_field = 3; + // The mapping of field path to its LogDirective for response messages + map response_logging_by_field = 3; } diff --git a/src/main/proto/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto b/src/main/proto/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto index f135424..3e33536 100644 --- a/src/main/proto/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto +++ b/src/main/proto/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto @@ -3,8 +3,10 @@ syntax = "proto3"; package envoy.extensions.filters.http.ratelimit.v3; import "envoy/config/core/v3/base.proto"; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/ratelimit/v3/rls.proto"; import "envoy/config/route/v3/route_components.proto"; +import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/v3/http_status.proto"; import "google/protobuf/duration.proto"; @@ -23,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] -// [#next-free-field: 16] +// [#next-free-field: 14] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rate_limit.v2.RateLimit"; @@ -132,23 +134,221 @@ message RateLimit { // Optional additional prefix to use when emitting statistics. 
This allows to distinguish // emitted statistics between configured ``ratelimit`` filters in an HTTP filter chain. string stat_prefix = 13; +} - // If set, this will enable -- but not necessarily enforce -- the rate limit for the given - // fraction of requests. - // - // If not set then ``ratelimit.http_filter_enabled`` runtime key will be used to determine - // the fraction of requests to enforce rate limits on. And the default percentage of the - // runtime key is 100% for backwards compatibility. - config.core.v3.RuntimeFractionalPercent filter_enabled = 14; +// Global rate limiting :ref:`architecture overview `. +// Also applies to Local rate limiting :ref:`using descriptors `. +// [#not-implemented-hide:] +message RateLimitConfig { + // [#next-free-field: 10] + message Action { + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("source_cluster", "") + // + // is derived from the :option:`--service-cluster` option. + message SourceCluster { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("destination_cluster", "") + // + // Once a request matches against a route table rule, a routed cluster is determined by one of + // the following :ref:`route table configuration ` + // settings: + // + // * :ref:`cluster ` indicates the upstream cluster + // to route to. + // * :ref:`weighted_clusters ` + // chooses a cluster randomly from a set of clusters with attributed weight. + // * :ref:`cluster_header ` indicates which + // header in the request contains the target cluster. + message DestinationCluster { + } + + // The following descriptor entry is appended when a header contains a key that matches the + // ``header_name``: + // + // .. code-block:: cpp + // + // ("", "") + message RequestHeaders { + // The header name to be queried from the request headers. 
The header’s + // value is used to populate the value of the descriptor entry for the + // descriptor_key. + string header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The key to use in the descriptor entry. + string descriptor_key = 2 [(validate.rules).string = {min_len: 1}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when header is not present in the request. By default it skips calling the + // rate limiting service if this header is not present in the request. + bool skip_if_absent = 3; + } + + // The following descriptor entry is appended to the descriptor and is populated using the + // trusted address from :ref:`x-forwarded-for `: + // + // .. code-block:: cpp + // + // ("remote_address", "") + message RemoteAddress { + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("generic_key", "") + message GenericKey { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // An optional key to use in the descriptor entry. If not set it defaults + // to 'generic_key' as the descriptor key. + string descriptor_key = 2; + } + + // The following descriptor entry is appended to the descriptor: + // + // .. code-block:: cpp + // + // ("header_match", "") + message HeaderValueMatch { + // The value to use in the descriptor entry. + string descriptor_value = 1 [(validate.rules).string = {min_len: 1}]; + + // If set to true, the action will append a descriptor entry when the + // request matches the headers. If set to false, the action will append a + // descriptor entry when the request does not match the headers. The + // default value is true. + bool expect_match = 2; + + // Specifies a set of headers that the rate limit action should match + // on. 
The action will check the request’s headers against all the + // specified headers in the config. A match will happen if all the + // headers in the config are present in the request with the same values + // (or based on presence if the value field is not in the config). + repeated config.route.v3.HeaderMatcher headers = 3 + [(validate.rules).repeated = {min_items: 1}]; + } + + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + // [#next-free-field: 6] + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if ``metadata_key`` is empty. If not set and + // no value is present under the metadata_key then ``skip_if_absent`` is followed to + // skip calling the rate limiting service or skip the descriptor. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + + // If set to true, Envoy skips the descriptor while calling rate limiting service + // when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the + // rate limiting service in that case. + bool skip_if_absent = 5; + } + + oneof action_specifier { + option (validate.required) = true; + + // Rate limit on source cluster. + SourceCluster source_cluster = 1; + + // Rate limit on destination cluster. + DestinationCluster destination_cluster = 2; + + // Rate limit on request headers. 
+ RequestHeaders request_headers = 3; + + // Rate limit on remote address. + RemoteAddress remote_address = 4; + + // Rate limit on a generic key. + GenericKey generic_key = 5; + + // Rate limit on the existence of request headers. + HeaderValueMatch header_value_match = 6; + + // Rate limit on metadata. + MetaData metadata = 8; + + // Rate limit descriptor extension. See the rate limit descriptor extensions documentation. + // [#extension-category: envoy.rate_limit_descriptors] + config.core.v3.TypedExtensionConfig extension = 9; + } + } + + message Override { + // Fetches the override from the dynamic metadata. + message DynamicMetadata { + // Metadata struct that defines the key and path to retrieve the struct value. + // The value must be a struct containing an integer "requests_per_unit" property + // and a "unit" property with a value parseable to :ref:`RateLimitUnit + // enum ` + type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}]; + } + + oneof override_specifier { + option (validate.required) = true; + + // Limit override from dynamic metadata. + DynamicMetadata dynamic_metadata = 1; + } + } - // If set, this will enforce the rate limit decisions for the given fraction of requests. + // Refers to the stage set in the filter. The rate limit configuration only + // applies to filters with the same stage number. The default stage number is + // 0. // - // Note: this only applies to the fraction of enabled requests. + // .. note:: // - // If not set then ``ratelimit.http_filter_enforcing`` runtime key will be used to determine - // the fraction of requests to enforce rate limits on. And the default percentage of the - // runtime key is 100% for backwards compatibility. - config.core.v3.RuntimeFractionalPercent filter_enforced = 15; + // The filter supports a range of 0 - 10 inclusively for stage numbers. 
+ uint32 stage = 1 [(validate.rules).uint32 = {lte: 10}]; + + // The key to be set in runtime to disable this rate limit configuration. + string disable_key = 2; + + // A list of actions that are to be applied for this rate limit configuration. + // Order matters as the actions are processed sequentially and the descriptor + // is composed by appending descriptor entries in that sequence. If an action + // cannot append a descriptor entry, no descriptor is generated for the + // configuration. See :ref:`composing actions + // ` for additional documentation. + repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; + + // An optional limit override to be appended to the descriptor produced by this + // rate limit configuration. If the override value is invalid or cannot be resolved + // from metadata, no override is provided. See :ref:`rate limit override + // ` for more information. + Override limit = 4; } message RateLimitPerRoute { @@ -188,24 +388,11 @@ message RateLimitPerRoute { // [#not-implemented-hide:] OverrideOptions override_option = 2 [(validate.rules).enum = {defined_only: true}]; - // Rate limit configuration that is used to generate a list of descriptor entries based on - // the request context. The generated entries will be used to find one or multiple matched rate - // limit rule from the ``descriptors``. - // If this is set, then + // Rate limit configuration. If not set, uses the // :ref:`VirtualHost.rate_limits` or - // :ref:`RouteAction.rate_limits` fields - // will be ignored. - // - // .. note:: - // Not all configuration fields of - // :ref:`rate limit config ` is supported at here. - // Following fields are not supported: - // - // 1. :ref:`rate limit stage `. - // 2. :ref:`dynamic metadata `. - // 3. :ref:`disable_key `. - // 4. :ref:`override limit `. - repeated config.route.v3.RateLimit rate_limits = 3; + // :ref:`RouteAction.rate_limits` fields instead. 
+ // [#not-implemented-hide:] + repeated RateLimitConfig rate_limits = 3; // Overrides the domain. If not set, uses the filter-level domain instead. string domain = 4; diff --git a/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto b/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto index 6efd47a..649869a 100644 --- a/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto +++ b/src/main/proto/envoy/extensions/filters/http/rbac/v3/rbac.proto @@ -27,53 +27,48 @@ message RBAC { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.rbac.v2.RBAC"; - // The primary RBAC policy which will be applied globally, to all the incoming requests. - // - // * If absent, no RBAC enforcement occurs. - // * If set but empty, all requests are denied. - // - // .. note:: - // - // When both ``rules`` and ``matcher`` are configured, ``rules`` will be ignored. - // + // Specify the RBAC rules to be applied globally. + // If absent, no enforcing RBAC policy will be applied. + // If present and empty, DENY. + // If both rules and matcher are configured, rules will be ignored. config.rbac.v3.RBAC rules = 1 [(udpa.annotations.field_migrate).oneof_promotion = "rules_specifier"]; // If specified, rules will emit stats with the given prefix. - // This is useful for distinguishing metrics when multiple RBAC filters are configured. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // rules. string rules_stat_prefix = 6; - // Match tree for evaluating RBAC actions on incoming requests. Requests not matching any matcher will be denied. - // - // * If absent, no RBAC enforcement occurs. - // * If set but empty, all requests are denied. - // - xds.type.matcher.v3.Matcher matcher = 4 - [(udpa.annotations.field_migrate).oneof_promotion = "rules_specifier"]; + // The match tree to use when resolving RBAC action for incoming requests. 
Requests do not + // match any matcher will be denied. + // If absent, no enforcing RBAC matcher will be applied. + // If present and empty, deny all requests. + xds.type.matcher.v3.Matcher matcher = 4 [ + (udpa.annotations.field_migrate).oneof_promotion = "rules_specifier", + (xds.annotations.v3.field_status).work_in_progress = true + ]; - // Shadow policy for testing RBAC rules without enforcing them. These rules generate stats and logs but do not deny - // requests. If absent, no shadow RBAC policy will be applied. - // - // .. note:: - // - // When both ``shadow_rules`` and ``shadow_matcher`` are configured, ``shadow_rules`` will be ignored. - // + // Shadow rules are not enforced by the filter (i.e., returning a 403) + // but will emit stats and logs and can be used for rule testing. + // If absent, no shadow RBAC policy will be applied. + // If both shadow rules and shadow matcher are configured, shadow rules will be ignored. config.rbac.v3.RBAC shadow_rules = 2 [(udpa.annotations.field_migrate).oneof_promotion = "shadow_rules_specifier"]; + // The match tree to use for emitting stats and logs which can be used for rule testing for + // incoming requests. // If absent, no shadow matcher will be applied. - // Match tree for testing RBAC rules through stats and logs without enforcing them. - // If absent, no shadow matching occurs. xds.type.matcher.v3.Matcher shadow_matcher = 5 [ (udpa.annotations.field_migrate).oneof_promotion = "shadow_rules_specifier", (xds.annotations.v3.field_status).work_in_progress = true ]; // If specified, shadow rules will emit stats with the given prefix. - // This is useful for distinguishing metrics when multiple RBAC filters use shadow rules. + // This is useful to distinguish the stat when there are more than 1 RBAC filter configured with + // shadow rules. string shadow_rules_stat_prefix = 3; - // If ``track_per_rule_stats`` is ``true``, counters will be published for each rule and shadow rule. 
+ // If track_per_rule_stats is true, counters will be published for each rule and shadow rule. bool track_per_rule_stats = 7; } @@ -83,7 +78,7 @@ message RBACPerRoute { reserved 1; - // Per-route specific RBAC configuration that overrides the global RBAC configuration. - // If absent, RBAC policy will be disabled for this route. + // Override the global configuration of the filter with this new config. + // If absent, the global RBAC policy will be disabled for this route. RBAC rbac = 2; } diff --git a/src/main/proto/envoy/extensions/filters/http/router/v3/router.proto b/src/main/proto/envoy/extensions/filters/http/router/v3/router.proto index d3996a9..75bca96 100644 --- a/src/main/proto/envoy/extensions/filters/http/router/v3/router.proto +++ b/src/main/proto/envoy/extensions/filters/http/router/v3/router.proto @@ -119,11 +119,11 @@ message Router { // for more details. bool suppress_grpc_request_failure_code_stats = 7; - // Optional HTTP filters for the upstream HTTP filter chain. - // // .. note:: // Upstream HTTP filters are currently in alpha. // + // Optional HTTP filters for the upstream HTTP filter chain. + // // These filters will be applied for all requests that pass through the router. // They will also be applied to shadowed requests. // Upstream HTTP filters cannot change route or cluster. diff --git a/src/main/proto/envoy/extensions/filters/http/stateful_session/v3/stateful_session.proto b/src/main/proto/envoy/extensions/filters/http/stateful_session/v3/stateful_session.proto index 5cef3fc..aa07083 100644 --- a/src/main/proto/envoy/extensions/filters/http/stateful_session/v3/stateful_session.proto +++ b/src/main/proto/envoy/extensions/filters/http/stateful_session/v3/stateful_session.proto @@ -18,16 +18,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.filters.http.stateful_session] message StatefulSession { - // Specifies the implementation of session state. 
This session state is used to store and retrieve the address of the - // upstream host assigned to the session. + // Specific implementation of session state. This session state will be used to store and + // get address of the upstream host to which the session is assigned. // // [#extension-category: envoy.http.stateful_session] config.core.v3.TypedExtensionConfig session_state = 1; - // Determines whether the HTTP request must be strictly routed to the requested destination. When set to ``true``, - // if the requested destination is unavailable, Envoy will return a 503 status code. The default value is ``false``, - // which allows Envoy to fall back to its load balancing mechanism. In this case, if the requested destination is not - // found, the request will be routed according to the load balancing algorithm. + // If set to True, the HTTP request must be routed to the requested destination. + // If the requested destination is not available, Envoy returns 503. Defaults to False. bool strict = 2; } diff --git a/src/main/proto/envoy/extensions/filters/network/ext_proc/v3/ext_proc.proto b/src/main/proto/envoy/extensions/filters/network/ext_proc/v3/ext_proc.proto deleted file mode 100644 index d79f80c..0000000 --- a/src/main/proto/envoy/extensions/filters/network/ext_proc/v3/ext_proc.proto +++ /dev/null @@ -1,84 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.filters.network.ext_proc.v3; - -import "envoy/config/core/v3/grpc_service.proto"; - -import "google/protobuf/duration.proto"; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_proc.v3"; -option java_outer_classname = "ExtProcProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ext_proc/v3;ext_procv3"; -option (udpa.annotations.file_status).package_version_status = 
ACTIVE; -option (xds.annotations.v3.file_status).work_in_progress = true; - -// [#protodoc-title: External Processing Filter] -// External Processing Filter: Process network traffic using an external service. -// [#extension: envoy.filters.network.ext_proc] - -// The Network External Processing filter allows an external service to process raw TCP/UDP traffic -// in a flexible way using a bidirectional gRPC stream. Unlike the HTTP External Processing filter, -// this filter operates at the L4 (transport) layer, giving access to raw network traffic. -// -// The filter communicates with an external gRPC service that can: -// * Inspect traffic in both directions -// * Modify the network traffic -// * Control connection lifecycle (continue, close, or reset) -// -// By using the filter's processing mode, you can selectively choose which data -// directions to process (read, write or both), allowing for efficient processing. -message NetworkExternalProcessor { - // The gRPC service that will process network traffic. - // This service must implement the NetworkExternalProcessor service - // defined in the proto file /envoy/service/network_ext_proc/v3/external_processor.proto. - config.core.v3.GrpcService grpc_service = 1; - - // By default, if the gRPC stream cannot be established, or if it is closed - // prematurely with an error, the filter will fail, leading to the close of connection. - // With this parameter set to true, however, then if the gRPC stream is prematurely closed - // or could not be opened, processing continues without error. - // [#not-implemented-hide:] - bool failure_mode_allow = 2; - - // Options for controlling processing behavior. - // [#not-implemented-hide:] - ProcessingMode processing_mode = 3; - - // Specifies the timeout for each individual message sent on the stream and - // when the filter is running in synchronous mode. 
Whenever - // the proxy sends a message on the stream that requires a response, it will - // reset this timer, and will stop processing and return an error (subject - // to the processing mode) if the timer expires. Default is 200 ms. - // [#not-implemented-hide:] - google.protobuf.Duration message_timeout = 4 [(validate.rules).duration = { - lte {seconds: 3600} - gte {} - }]; -} - -// Options for controlling processing behavior. -// Filter will reject the config if both read and write are SKIP mode. -message ProcessingMode { - // Defines how traffic should be handled by the external processor. - enum DataSendMode { - // Send the data to the external processor for processing whenever the data is ready. - STREAMED = 0; - - // Skip sending the data to the external processor. - SKIP = 1; - } - - // Controls whether inbound (read) data from the client is sent to the external processor. - // Default: STREAMED - DataSendMode process_read = 1; - - // Controls whether outbound (write) data to the client is sent to the external processor. - // Default: STREAMED - DataSendMode process_write = 2; -} diff --git a/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index ce549d6..434f977 100644 --- a/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/src/main/proto/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -37,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // HTTP connection manager :ref:`configuration overview `. 
// [#extension: envoy.filters.network.http_connection_manager] -// [#next-free-field: 59] +// [#next-free-field: 58] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; @@ -58,8 +58,9 @@ message HttpConnectionManager { // Prior knowledge is allowed). HTTP2 = 2; - // The connection manager will assume that the client is speaking HTTP/3. - // This needs to be consistent with listener and transport socket config. + // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with + // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient + // to distinguish HTTP1 and HTTP2 traffic. HTTP3 = 3; } @@ -185,6 +186,14 @@ message HttpConnectionManager { // Configuration for an external tracing provider. // If not specified, no tracing will be performed. + // + // .. attention:: + // Please be aware that ``envoy.tracers.opencensus`` provider can only be configured once + // in Envoy lifetime. + // Any attempts to reconfigure it or to use different configurations for different HCM filters + // will be rejected. + // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes + // on OpenCensus side. config.trace.v3.Tracing.Http provider = 9; // Create separate tracing span for each upstream request if true. And if this flag is set to true, @@ -438,21 +447,6 @@ message HttpConnectionManager { config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; - // If set to true, Envoy will not start a drain timer for downstream HTTP1 connections after - // :ref:`common_http_protocol_options.max_connection_duration - // ` passes. 
- // Instead, Envoy will wait for the next downstream request, add connection:close to the response - // headers, then close the connection after the stream ends. - // - // This behavior is compliant with `RFC 9112 section 9.6 `_ - // - // If set to false, ``max_connection_duration`` will cause Envoy to enter the normal drain - // sequence for HTTP1 with Envoy eventually closing the connection (once there are no active - // streams). - // - // Has no effect if ``max_connection_duration`` is unset. Defaults to false. - bool http1_safe_max_connection_duration = 58; - // Additional HTTP/1 settings that are passed to the HTTP/1 codec. // [#comment:TODO: The following fields are ignored when the // :ref:`header validation configuration ` @@ -465,6 +459,7 @@ message HttpConnectionManager { [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/3 settings that are passed directly to the HTTP/3 codec. + // [#not-implemented-hide:] config.core.v3.Http3ProtocolOptions http3_protocol_options = 44; // An optional override that the connection manager will write to the server @@ -485,12 +480,7 @@ message HttpConnectionManager { // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. - // The default value can be overridden by setting runtime key ``envoy.reloadable_features.max_request_headers_size_kb``. // Requests that exceed this limit will receive a 431 response. - // - // Note: currently some protocol codecs impose limits on the maximum size of a single header: - // HTTP/2 (when using nghttp2) limits a single header to around 100kb. - // HTTP/3 limits a single header to around 1024kb. google.protobuf.UInt32Value max_request_headers_kb = 29 [(validate.rules).uint32 = {lte: 8192 gt: 0}]; @@ -599,33 +589,26 @@ message HttpConnectionManager { // emitted by the connection manager. 
repeated config.accesslog.v3.AccessLog access_log = 13; - // The interval to flush the above access logs. - // // .. attention:: - // - // This field is deprecated in favor of - // :ref:`access_log_flush_interval - // `. - // Note that if both this field and :ref:`access_log_flush_interval - // ` - // are specified, the former (deprecated field) is ignored. + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. + // Note that if both this field and :ref:`access_log_flush_interval + // ` + // are specified, the former (deprecated field) is ignored. google.protobuf.Duration access_log_flush_interval = 54 [ deprecated = true, (validate.rules).duration = {gte {nanos: 1000000}}, (envoy.annotations.deprecated_at_minor_version) = "3.0" ]; - // If set to true, HCM will flush an access log once when a new HTTP request is received, after the request - // headers have been evaluated, and before iterating through the HTTP filter chain. - // // .. attention:: - // - // This field is deprecated in favor of - // :ref:`flush_access_log_on_new_request - // `. - // Note that if both this field and :ref:`flush_access_log_on_new_request - // ` - // are specified, the former (deprecated field) is ignored. + // This field is deprecated in favor of + // :ref:`flush_access_log_on_new_request + // `. + // Note that if both this field and :ref:`flush_access_log_on_new_request + // ` + // are specified, the former (deprecated field) is ignored. bool flush_access_log_on_new_request = 55 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; @@ -683,7 +666,7 @@ message HttpConnectionManager { // information about internal/external addresses. // // .. warning:: - // As of Envoy 1.33.0 no IP addresses will be considered trusted. If you have tooling such as probes + // In the next release, no IP addresses will be considered trusted. If you have tooling such as probes // on your private network which need to be treated as trusted (e.g. 
changing arbitrary x-envoy headers) // you will have to manually include those addresses or CIDR ranges like: // diff --git a/src/main/proto/envoy/extensions/filters/network/rbac/v3/rbac.proto b/src/main/proto/envoy/extensions/filters/network/rbac/v3/rbac.proto index 9032a65..823e182 100644 --- a/src/main/proto/envoy/extensions/filters/network/rbac/v3/rbac.proto +++ b/src/main/proto/envoy/extensions/filters/network/rbac/v3/rbac.proto @@ -4,8 +4,6 @@ package envoy.extensions.filters.network.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; -import "google/protobuf/duration.proto"; - import "xds/annotations/v3/status.proto"; import "xds/type/matcher/v3/matcher.proto"; @@ -28,7 +26,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. -// [#next-free-field: 9] +// [#next-free-field: 8] message RBAC { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.rbac.v2.RBAC"; @@ -89,10 +87,4 @@ message RBAC { // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to // CONTINUOUS to enforce RBAC policies on every message boundary. EnforcementType enforcement_type = 4; - - // Delay the specified duration before closing the connection when the policy evaluation - // result is ``DENY``. If this is not present, the connection will be closed immediately. - // This is useful to provide a better protection for Envoy against clients that retries - // aggressively when the connection is rejected by the RBAC filter. 
- google.protobuf.Duration delay_deny = 8; } diff --git a/src/main/proto/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto b/src/main/proto/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto index 21c87a1..28e351f 100644 --- a/src/main/proto/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto +++ b/src/main/proto/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto @@ -3,7 +3,6 @@ syntax = "proto3"; package envoy.extensions.filters.network.redis_proxy.v3; import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/grpc_service.proto"; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; import "google/protobuf/duration.proto"; @@ -26,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Redis Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.redis_proxy] -// [#next-free-field: 12] +// [#next-free-field: 10] message RedisProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy"; @@ -354,22 +353,6 @@ message RedisProxy { // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no ACL is set" error will be returned. config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; - - // External authentication configuration. If set, instead of validating username and password against ``downstream_auth_username`` and ``downstream_auth_password``, - // the filter will call an external gRPC service to authenticate the client. - // A typical usage of this feature is for situations where the password is a one-time token that needs to be validated against a remote service, like a sidecar. 
- // Expiration is also supported, which will disable any further commands from the client after the expiration time, unless a new AUTH command is received and the external auth service returns a new expiration time. - // If the external auth service returns an error, authentication is considered failed. - // If this setting is set together with ``downstream_auth_username`` and ``downstream_auth_password``, the external auth service will be source of truth, but those fields will still be used for downstream authentication to the cluster. - // The API is defined by :ref:`RedisProxyExternalAuthRequest `. - RedisExternalAuthProvider external_auth_provider = 10; - - // Optional configure redis custom commands for the proxy, eg -> ["my_custom_cmd1", "my_custom_cmd2"] - // - // .. note:: - // The is to support redis's feature wherein new commands can be added using redis' modules api: - // https://redis.io/docs/latest/develop/reference/modules/ - repeated string custom_commands = 11; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in @@ -387,22 +370,3 @@ message RedisProtocolOptions { // ``_ in the server's configuration file. config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } - -// RedisExternalAuthProvider specifies a gRPC service that can be used to authenticate Redis clients. -// This service will be called every time an AUTH command is received from a client. -// If the service returns an error, authentication is considered failed. -// If the service returns a success, the client is considered authenticated. -// The service can also return an expiration timestamp, which will be used to disable any further -// commands from the client after it passes, unless a new AUTH command is received and the -// external auth service returns a new expiration timestamp. -message RedisExternalAuthProvider { - // External auth gRPC service configuration. 
- // It will be called every time an AUTH command is received from a client. - config.core.v3.GrpcService grpc_service = 1; - - // If set to true, the filter will expect an expiration timestamp in the response from the external - // auth service. This timestamp will be used to disable any further commands from the client after - // the expiration time, unless a new AUTH command is received and the external auth service returns - // a new expiration timestamp. - bool enable_auth_expiration = 2; -} diff --git a/src/main/proto/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto b/src/main/proto/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto index 0f01889..1ab471e 100644 --- a/src/main/proto/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto +++ b/src/main/proto/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3/sni_dynamic_forward_proxy.proto @@ -33,10 +33,4 @@ message FilterConfig { // The port number to connect to the upstream. uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; } - - // When this flag is set, the filter will add the resolved upstream address in the filter - // state. The state should be saved with key - // ``envoy.stream.upstream_address`` (See - // :repo:`upstream_address.h`). 
- bool save_upstream_address = 3; } diff --git a/src/main/proto/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto b/src/main/proto/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto index f4d57c9..b0f7602 100644 --- a/src/main/proto/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto +++ b/src/main/proto/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto @@ -3,10 +3,8 @@ syntax = "proto3"; package envoy.extensions.filters.network.tcp_proxy.v3; import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/backoff.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; -import "envoy/config/core/v3/proxy_protocol.proto"; import "envoy/type/v3/hash_policy.proto"; import "google/protobuf/duration.proto"; @@ -27,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] -// [#next-free-field: 20] +// [#next-free-field: 18] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; @@ -177,9 +175,9 @@ message TcpProxy { // :ref:`TcpProxy.weighted_clusters `. OnDemand on_demand = 14; - // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints - // in the upstream cluster with metadata matching what is set in this field will be considered - // for load balancing. The filter name should be specified as ``envoy.lb``. + // Optional endpoint metadata match criteria. Only endpoints in the upstream + // cluster with metadata matching that set in metadata_match will be + // considered. The filter name should be specified as ``envoy.lb``. config.core.v3.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout @@ -213,9 +211,6 @@ message TcpProxy { // giving up. 
If the parameter is not specified, 1 connection attempt will be made. google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; - // Sets the backoff strategy. If not set, the retries are performed without backoff. - config.core.v3.BackoffStrategy backoff_options = 18; - // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. @@ -233,47 +228,29 @@ message TcpProxy { google.protobuf.Duration max_downstream_connection_duration = 13 [(validate.rules).duration = {gte {nanos: 1000000}}]; + // .. attention:: + // This field is deprecated in favor of + // :ref:`access_log_flush_interval + // `. // Note that if both this field and :ref:`access_log_flush_interval // ` // are specified, the former (deprecated field) is ignored. - // - // .. attention:: - // This field is deprecated in favor of - // :ref:`access_log_flush_interval - // `. google.protobuf.Duration access_log_flush_interval = 15 [ deprecated = true, (validate.rules).duration = {gte {nanos: 1000000}}, (envoy.annotations.deprecated_at_minor_version) = "3.0" ]; + // .. attention:: + // This field is deprecated in favor of + // :ref:`flush_access_log_on_connected + // `. // Note that if both this field and :ref:`flush_access_log_on_connected // ` // are specified, the former (deprecated field) is ignored. - // - // .. attention:: - // This field is deprecated in favor of - // :ref:`flush_access_log_on_connected - // `. bool flush_access_log_on_connected = 16 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Additional access log options for TCP Proxy. TcpAccessLogOptions access_log_options = 17; - - // If set, the specified PROXY protocol TLVs (Type-Length-Value) will be added to the PROXY protocol - // state created by the TCP proxy filter. 
These TLVs will be sent in the PROXY protocol v2 header - // to upstream. - // - // This field only takes effect when the TCP proxy filter is creating new PROXY protocol - // state and there is an upstream proxy protocol transport socket configured in the cluster. - // If the connection already contains PROXY protocol state (including any TLVs) parsed by a - // downstream proxy protocol listener filter, the TLVs specified here are ignored. - // - // .. note:: - // To ensure specified TLVs are allowed in the upstream PROXY protocol header, you must also - // configure the passthrough TLVs on the upstream proxy protocol transport. See - // :ref:`core.v3.ProxyProtocolConfig.pass_through_tlvs ` - // for details. - repeated config.core.v3.TlvEntry proxy_protocol_tlvs = 19; } diff --git a/src/main/proto/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto b/src/main/proto/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto index 6de0c1e..1d07668 100644 --- a/src/main/proto/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto +++ b/src/main/proto/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto @@ -3,9 +3,7 @@ syntax = "proto3"; package envoy.extensions.filters.udp.udp_proxy.v3; import "envoy/config/accesslog/v3/accesslog.proto"; -import "envoy/config/core/v3/backoff.proto"; import "envoy/config/core/v3/base.proto"; -import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/udp_socket_config.proto"; import "google/protobuf/any.proto"; @@ -63,11 +61,6 @@ message UdpProxyConfig { // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. google.protobuf.Any typed_config = 2; - - // Configuration source specifier for an extension configuration discovery - // service. In case of a failure and without the default configuration, the - // UDP session will be removed. 
- config.core.v3.ExtensionConfigSource config_discovery = 3; } } @@ -92,9 +85,6 @@ message UdpProxyConfig { // The maximum number of unsuccessful connection attempts that will be made before giving up. // If the parameter is not specified, 1 connection attempt will be made. google.protobuf.UInt32Value max_connect_attempts = 1; - - // Sets the backoff strategy. If not set, the retries are performed without backoff. - config.core.v3.BackoffStrategy backoff_options = 2; } // The hostname to send in the synthesized CONNECT headers to the upstream proxy. diff --git a/src/main/proto/envoy/extensions/formatter/cel/v3/cel.proto b/src/main/proto/envoy/extensions/formatter/cel/v3/cel.proto index 265f9dd..4e19fa5 100644 --- a/src/main/proto/envoy/extensions/formatter/cel/v3/cel.proto +++ b/src/main/proto/envoy/extensions/formatter/cel/v3/cel.proto @@ -31,9 +31,5 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // * ``%CEL(request.headers['x-log-mtls'] || request.url_path.contains('v1beta3'))%`` // Configuration for the CEL formatter. -// -// .. warning:: -// This extension is treated as built-in extension and will be enabled by default now. -// It is unnecessary to configure this extension. message Cel { } diff --git a/src/main/proto/envoy/extensions/formatter/metadata/v3/metadata.proto b/src/main/proto/envoy/extensions/formatter/metadata/v3/metadata.proto index 816a6be..a9c477a 100644 --- a/src/main/proto/envoy/extensions/formatter/metadata/v3/metadata.proto +++ b/src/main/proto/envoy/extensions/formatter/metadata/v3/metadata.proto @@ -56,9 +56,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // METADATA(DYNAMIC:NAMESPACE:KEY):Z is equivalent to :ref:`DYNAMIC_METADATA(NAMESPACE:KEY):Z` // METADATA(CLUSTER:NAMESPACE:KEY):Z is equivalent to :ref:`CLUSTER_METADATA(NAMESPACE:KEY):Z` // METADATA(UPSTREAM_HOST:NAMESPACE:KEY):Z is equivalent to :ref:`UPSTREAM_METADATA(NAMESPACE:KEY):Z` -// -// .. 
warning:: -// This extension is treated as built-in extension and will be enabled by default now. -// It is unnecessary to configure this extension. + message Metadata { } diff --git a/src/main/proto/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto b/src/main/proto/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto index db82293..de8bf98 100644 --- a/src/main/proto/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto +++ b/src/main/proto/envoy/extensions/formatter/req_without_query/v3/req_without_query.proto @@ -25,10 +25,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // the HTTP request header named X first and if it's not set, then request header Y is used. If // none of the headers are present '-' symbol will be in the log. -// .. warning:: -// Please use %PATH% to replace this extension. -// See :ref:`access log formats ` for more details. - // Configuration for the request without query formatter. message ReqWithoutQuery { } diff --git a/src/main/proto/envoy/extensions/geoip_providers/common/v3/common.proto b/src/main/proto/envoy/extensions/geoip_providers/common/v3/common.proto index e289751..91a9126 100644 --- a/src/main/proto/envoy/extensions/geoip_providers/common/v3/common.proto +++ b/src/main/proto/envoy/extensions/geoip_providers/common/v3/common.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.extensions.geoip_providers.common.v3; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -19,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message CommonGeoipProviderConfig { // The set of geolocation headers to add to request. If any of the configured headers is present // in the incoming request, it will be overridden by the :ref:`Geoip filter `. 
- // [#next-free-field: 13] + // [#next-free-field: 10] message GeolocationHeadersToAdd { // If set, the header will be used to populate the country ISO code associated with the IP address. string country = 1 @@ -38,16 +37,9 @@ message CommonGeoipProviderConfig { string asn = 4 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; - // This field is being deprecated, use ``anon`` instead. - string is_anon = 5 [ - deprecated = true, - (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}, - (envoy.annotations.deprecated_at_minor_version) = "3.0" - ]; - // If set, the IP address will be checked if it belongs to any type of anonymization network (e.g. VPN, public proxy etc) // and header will be populated with the check result. Header value will be set to either "true" or "false" depending on the check result. - string anon = 12 + string is_anon = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; // If set, the IP address will be checked if it belongs to a VPN and header will be populated with the check result. @@ -69,15 +61,6 @@ message CommonGeoipProviderConfig { // Header value will be set to either "true" or "false" depending on the check result. string anon_proxy = 9 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; - - // If set, the header will be used to populate the ISP associated with the IP address. - string isp = 10 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; - - // If set, the IP address will be checked if it belongs to the ISP named iCloud Private Relay and header will be populated with the check result. - // Header value will be set to either "true" or "false" depending on the check result. - string apple_private_relay = 11 - [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}]; } // Configuration for geolocation headers to add to request. 
diff --git a/src/main/proto/envoy/extensions/geoip_providers/maxmind/v3/maxmind.proto b/src/main/proto/envoy/extensions/geoip_providers/maxmind/v3/maxmind.proto index c83f9b5..3fc7f7c 100644 --- a/src/main/proto/envoy/extensions/geoip_providers/maxmind/v3/maxmind.proto +++ b/src/main/proto/envoy/extensions/geoip_providers/maxmind/v3/maxmind.proto @@ -20,11 +20,9 @@ option (xds.annotations.v3.file_status).work_in_progress = true; // MaxMind geolocation provider :ref:`configuration overview `. // At least one geolocation database path :ref:`city_db_path `, // :ref:`isp_db_path ` or -// :ref:`asn_db_path ` or // :ref:`anon_db_path ` must be configured. // [#extension: envoy.geoip_providers.maxmind] -// [#next-free-field: 6] message MaxMindConfig { // Full file path to the Maxmind city database, e.g. /etc/GeoLite2-City.mmdb. // Database file is expected to have .mmdb extension. @@ -32,16 +30,12 @@ message MaxMindConfig { // Full file path to the Maxmind ASN database, e.g. /etc/GeoLite2-ASN.mmdb. // Database file is expected to have .mmdb extension. - string asn_db_path = 2 [(validate.rules).string = {pattern: "^$|^.*\\.mmdb$"}]; + string isp_db_path = 2 [(validate.rules).string = {pattern: "^$|^.*\\.mmdb$"}]; // Full file path to the Maxmind anonymous IP database, e.g. /etc/GeoIP2-Anonymous-IP.mmdb. // Database file is expected to have .mmdb extension. string anon_db_path = 3 [(validate.rules).string = {pattern: "^$|^.*\\.mmdb$"}]; - // Full file path to the Maxmind ISP database, e.g. /etc/GeoLite2-ISP.mmdb. - // Database file is expected to have .mmdb extension. - string isp_db_path = 5 [(validate.rules).string = {pattern: "^$|^.*\\.mmdb$"}]; - // Common provider configuration that specifies which geolocation headers will be populated with geolocation data. 
common.v3.CommonGeoipProviderConfig common_provider_config = 4 [(validate.rules).message = {required: true}]; diff --git a/src/main/proto/envoy/extensions/http/ext_proc/response_processors/save_processing_response/v3/save_processing_response.proto b/src/main/proto/envoy/extensions/http/ext_proc/response_processors/save_processing_response/v3/save_processing_response.proto deleted file mode 100644 index 87f7b10..0000000 --- a/src/main/proto/envoy/extensions/http/ext_proc/response_processors/save_processing_response/v3/save_processing_response.proto +++ /dev/null @@ -1,67 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.http.ext_proc.response_processors.save_processing_response.v3; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.http.ext_proc.response_processors.save_processing_response.v3"; -option java_outer_classname = "SaveProcessingResponseProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/http/ext_proc/response_processors/save_processing_response/v3;save_processing_responsev3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; -option (xds.annotations.v3.file_status).work_in_progress = true; - -// [#protodoc-title: Save Processing Response from external processor.] -// [#extension: envoy.http.ext_proc.response_processors.save_processing_response] - -// Extension to save the :ref:`response -// ` from the external processor as -// filter state with name -// "envoy.http.ext_proc.response_processors.save_processing_response[.:ref:`filter_state_name_suffix -// `]. -// This extension supports saving of request and response headers and trailers, -// and immediate response. -// -// .. note:: -// Response processors are currently in alpha. 
-// -// [#next-free-field: 7] -message SaveProcessingResponse { - message SaveOptions { - // Whether or not to save the response for the response type. - bool save_response = 1; - - // When true, saves the response if there was an error when processing - // the response from the external processor. - bool save_on_error = 2; - } - - // The default filter state name is - // "envoy.http.ext_proc.response_processors.save_processing_response". - // If defined, ``filter_state_name_suffix`` is appended to this. - // For example, setting ``filter_state_name_suffix`` to "xyz" will set the - // filter state name to "envoy.http.ext_proc.response_processors.save_processing_response.xyz" - string filter_state_name_suffix = 1; - - // Save the response to filter state when :ref:`request_headers - // ` is set. - SaveOptions save_request_headers = 2; - - // Save the response to filter state when :ref:`response_headers - // ` is set. - SaveOptions save_response_headers = 3; - - // Save the response to filter state when :ref:`request_trailers - // ` is set. - SaveOptions save_request_trailers = 4; - - // Save the response to filter state when :ref:`response_trailers - // ` is set. - SaveOptions save_response_trailers = 5; - - // Save the response to filter state when :ref:`immediate_response - // ` is set. 
- SaveOptions save_immediate_response = 6; -} diff --git a/src/main/proto/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto b/src/main/proto/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto index d1dd5f0..b09b6f3 100644 --- a/src/main/proto/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto +++ b/src/main/proto/envoy/extensions/http/original_ip_detection/xff/v3/xff.proto @@ -2,10 +2,6 @@ syntax = "proto3"; package envoy.extensions.http.original_ip_detection.xff.v3; -import "envoy/config/core/v3/address.proto"; - -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.http.original_ip_detection.xff.v3"; @@ -26,45 +22,5 @@ message XffConfig { // determining the origin client's IP address. The default is zero if this option // is not specified. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. - // - // Only one of ``xff_num_trusted_hops`` and ``xff_trusted_cidrs`` can be set. uint32 xff_num_trusted_hops = 1; - - // The `CIDR `_ ranges to trust when - // evaluating the remote IP address to determine the original client's IP address. - // This is used instead of - // :ref:`use_remote_address `. - // When the remote IP address matches a trusted CIDR and the - // :ref:`config_http_conn_man_headers_x-forwarded-for` header was sent, each entry - // in the ``x-forwarded-for`` header is evaluated from right to left and the first - // public non-trusted address is used as the original client address. If all - // addresses in ``x-forwarded-for`` are within the trusted list, the first (leftmost) - // entry is used. - // - // This is typically used when requests are proxied by a - // `CDN `_. - // - // Only one of ``xff_num_trusted_hops`` and ``xff_trusted_cidrs`` can be set. 
- XffTrustedCidrs xff_trusted_cidrs = 2; - - // If set, Envoy will not append the remote address to the - // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. - // - // .. attention:: - // - // For proper proxy behaviour it is not recommended to set this option. - // For backwards compatibility, if this option is unset it defaults to true. - // - // This only applies when :ref:`use_remote_address - // ` - // is false, otherwise :ref:`skip_xff_append - // ` - // applies. - google.protobuf.BoolValue skip_xff_append = 3; -} - -message XffTrustedCidrs { - // The list of `CIDRs `_ from which remote - // connections are considered trusted. - repeated config.core.v3.CidrRange cidrs = 1; } diff --git a/src/main/proto/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto b/src/main/proto/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto index 9520f6d..c70360a 100644 --- a/src/main/proto/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto +++ b/src/main/proto/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3/client_side_weighted_round_robin.proto @@ -15,7 +15,7 @@ option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/loa option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client-Side Weighted Round Robin Load Balancing Policy] -// [#extension: envoy.load_balancing_policies.client_side_weighted_round_robin] +// [#not-implemented-hide:] // Configuration for the client_side_weighted_round_robin LB policy. // @@ -30,12 +30,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // regardless of result. Only failed queries count toward eps. A config // parameter error_utilization_penalty controls the penalty to adjust endpoint // weights using eps and qps. 
The weight of a given endpoint is computed as: -// ``qps / (utilization + eps/qps * error_utilization_penalty)``. +// qps / (utilization + eps/qps * error_utilization_penalty) // -// See the :ref:`load balancing architecture -// overview` for more information. +// See the :ref:`load balancing architecture overview` for more information. // -// [#next-free-field: 8] +// [#next-free-field: 7] message ClientSideWeightedRoundRobin { // Whether to enable out-of-band utilization reporting collection from // the endpoints. By default, per-request utilization reporting is used. @@ -69,10 +68,4 @@ message ClientSideWeightedRoundRobin { // calculated as eps/qps. Configuration is rejected if this value is negative. // Default is 1.0. google.protobuf.FloatValue error_utilization_penalty = 6 [(validate.rules).float = {gte: 0.0}]; - - // By default, endpoint weight is computed based on the :ref:`application_utilization ` field reported by the endpoint. - // If that field is not set, then utilization will instead be computed by taking the max of the values of the metrics specified here. - // For map fields in the ORCA proto, the string will be of the form ``.``. For example, the string ``named_metrics.foo`` will mean to look for the key ``foo`` in the ORCA :ref:`named_metrics ` field. - // If none of the specified metrics are present in the load report, then :ref:`cpu_utilization ` is used instead. - repeated string metric_names_for_computing_utilization = 7; } diff --git a/src/main/proto/envoy/extensions/load_balancing_policies/common/v3/common.proto b/src/main/proto/envoy/extensions/load_balancing_policies/common/v3/common.proto index 7addeb7..5152069 100644 --- a/src/main/proto/envoy/extensions/load_balancing_policies/common/v3/common.proto +++ b/src/main/proto/envoy/extensions/load_balancing_policies/common/v3/common.proto @@ -41,9 +41,6 @@ message LocalityLbConfig { // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a // failing service. 
bool fail_traffic_on_panic = 3; - - // If set to true, Envoy will force LocalityDirect routing if a local locality exists. - bool force_locality_direct_routing = 4; } // Configuration for :ref:`locality weighted load balancing diff --git a/src/main/proto/envoy/extensions/matching/common_inputs/network/v3/network_inputs.proto b/src/main/proto/envoy/extensions/matching/common_inputs/network/v3/network_inputs.proto index bea415a..59756bc 100644 --- a/src/main/proto/envoy/extensions/matching/common_inputs/network/v3/network_inputs.proto +++ b/src/main/proto/envoy/extensions/matching/common_inputs/network/v3/network_inputs.proto @@ -103,48 +103,3 @@ message ApplicationProtocolInput { message FilterStateInput { string key = 1 [(validate.rules).string = {min_len: 1}]; } - -// Input that matches dynamic metadata by key. -// DynamicMetadataInput provides a general interface using ``filter`` and ``path`` to retrieve value from -// :ref:`Metadata `. -// -// For example, for the following Metadata: -// -// .. code-block:: yaml -// -// filter_metadata: -// envoy.xxx: -// prop: -// foo: bar -// xyz: -// hello: envoy -// -// The following DynamicMetadataInput will retrieve a string value "bar" from the Metadata. -// -// .. code-block:: yaml -// -// filter: envoy.xxx -// path: -// - key: prop -// - key: foo -// -// [#extension: envoy.matching.inputs.dynamic_metadata] -message DynamicMetadataInput { - // Specifies the segment in a path to retrieve value from Metadata. - // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that - // if the segment key refers to a list, it has to be the last segment in a path. - message PathSegment { - oneof segment { - option (validate.required) = true; - - // If specified, use the key to retrieve the value in a Struct. - string key = 1 [(validate.rules).string = {min_len: 1}]; - } - } - - // The filter name to retrieve the Struct from the Metadata. 
- string filter = 1 [(validate.rules).string = {min_len: 1}]; - - // The path to retrieve the Value from the Struct. - repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; -} diff --git a/src/main/proto/envoy/extensions/matching/input_matchers/metadata/v3/metadata.proto b/src/main/proto/envoy/extensions/matching/input_matchers/metadata/v3/metadata.proto deleted file mode 100644 index 19d74fb..0000000 --- a/src/main/proto/envoy/extensions/matching/input_matchers/metadata/v3/metadata.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.matching.input_matchers.metadata.v3; - -import "envoy/type/matcher/v3/value.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.matching.input_matchers.metadata.v3"; -option java_outer_classname = "MetadataProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/matching/input_matchers/metadata/v3;metadatav3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: metadata matcher] -// [#extension: envoy.matching.matchers.metadata_matcher] - -// Metadata matcher for metadata from http matching input data. -message Metadata { - // The Metadata is matched if the value retrieved by metadata matching input is matched to this value. - type.matcher.v3.ValueMatcher value = 1 [(validate.rules).message = {required: true}]; - - // If true, the match result will be inverted. 
- bool invert = 4; -} diff --git a/src/main/proto/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto b/src/main/proto/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto index 2bc000e..8e9c114 100644 --- a/src/main/proto/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto +++ b/src/main/proto/envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.proto @@ -20,13 +20,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.network.dns_resolver.cares] // Configuration for c-ares DNS resolver. -// [#next-free-field: 9] +// [#next-free-field: 6] message CaresDnsResolverConfig { // A list of dns resolver addresses. // :ref:`use_resolvers_as_fallback` // below dictates if the DNS client should override system defaults or only use the provided // resolvers if the system defaults are not available, i.e., as a fallback. - repeated config.core.v3.Address resolvers = 1; + repeated config.core.v3.Address resolvers = 1 [(validate.rules).repeated = {min_items: 1}]; // If true use the resolvers listed in the // :ref:`resolvers` @@ -48,24 +48,4 @@ message CaresDnsResolverConfig { // This option allows for number of UDP based DNS queries to be capped. Note, this // is only applicable to c-ares DNS resolver currently. google.protobuf.UInt32Value udp_max_queries = 5; - - // The number of seconds each name server is given to respond to a query on the first try of any given server. - // - // Note: While the c-ares library defaults to 2 seconds, Envoy's default (if this field is unset) is 5 seconds. - // This adjustment was made to maintain the previous behavior after users reported an increase in DNS resolution times. - google.protobuf.UInt64Value query_timeout_seconds = 6 [(validate.rules).uint64 = {gte: 1}]; - - // The maximum number of query attempts the resolver will make before giving up. - // Each attempt may use a different name server. 
- // - // Note: While the c-ares library defaults to 3 attempts, Envoy's default (if this field is unset) is 4 attempts. - // This adjustment was made to maintain the previous behavior after users reported an increase in DNS resolution times. - google.protobuf.UInt32Value query_tries = 7 [(validate.rules).uint32 = {gte: 1}]; - - // Enable round-robin selection of name servers for DNS resolution. When enabled, the resolver will cycle through the - // list of name servers for each resolution request. This can help distribute the query load across multiple name - // servers. If disabled (default), the resolver will try name servers in the order they are configured. - // - // Note: This setting overrides any system configuration for name server rotation. - bool rotate_nameservers = 8; } diff --git a/src/main/proto/envoy/extensions/network/dns_resolver/getaddrinfo/v3/getaddrinfo_dns_resolver.proto b/src/main/proto/envoy/extensions/network/dns_resolver/getaddrinfo/v3/getaddrinfo_dns_resolver.proto index 522888a..0ffde4b 100644 --- a/src/main/proto/envoy/extensions/network/dns_resolver/getaddrinfo/v3/getaddrinfo_dns_resolver.proto +++ b/src/main/proto/envoy/extensions/network/dns_resolver/getaddrinfo/v3/getaddrinfo_dns_resolver.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package envoy.extensions.network.dns_resolver.getaddrinfo.v3; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.network.dns_resolver.getaddrinfo.v3"; @@ -29,7 +27,4 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Resolutions currently use a hard coded TTL of 60s because the getaddrinfo() API does not // provide the actual TTL. Configuration for this can be added in the future if needed. message GetAddrInfoDnsResolverConfig { - // Specifies the number of retries before the resolver gives up. 
If not specified, the resolver will - // retry indefinitely until it succeeds or the DNS query times out. - google.protobuf.UInt32Value num_retries = 1; } diff --git a/src/main/proto/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto b/src/main/proto/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto index 69b413e..ae26938 100644 --- a/src/main/proto/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto +++ b/src/main/proto/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto @@ -2,8 +2,6 @@ syntax = "proto3"; package envoy.extensions.network.socket_interface.v3; -import "google/protobuf/wrappers.proto"; - import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3"; @@ -17,30 +15,4 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for default socket interface that relies on OS dependent syscall to create // sockets. message DefaultSocketInterface { - // io_uring options. io_uring is only valid in Linux with at least kernel version 5.11. Otherwise, - // Envoy will fall back to use the default socket API. If not set then io_uring will not be - // enabled. - IoUringOptions io_uring_options = 1; -} - -message IoUringOptions { - // The size for io_uring submission queues (SQ). io_uring is built with a fixed size in each - // thread during configuration, and each io_uring operation creates a submission queue - // entry (SQE). The default is 1000. - google.protobuf.UInt32Value io_uring_size = 1; - - // Enable io_uring submission queue polling (SQPOLL). io_uring SQPOLL mode polls all SQEs in the - // SQ in the kernel thread. io_uring SQPOLL mode may reduce latency and increase CPU usage as a - // cost. The default is false. - bool enable_submission_queue_polling = 2; - - // The size of an io_uring socket's read buffer. 
Each io_uring read operation will allocate a - // buffer of the given size. If the given buffer is too small, the socket will have read multiple - // times for all the data. The default is 8192. - google.protobuf.UInt32Value read_buffer_size = 3; - - // The write timeout of an io_uring socket on closing in ms. io_uring writes and closes - // asynchronously. If the remote stops reading, the io_uring write operation may never complete. - // The operation is canceled and the socket is closed after the timeout. The default is 1000. - google.protobuf.UInt32Value write_timeout_ms = 4; } diff --git a/src/main/proto/envoy/extensions/quic/connection_debug_visitor/quic_stats/v3/quic_stats.proto b/src/main/proto/envoy/extensions/quic/connection_debug_visitor/quic_stats/v3/quic_stats.proto deleted file mode 100644 index 541fa8c..0000000 --- a/src/main/proto/envoy/extensions/quic/connection_debug_visitor/quic_stats/v3/quic_stats.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.connection_debug_visitor.quic_stats.v3; - -import "google/protobuf/duration.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.connection_debug_visitor.quic_stats.v3"; -option java_outer_classname = "QuicStatsProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/quic/connection_debug_visitor/quic_stats/v3;quic_statsv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC stats config] -// [#extension: envoy.quic.connection_debug_visitor.quic_stats] - -// Configuration for a QUIC debug visitor which emits stats from the underlying QUIC transport. -message Config { - // Period to update stats while the connection is open. If unset, updates only happen when the - // connection is closed. Stats are always updated one final time when the connection is closed. 
- google.protobuf.Duration update_period = 2 [(validate.rules).duration = {gte {nanos: 1000000}}]; -} diff --git a/src/main/proto/envoy/extensions/quic/connection_id_generator/quic_lb/v3/quic_lb.proto b/src/main/proto/envoy/extensions/quic/connection_id_generator/quic_lb/v3/quic_lb.proto deleted file mode 100644 index 446ff95..0000000 --- a/src/main/proto/envoy/extensions/quic/connection_id_generator/quic_lb/v3/quic_lb.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.quic.connection_id_generator.quic_lb.v3; - -import "envoy/config/core/v3/base.proto"; -import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.quic.connection_id_generator.quic_lb.v3"; -option java_outer_classname = "QuicLbProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/quic/connection_id_generator/quic_lb/v3;quic_lbv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: QUIC-LB connection ID generator config] -// [#extension: envoy.quic.connection_id_generator.quic_lb] - -// Configuration for a connection ID generator implementation for the QUIC-LB draft RFC for -// routable connection IDs. -// -// Connection IDs always have the length self encoded, as described in -// https://datatracker.ietf.org/doc/html/draft-ietf-quic-load-balancers#name-length-self-description. -// -// See https://datatracker.ietf.org/doc/html/draft-ietf-quic-load-balancers for details. -// -// .. warning:: -// -// This is still a work in progress. Performance is expected to be poor. Interoperability testing -// has not yet been performed. -// [#next-free-field: 6] -message Config { - option (xds.annotations.v3.message_status).work_in_progress = true; - - // Use the unencrypted mode. 
This is useful for testing, but allows for linking different CIDs - // for the same connection, and leaks information about the valid server IDs in use. This should - // only be used for testing. - bool unsafe_unencrypted_testing_mode = 1; - - // Must be at least 1 octet. - // The length of server_id and nonce_length_bytes must be 18 or less. - // See https://datatracker.ietf.org/doc/html/draft-ietf-quic-load-balancers#name-server-id-allocation. - config.core.v3.DataSource server_id = 2 [(validate.rules).message = {required: true}]; - - // Optional validation of the expected server ID length. If this is non-zero and the value in ``server_id`` - // does not have a matching length, a configuration error is generated. This can be useful for validating - // that the server ID is valid. - uint32 expected_server_id_length = 3 [(validate.rules).uint32 = {lte: 18}]; - - // The nonce length must be at least 4 bytes. - // The length of server_id and nonce_length_bytes must be 18 bytes or less. - uint32 nonce_length_bytes = 4 [(validate.rules).uint32 = {lte: 16 gte: 4}]; - - // Configuration to fetch the encryption key and configuration version. - // - // The SDS service is for a :ref:`GenericSecret `. - // The data should populate :ref:`secrets `: - // - // "encryption_key" must contain the 16 byte encryption key. - // - // "configuration_version" must contain a 1 byte unsigned integer of value less than 7. - // See https://datatracker.ietf.org/doc/html/draft-ietf-quic-load-balancers#name-config-rotation. 
- transport_sockets.tls.v3.SdsSecretConfig encryption_parameters = 5 - [(validate.rules).message = {required: true}]; -} diff --git a/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/datasource.proto b/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/datasource.proto index 5bea486..6baf398 100644 --- a/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/datasource.proto +++ b/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/datasource.proto @@ -4,6 +4,8 @@ package envoy.extensions.quic.server_preferred_address.v3; import "envoy/config/core/v3/base.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -18,6 +20,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for DataSourceServerPreferredAddressConfig. message DataSourceServerPreferredAddressConfig { + // [#comment:TODO(danzh2010): discuss with API shepherds before removing WiP status.] + + option (xds.annotations.v3.message_status).work_in_progress = true; + // Addresses for server preferred address for a single address family (IPv4 or IPv6). message AddressFamilyConfig { // The server preferred address sent to clients. The data must contain an IP address string. 
diff --git a/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/fixed_server_preferred_address_config.proto b/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/fixed_server_preferred_address_config.proto index 43072fd..20ec9a2 100644 --- a/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/fixed_server_preferred_address_config.proto +++ b/src/main/proto/envoy/extensions/quic/server_preferred_address/v3/fixed_server_preferred_address_config.proto @@ -4,6 +4,8 @@ package envoy.extensions.quic.server_preferred_address.v3; import "envoy/config/core/v3/address.proto"; +import "xds/annotations/v3/status.proto"; + import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.quic.server_preferred_address.v3"; @@ -17,6 +19,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for FixedServerPreferredAddressConfig. message FixedServerPreferredAddressConfig { + // [#comment:TODO(danzh2010): discuss with API shepherds before removing WiP status.] + + option (xds.annotations.v3.message_status).work_in_progress = true; + // Addresses for server preferred address for a single address family (IPv4 or IPv6). message AddressFamilyConfig { // The server preferred address sent to clients. 
diff --git a/src/main/proto/envoy/extensions/rbac/principals/mtls_authenticated/v3/mtls_authenticated.proto b/src/main/proto/envoy/extensions/rbac/principals/mtls_authenticated/v3/mtls_authenticated.proto deleted file mode 100644 index 76a145f..0000000 --- a/src/main/proto/envoy/extensions/rbac/principals/mtls_authenticated/v3/mtls_authenticated.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.rbac.principals.mtls_authenticated.v3; - -import "envoy/extensions/transport_sockets/tls/v3/common.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.rbac.principals.mtls_authenticated.v3"; -option java_outer_classname = "MtlsAuthenticatedProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/principals/mtls_authenticated/v3;mtls_authenticatedv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: RBAC MTls Authenticated Principal] -// [#extension: envoy.rbac.principals.mtls_authenticated] - -// Authentication attributes for a downstream mTLS connection. All modes require that a peer certificate -// was presented and validated using the ValidationContext in the DownstreamTlsContext configuration. -// -// If neither field is set, a configuration loading error will be generated. This is so that -// not validating SANs requires an affirmative configuration to disable, to prevent accidentally -// not configuring SAN validation. -// -// If ``any_validated_client_certificate`` is set in addition to ``san_matcher`` or a future field -// which specifies additional validation, the other field always takes precedence over -// ``any_validated_client_certificate`` and all specified validation is performed. -message Config { - // Specifies a SAN that must be present in the validated peer certificate. 
- transport_sockets.tls.v3.SubjectAltNameMatcher san_matcher = 1; - - // Only require that the peer certificate is present and valid. - bool any_validated_client_certificate = 2; -} diff --git a/src/main/proto/envoy/extensions/resource_monitors/cpu_utilization/v3/cpu_utilization.proto b/src/main/proto/envoy/extensions/resource_monitors/cpu_utilization/v3/cpu_utilization.proto deleted file mode 100644 index 2f91e09..0000000 --- a/src/main/proto/envoy/extensions/resource_monitors/cpu_utilization/v3/cpu_utilization.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.resource_monitors.cpu_utilization.v3; - -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.resource_monitors.cpu_utilization.v3"; -option java_outer_classname = "CpuUtilizationProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/resource_monitors/cpu_utilization/v3;cpu_utilizationv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: CPU utilization] -// [#extension: envoy.resource_monitors.cpu_utilization] - -// The CPU utilization resource monitor reports the Envoy process the CPU Utilization across different platforms. 
-message CpuUtilizationConfig { - enum UtilizationComputeStrategy { - // Reports CPU Utilization of the entire Linux Host machine - HOST = 0; - - // Reports CPU Utilization of Envoy Container using CGROUP stats in Linux K8s environment - CONTAINER = 1; - } - - UtilizationComputeStrategy mode = 1 [(validate.rules).enum = {defined_only: true}]; -} diff --git a/src/main/proto/envoy/extensions/resource_monitors/downstream_connections/v3/downstream_connections.proto b/src/main/proto/envoy/extensions/resource_monitors/downstream_connections/v3/downstream_connections.proto index b65b01a..5028fdf 100644 --- a/src/main/proto/envoy/extensions/resource_monitors/downstream_connections/v3/downstream_connections.proto +++ b/src/main/proto/envoy/extensions/resource_monitors/downstream_connections/v3/downstream_connections.proto @@ -12,7 +12,7 @@ option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/res option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Downstream connections] -// [#extension: envoy.resource_monitors.global_downstream_max_connections] +// [#extension: envoy.resource_monitors.downstream_connections] // The downstream connections resource monitor tracks the global number of open downstream connections. 
message DownstreamConnectionsConfig { diff --git a/src/main/proto/envoy/extensions/tracers/fluentd/v3/fluentd.proto b/src/main/proto/envoy/extensions/tracers/fluentd/v3/fluentd.proto deleted file mode 100644 index 5e58a60..0000000 --- a/src/main/proto/envoy/extensions/tracers/fluentd/v3/fluentd.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.fluentd.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/duration.proto"; -import "google/protobuf/wrappers.proto"; - -import "udpa/annotations/migrate.proto"; -import "udpa/annotations/status.proto"; -import "validate/validate.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.fluentd.v3"; -option java_outer_classname = "FluentdProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/fluentd/v3;fluentdv3"; -option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.fluentd.v4alpha"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Fluentd tracer] - -// Configuration for the Fluentd tracer. -// This tracer extension will send the emitted traces over a TCP connection to an upstream that is accepting -// the Fluentd Forward Protocol as described in: `Fluentd Forward Protocol Specification -// `_. -// [#extension: envoy.tracers.fluentd] -// [#next-free-field: 7] -message FluentdConfig { - // The upstream cluster to connect to for streaming the Fluentd messages. - string cluster = 1 [(validate.rules).string = {min_len: 1}]; - - // A tag is a string separated with ``.`` (e.g. ``log.type``) to categorize events. - // See: https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#message-modes - string tag = 2 [(validate.rules).string = {min_len: 1}]; - - // The prefix to use when emitting tracer stats. 
- string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; - - // Interval for flushing traces to the TCP stream. Tracer will flush requests every time - // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to - // 1 second. - google.protobuf.Duration buffer_flush_interval = 4 [(validate.rules).duration = {gt {}}]; - - // Soft size limit in bytes for access log entries buffer. The logger will buffer requests until - // this limit it hit, or every time flush interval is elapsed, whichever comes first. When the buffer - // limit is hit, the logger will immediately flush the buffer contents. Setting it to zero effectively - // disables the batching. Defaults to 16384. - google.protobuf.UInt32Value buffer_size_bytes = 5; - - // Optional retry, in case upstream connection has failed. If this field is not set, the default values will be applied. - config.core.v3.RetryPolicy retry_policy = 6; -} diff --git a/src/main/proto/envoy/extensions/tracers/opentelemetry/samplers/v3/cel_sampler.proto b/src/main/proto/envoy/extensions/tracers/opentelemetry/samplers/v3/cel_sampler.proto deleted file mode 100644 index d4253ea..0000000 --- a/src/main/proto/envoy/extensions/tracers/opentelemetry/samplers/v3/cel_sampler.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package envoy.extensions.tracers.opentelemetry.samplers.v3; - -import "xds/annotations/v3/status.proto"; -import "xds/type/v3/cel.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.extensions.tracers.opentelemetry.samplers.v3"; -option java_outer_classname = "CelSamplerProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/opentelemetry/samplers/v3;samplersv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: "CEL" Sampler config] -// Configuration for the "CEL" Sampler extension. 
-// -// [#extension: envoy.tracers.opentelemetry.samplers.cel] - -message CELSamplerConfig { - // Expression that, when evaluated, will be used to make sample decision. - xds.type.v3.CelExpression expression = 1 - [(xds.annotations.v3.field_status).work_in_progress = true]; -} diff --git a/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto b/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto index 2c9b533..99c2e45 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/http_11_proxy/v3/upstream_http_11_connect.proto @@ -5,6 +5,7 @@ package envoy.extensions.transport_sockets.http_11_proxy.v3; import "envoy/config/core/v3/base.proto"; import "udpa/annotations/status.proto"; +import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.http_11_proxy.v3"; option java_outer_classname = "UpstreamHttp11ConnectProto"; @@ -15,24 +16,21 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream HTTP/1.1 Proxy] // [#extension: envoy.transport_sockets.http_11_proxy] -// HTTP/1.1 proxy transport socket establishes an upstream connection to a proxy address -// instead of the target host's address. This behavior is triggered when the transport -// socket is configured and proxy information is provided. +// Configuration for HTTP/1.1 proxy transport sockets. +// This is intended for use in Envoy Mobile, though may eventually be extended +// for upstream Envoy use. 
+// If this transport socket is configured, and an intermediate filter adds the +// stream info necessary for proxying to the stream info (as the test filter +// does :repo:`here `) then // -// Behavior when proxying: -// ======================= -// When an upstream connection is established, instead of connecting directly to the endpoint -// address, the client will connect to the specified proxy address, send an HTTP/1.1 ``CONNECT`` request -// indicating the endpoint address, and process the response. If the response has HTTP status 200, -// the connection will be passed down to the underlying transport socket. -// -// Configuring proxy information: -// ============================== -// Set ``typed_filter_metadata`` in :ref:`LbEndpoint.Metadata ` or :ref:`LocalityLbEndpoints.Metadata `. -// using the key ``envoy.http11_proxy_transport_socket.proxy_address`` and the -// proxy address in ``config::core::v3::Address`` format. +// * Upstream connections will be directed to the specified proxy address rather +// than the host's address +// * Upstream TLS connections will have a raw HTTP/1.1 CONNECT header prefaced +// to the payload, and 200 response stripped (if less than 200 bytes) +// * Plaintext HTTP/1.1 connections will be sent with a fully qualified URL. // +// This transport socket is not compatible with HTTP/3, plaintext HTTP/2, or raw TCP. message Http11ProxyUpstreamTransport { - // The underlying transport socket being wrapped. Defaults to plaintext (raw_buffer) if unset. - config.core.v3.TransportSocket transport_socket = 1; + // The underlying transport socket being wrapped. 
+ config.core.v3.TransportSocket transport_socket = 1 [(validate.rules).message = {required: true}]; } diff --git a/src/main/proto/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto b/src/main/proto/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto index 12c0e92..87effb7 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto @@ -24,13 +24,4 @@ message ProxyProtocolUpstreamTransport { // The underlying transport socket being wrapped. config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; - - // If this is set to true, the null addresses are allowed in the PROXY protocol header. - // The proxy protocol header encodes the null addresses to AF_UNSPEC. - // [#not-implemented-hide:] - bool allow_unspecified_address = 3; - - // If true, all the TLVs are encoded in the connection pool key. - // [#not-implemented-hide:] - bool tlv_as_pool_key = 4; } diff --git a/src/main/proto/envoy/extensions/transport_sockets/tap/v3/tap.proto b/src/main/proto/envoy/extensions/transport_sockets/tap/v3/tap.proto index aaede4a..281b657 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/tap/v3/tap.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/tap/v3/tap.proto @@ -30,14 +30,4 @@ message Tap { // The underlying transport socket being wrapped. 
config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; - - // Additional configurations for the transport socket tap - SocketTapConfig socket_tap_config = 3; -} - -// Additional configurations for the transport socket tap -message SocketTapConfig { - // Indicates to whether output the connection information per event - // This is only applicable if the streamed trace is enabled - bool set_connection_per_event = 1; } diff --git a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto index 9bc5fb5..c1a3f5b 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/common.proto @@ -24,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common TLS configuration] -// [#next-free-field: 7] +// [#next-free-field: 6] message TlsParameters { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; @@ -45,23 +45,6 @@ message TlsParameters { TLSv1_3 = 4; } - enum CompliancePolicy { - // FIPS_202205 configures a TLS connection to use: - // - // * TLS 1.2 or 1.3 - // * For TLS 1.2, only ECDHE_[RSA|ECDSA]_WITH_AES_*_GCM_SHA*. - // * For TLS 1.3, only AES-GCM - // * P-256 or P-384 for key agreement. - // * For server signatures, only ``PKCS#1/PSS`` with ``SHA256/384/512``, or ECDSA - // with P-256 or P-384. - // - // .. attention:: - // - // Please refer to `BoringSSL policies `_ - // for details. - FIPS_202205 = 0; - } - // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for both clients and servers. 
// // TLS protocol versions below TLSv1_2 require setting compatible ciphers with the @@ -174,11 +157,6 @@ message TlsParameters { // rsa_pkcs1_sha1 // ecdsa_sha1 repeated string signature_algorithms = 5; - - // Compliance policies configure various aspects of the TLS based on the given policy. - // The policies are applied last during configuration and may override the other TLS - // parameters, or any previous policy. - repeated CompliancePolicy compliance_policies = 6 [(validate.rules).repeated = {max_items: 1}]; } // BoringSSL private key method configuration. The private key methods are used for external @@ -254,13 +232,12 @@ message TlsCertificate { config.core.v3.WatchedDirectory watched_directory = 7; // BoringSSL private key method provider. This is an alternative to :ref:`private_key - // ` field. - // When both :ref:`private_key ` and - // :ref:`private_key_provider ` fields are set, - // ``private_key_provider`` takes precedence. - // If ``private_key_provider`` is unavailable and :ref:`fallback - // ` - // is enabled, ``private_key`` will be used. + // ` field. This can't be + // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key + // ` and + // :ref:`private_key_provider + // ` fields will result in an + // error. PrivateKeyProvider private_key_provider = 6; // The password to decrypt the TLS private key. If this field is not set, it is assumed that the @@ -313,12 +290,12 @@ message TlsSessionTicketKeys { // respect to the TLS handshake. // [#not-implemented-hide:] message CertificateProviderPluginInstance { - // Provider instance name. + // Provider instance name. If not present, defaults to "default". // // Instance names should generally be defined not in terms of the underlying provider // implementation (e.g., "file_watcher") but rather in terms of the function of the // certificates (e.g., "foo_deployment_identity"). 
- string instance_name = 1 [(validate.rules).string = {min_len: 1}]; + string instance_name = 1; // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "example.com" to specify a certificate for a @@ -345,13 +322,6 @@ message SubjectAltNameMatcher { // Matcher for SAN value. // - // If the :ref:`san_type ` - // is :ref:`DNS ` - // and the matcher type is :ref:`exact `, DNS wildcards are evaluated - // according to the rules in https://www.rfc-editor.org/rfc/rfc6125#section-6.4.3. - // For example, ``*.example.com`` would match ``test.example.com`` but not ``example.com`` and not - // ``a.b.example.com``. - // // The string matching for OTHER_NAME SAN values depends on their ASN.1 type: // // * OBJECT: Validated against its dotted numeric notation (e.g., "1.2.3.4") diff --git a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto index 94660e2..83ad364 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/secret.proto @@ -22,13 +22,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; message GenericSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; - // Secret of generic type and is available to filters. It is expected - // that only only one of secret and secrets is set. + // Secret of generic type and is available to filters. config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; - - // For cases where multiple associated secrets need to be distributed together. It is expected - // that only only one of secret and secrets is set. 
- map secrets = 2 [(udpa.annotations.sensitive) = true]; } message SdsSecretConfig { diff --git a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto index b292b18..9d465c9 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls.proto @@ -25,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#extension: envoy.transport_sockets.tls] // The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. -// [#next-free-field: 8] +// [#next-free-field: 6] message UpstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.UpstreamTlsContext"; @@ -34,32 +34,14 @@ message UpstreamTlsContext { // // .. attention:: // - // Server certificate verification is not enabled by default. To enable verification, configure - // :ref:`trusted_ca`. + // Server certificate verification is not enabled by default. Configure + // :ref:`trusted_ca` to enable + // verification. CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. string sni = 2 [(validate.rules).string = {max_bytes: 255}]; - // If true, replaces the SNI for the connection with the hostname of the upstream host, if - // the hostname is known due to either a DNS cluster type or the - // :ref:`hostname ` is set on - // the host. - // - // See :ref:`SNI configuration ` for details on how this - // interacts with other validation options. - bool auto_host_sni = 6; - - // If true, replaces any Subject Alternative Name (SAN) validations with a validation for a DNS SAN matching - // the SNI value sent. The validation uses the actual requested SNI, regardless of how the SNI is configured. 
- // - // For common cases where an SNI value is present and the server certificate should include a corresponding SAN, - // this option ensures the SAN is properly validated. - // - // See the :ref:`validation configuration ` for how this interacts with - // other validation options. - bool auto_sni_san_validation = 7; - // If true, server-initiated TLS renegotiation will be allowed. // // .. attention:: @@ -68,38 +50,43 @@ message UpstreamTlsContext { bool allow_renegotiation = 3; // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets - // for TLSv1.2 and older) to be stored for session resumption. + // for TLSv1.2 and older) to store for the purpose of session resumption. // // Defaults to 1, setting this to 0 disables session resumption. google.protobuf.UInt32Value max_session_keys = 4; - // Controls enforcement of the ``keyUsage`` extension in peer certificates. If set to ``true``, the handshake will fail if - // the ``keyUsage`` is incompatible with TLS usage. - // - // .. note:: - // The default value is ``false`` (i.e., enforcement off). It is expected to change to ``true`` in a future release. - // - // The ``ssl.was_key_usage_invalid`` in :ref:`listener metrics ` metric will be incremented - // for configurations that would fail if this option were enabled. + // This field is used to control the enforcement, whereby the handshake will fail if the keyUsage extension + // is present and incompatible with the TLS usage. Currently, the default value is false (i.e., enforcement off) + // but it is expected to be changed to true by default in a future release. + // ``ssl.was_key_usage_invalid`` in :ref:`listener metrics ` will be set for certificate + // configurations that would fail if this option were set to true. 
google.protobuf.BoolValue enforce_rsa_key_usage = 5; } -// [#next-free-field: 12] +// [#next-free-field: 11] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; enum OcspStaplePolicy { - // OCSP responses are optional. If absent or expired, the certificate is used without stapling. + // OCSP responses are optional. If an OCSP response is absent + // or expired, the associated certificate will be used for + // connections without an OCSP staple. LENIENT_STAPLING = 0; - // OCSP responses are optional. If absent, the certificate is used without stapling. If present but expired, - // the certificate is not used for subsequent connections. Connections are rejected if no suitable certificate - // is found. + // OCSP responses are optional. If an OCSP response is absent, + // the associated certificate will be used without an + // OCSP staple. If a response is provided but is expired, + // the associated certificate will not be used for + // subsequent connections. If no suitable certificate is found, + // the connection is rejected. STRICT_STAPLING = 1; - // OCSP responses are required. Connections fail if a certificate lacks a valid OCSP response. Expired responses - // prevent certificate use in new connections, and connections are rejected if no suitable certificate is available. + // OCSP responses are required. Configuration will fail if + // a certificate is provided without an OCSP response. If a + // response expires, the associated certificate will not be + // used connections. If no suitable certificate is found, the + // connection is rejected. MUST_STAPLE = 2; } @@ -132,64 +119,51 @@ message DownstreamTlsContext { bool disable_stateless_session_resumption = 7; } - // If ``true``, the TLS server will not maintain a session cache of TLS sessions. - // - // .. note:: - // This applies only to TLSv1.2 and earlier. 
- // + // If set to true, the TLS server will not maintain a session cache of TLS sessions. (This is + // relevant only for TLSv1.2 and earlier.) bool disable_stateful_session_resumption = 10; - // Maximum lifetime of TLS sessions. If specified, ``session_timeout`` will change the maximum lifetime - // of the TLS session. - // - // This serves as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. - // Only whole seconds are considered; fractional seconds are ignored. + // If specified, ``session_timeout`` will change the maximum lifetime (in seconds) of the TLS session. + // Currently this value is used as a hint for the `TLS session ticket lifetime (for TLSv1.2) `_. + // Only seconds can be specified (fractional seconds are ignored). google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { lt {seconds: 4294967296} gte {} }]; - // Configuration for handling certificates without an OCSP response or with expired responses. - // - // Defaults to ``LENIENT_STAPLING`` + // Config for whether to use certificates if they do not have + // an accompanying OCSP response or if the response expires at runtime. + // Defaults to LENIENT_STAPLING OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; // Multiple certificates are allowed in Downstream transport socket to serve different SNI. - // This option controls the behavior when no matching certificate is found for the received SNI value, - // or no SNI value was sent. If enabled, all certificates will be evaluated for a match for non-SNI criteria - // such as key type and OCSP settings. If disabled, the first provided certificate will be used. - // Defaults to ``false``. See more details in :ref:`Multiple TLS certificates `. + // If the client provides SNI but no such cert matched, it will decide to full scan certificates or not based on this config. + // Defaults to false. See more details in :ref:`Multiple TLS certificates `. 
google.protobuf.BoolValue full_scan_certs_on_sni_mismatch = 9; - - // If ``true``, the downstream client's preferred cipher is used during the handshake. If ``false``, Envoy - // uses its preferred cipher. - // - // .. note:: - // This has no effect when using TLSv1_3. - // - bool prefer_client_ciphers = 11; } // TLS key log configuration. // The key log file format is "format used by NSS for its SSLKEYLOGFILE debugging output" (text taken from openssl man page) message TlsKeyLog { - // Path to save the TLS key log. + // The path to save the TLS key log. string path = 1 [(validate.rules).string = {min_len: 1}]; - // Local IP address ranges to filter connections for TLS key logging. If not set, matches any local IP address. + // The local IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any local IP address will be matched. repeated config.core.v3.CidrRange local_address_range = 2; - // Remote IP address ranges to filter connections for TLS key logging. If not set, matches any remote IP address. + // The remote IP address that will be used to filter the connection which should save the TLS key log + // If it is not set, any remote IP address will be matched. repeated config.core.v3.CidrRange remote_address_range = 3; } // TLS context shared by both client and server TLS contexts. -// [#next-free-field: 17] +// [#next-free-field: 16] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; - // Config for the Certificate Provider to fetch certificates. Certificates are fetched/refreshed asynchronously over - // the network relative to the TLS handshake. + // Config for Certificate provider to get certificates. This provider should allow certificates to be + // fetched/refreshed over the network asynchronously with respect to the TLS handshake. 
// // DEPRECATED: This message is not currently used, but if we ever do need it, we will want to // move it out of CommonTlsContext and into common.proto, similar to the existing @@ -282,7 +256,7 @@ message CommonTlsContext { // fetched/refreshed over the network asynchronously with respect to the TLS handshake. // // The same number and types of certificates as :ref:`tls_certificates ` - // are valid in the certificates fetched through this setting. + // are valid in the the certificates fetched through this setting. // // If ``tls_certificates`` or ``tls_certificate_provider_instance`` are set, this field // is ignored. @@ -295,13 +269,6 @@ message CommonTlsContext { // [#not-implemented-hide:] CertificateProviderPluginInstance tls_certificate_provider_instance = 14; - // Custom TLS certificate selector. - // - // Select TLS certificate based on TLS client hello. - // If empty, defaults to native TLS certificate selection behavior: - // DNS SANs or Subject Common Name in TLS certificates is extracted as server name pattern to match SNI. - config.core.v3.TypedExtensionConfig custom_tls_certificate_selector = 16; - // Certificate provider for fetching TLS certificates. // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9 @@ -320,17 +287,13 @@ message CommonTlsContext { // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; - // Combines the default ``CertificateValidationContext`` with the SDS-provided dynamic context for certificate - // validation. - // - // When the SDS server returns a dynamic ``CertificateValidationContext``, it is merged - // with the default context using ``Message::MergeFrom()``. The merging rules are as follows: - // - // * **Singular Fields:** Dynamic fields override the default singular fields. - // * **Repeated Fields:** Dynamic repeated fields are concatenated with the default repeated fields. 
- // * **Boolean Fields:** Boolean fields are combined using a logical OR operation. - // - // The resulting ``CertificateValidationContext`` is used to perform certificate validation. + // Combined certificate validation context holds a default CertificateValidationContext + // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic + // and default CertificateValidationContext are merged into a new CertificateValidationContext + // for validation. This merge is done by Message::MergeFrom(), so dynamic + // CertificateValidationContext overwrites singular fields in default + // CertificateValidationContext, and concatenates repeated fields to default + // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; // Certificate provider for fetching validation context. diff --git a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto index 73592f8..4b0e17c 100644 --- a/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto +++ b/src/main/proto/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.proto @@ -57,11 +57,4 @@ message SPIFFECertValidatorConfig { // This field specifies trust domains used for validating incoming X.509-SVID(s). repeated TrustDomain trust_domains = 1 [(validate.rules).repeated = {min_items: 1}]; - - // This field specifies all trust bundles as a single DataSource. If both - // trust_bundles and trust_domains are specified, trust_bundles will - // take precedence. Currently assumes file will be a SPIFFE Trust Bundle Map. - // If DataSource is a file, dynamic file watching will be enabled, - // and updates to the specified file will trigger a refresh of the trust_bundles. 
- config.core.v3.DataSource trust_bundles = 2; } diff --git a/src/main/proto/envoy/extensions/upstreams/http/v3/http_protocol_options.proto b/src/main/proto/envoy/extensions/upstreams/http/v3/http_protocol_options.proto index ff90cdd..ca4cb81 100644 --- a/src/main/proto/envoy/extensions/upstreams/http/v3/http_protocol_options.proto +++ b/src/main/proto/envoy/extensions/upstreams/http/v3/http_protocol_options.proto @@ -140,6 +140,7 @@ message HttpProtocolOptions { option (validate.required) = true; // To explicitly configure either HTTP/1 or HTTP/2 (but not both!) use ``explicit_http_config``. + // If the ``explicit_http_config`` is empty, HTTP/1.1 is used. ExplicitHttpConfig explicit_http_config = 3; // This allows switching on protocol based on what protocol the downstream @@ -150,11 +151,10 @@ message HttpProtocolOptions { AutoHttpConfig auto_config = 5; } - // Optional HTTP filters for the upstream HTTP filter chain. - // // .. note:: // Upstream HTTP filters are currently in alpha. // + // Optional HTTP filters for the upstream HTTP filter chain. // // These filters will be applied for all HTTP streams which flow through this // cluster. Unlike downstream HTTP filters, they will *not* be applied to terminated CONNECT requests. 
diff --git a/src/main/proto/envoy/extensions/wasm/v3/wasm.proto b/src/main/proto/envoy/extensions/wasm/v3/wasm.proto index 6ad19ee..e691302 100644 --- a/src/main/proto/envoy/extensions/wasm/v3/wasm.proto +++ b/src/main/proto/envoy/extensions/wasm/v3/wasm.proto @@ -2,12 +2,10 @@ syntax = "proto3"; package envoy.extensions.wasm.v3; -import "envoy/config/core/v3/backoff.proto"; import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.extensions.wasm.v3"; @@ -19,33 +17,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // [#extension: envoy.bootstrap.wasm] -// If there is a fatal error on the VM (e.g. exception, abort()), then the policy will be applied. -enum FailurePolicy { - // No policy is specified. The default policy will be used. The default policy is ``FAIL_CLOSED``. - UNSPECIFIED = 0; - - // New plugin instance will be created for the new request if the VM is failed. Note this only - // be applied to the following failures: - // - // * ``proxy_wasm::FailState::RuntimeError`` - // - // This will fallback to the ``FAIL_CLOSED`` for all other failures. - FAIL_RELOAD = 1; - - // All plugins associated with the VM will return an HTTP 503 error. - FAIL_CLOSED = 2; - - // All plugins associated with the VM will be ignored and the filter chain will continue. This - // makes sense when the plugin is optional. - FAIL_OPEN = 3; -} - -message ReloadConfig { - // Backoff strategy for the VM failure reload. If not specified, the default 1s base interval - // will be applied. - config.core.v3.BackoffStrategy backoff = 1; -} - // Configuration for restricting Proxy-Wasm capabilities available to modules. message CapabilityRestrictionConfig { // The Proxy-Wasm capabilities which will be allowed. 
Capabilities are mapped by @@ -143,7 +114,7 @@ message EnvironmentVariables { } // Base Configuration for Wasm Plugins e.g. filters and services. -// [#next-free-field: 9] +// [#next-free-field: 7] message PluginConfig { // A unique name for a filters/services in a VM for use in identifying the filter/service if // multiple filters/services are handled by the same ``vm_id`` and ``root_id`` and for @@ -173,14 +144,7 @@ message PluginConfig { // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial // startup the proxy will not start. - // This field is deprecated in favor of the ``failure_policy`` field. - bool fail_open = 5 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - // The failure policy for the plugin. - FailurePolicy failure_policy = 7; - - // Reload configuration. This is only applied when ``failure_policy`` is set to ``FAIL_RELOAD``. - ReloadConfig reload_config = 8; + bool fail_open = 5; // Configuration for restricting Proxy-Wasm capabilities available to modules. CapabilityRestrictionConfig capability_restriction_config = 6; diff --git a/src/main/proto/envoy/service/auth/v2/attribute_context.proto b/src/main/proto/envoy/service/auth/v2/attribute_context.proto index 3a11b73..cf5cda3 100644 --- a/src/main/proto/envoy/service/auth/v2/attribute_context.proto +++ b/src/main/proto/envoy/service/auth/v2/attribute_context.proto @@ -27,7 +27,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. 
// [#comment: The following items are left out of this proto -// Request.Auth field for JWTs +// Request.Auth field for jwt tokens // Request.Api for api management // Origin peer that originated the request // Caching Protocol diff --git a/src/main/proto/envoy/service/auth/v3/attribute_context.proto b/src/main/proto/envoy/service/auth/v3/attribute_context.proto index 2c4fbb4..ef76499 100644 --- a/src/main/proto/envoy/service/auth/v3/attribute_context.proto +++ b/src/main/proto/envoy/service/auth/v3/attribute_context.proto @@ -29,7 +29,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // of the ``AttributeContext``. The ``AttributeContext`` is a collection of individual attributes // supported by Envoy authorization system. // [#comment: The following items are left out of this proto -// Request.Auth field for JWTs +// Request.Auth field for jwt tokens // Request.Api for api management // Origin peer that originated the request // Caching Protocol diff --git a/src/main/proto/envoy/service/discovery/v3/discovery.proto b/src/main/proto/envoy/service/discovery/v3/discovery.proto index 6f3b123..b7270f2 100644 --- a/src/main/proto/envoy/service/discovery/v3/discovery.proto +++ b/src/main/proto/envoy/service/discovery/v3/discovery.proto @@ -41,17 +41,6 @@ message ResourceName { DynamicParameterConstraints dynamic_parameter_constraints = 2; } -// [#not-implemented-hide:] -// An error associated with a specific resource name, returned to the -// client by the server. -message ResourceError { - // The name of the resource. - ResourceName resource_name = 1; - - // The error reported for the resource. - google.rpc.Status error_detail = 2; -} - // A DiscoveryRequest requests a set of versioned resources of the same type for // a given Envoy node on some API. 
// [#next-free-field: 8] @@ -107,7 +96,7 @@ message DiscoveryRequest { google.rpc.Status error_detail = 6; } -// [#next-free-field: 8] +// [#next-free-field: 7] message DiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse"; @@ -149,13 +138,6 @@ message DiscoveryResponse { // The control plane instance that sent the response. config.core.v3.ControlPlane control_plane = 6; - - // [#not-implemented-hide:] - // Errors associated with specific resources. Clients are expected to - // remember the most recent error for a given resource across responses; - // the error condition is not considered to be cleared until a response is - // received that contains the resource in the 'resources' field. - repeated ResourceError resource_errors = 7; } // DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC @@ -265,7 +247,7 @@ message DeltaDiscoveryRequest { google.rpc.Status error_detail = 7; } -// [#next-free-field: 10] +// [#next-free-field: 9] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; @@ -299,13 +281,6 @@ message DeltaDiscoveryResponse { // [#not-implemented-hide:] // The control plane instance that sent the response. config.core.v3.ControlPlane control_plane = 7; - - // [#not-implemented-hide:] - // Errors associated with specific resources. Note that a resource in - // this field with a status of NOT_FOUND should be treated the same as - // a resource listed in the 'removed_resources' or 'removed_resource_names' - // fields. - repeated ResourceError resource_errors = 9; } // A set of dynamic parameter constraints associated with a variant of an individual xDS resource. 
diff --git a/src/main/proto/envoy/service/ext_proc/v3/external_processor.proto b/src/main/proto/envoy/service/ext_proc/v3/external_processor.proto index e77d60d..e1d698f 100644 --- a/src/main/proto/envoy/service/ext_proc/v3/external_processor.proto +++ b/src/main/proto/envoy/service/ext_proc/v3/external_processor.proto @@ -9,8 +9,6 @@ import "envoy/type/v3/http_status.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; -import "xds/annotations/v3/status.proto"; - import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; @@ -31,10 +29,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // 2. The service sends back a ProcessingResponse message that directs Envoy // to either stop processing, continue without it, or send it the // next chunk of the message body. -// 3. If so requested, Envoy sends the server the message body in chunks, -// or the entire body at once. In either case, the server may send back -// a ProcessingResponse for each message it receives, or wait for certain amount -// of body chunks received before streams back the ProcessingResponse messages. +// 3. If so requested, Envoy sends the server chunks of the message body, +// or the entire body at once. In either case, the server sends back +// a ProcessingResponse after each message it receives. // 4. If so requested, Envoy sends the server the HTTP trailers, // and the server sends back a ProcessingResponse. // 5. At this point, request processing is done, and we pick up again @@ -48,6 +45,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // In other words, the process is a request/response conversation, but // using a gRPC stream to make it easier for the server to // maintain state. + service ExternalProcessor { // This begins the bidirectional stream that Envoy will use to // give the server control over what the filter does. 
The actual @@ -57,31 +55,9 @@ service ExternalProcessor { } } -// This message specifies the filter protocol configurations which will be sent to the ext_proc -// server in a :ref:`ProcessingRequest `. -// If the server does not support these protocol configurations, it may choose to close the gRPC stream. -// If the server supports these protocol configurations, it should respond based on the API specifications. -message ProtocolConfiguration { - // Specify the filter configuration :ref:`request_body_mode - // ` - envoy.extensions.filters.http.ext_proc.v3.ProcessingMode.BodySendMode request_body_mode = 1 - [(validate.rules).enum = {defined_only: true}]; - - // Specify the filter configuration :ref:`response_body_mode - // ` - envoy.extensions.filters.http.ext_proc.v3.ProcessingMode.BodySendMode response_body_mode = 2 - [(validate.rules).enum = {defined_only: true}]; - - // Specify the filter configuration :ref:`send_body_without_waiting_for_header_response - // ` - // If the client is waiting for a header response from the server, setting ``true`` means the client will send body to the server - // as they arrive. Setting ``false`` means the client will buffer the arrived data and not send it to the server immediately. - bool send_body_without_waiting_for_header_response = 3; -} - // This represents the different types of messages that Envoy can send // to an external processing server. -// [#next-free-field: 12] +// [#next-free-field: 11] message ProcessingRequest { reserved 1; @@ -147,24 +123,12 @@ message ProcessingRequest { // are needed. // bool observability_mode = 10; - - // Specify the filter protocol configurations to be sent to the server. - // ``protocol_config`` is only encoded in the first ``ProcessingRequest`` message from the client to the server. 
- ProtocolConfiguration protocol_config = 11; } -// This represents the different types of messages the server may send back to Envoy -// when the ``observability_mode`` field in the received ProcessingRequest is set to false. -// -// * If the corresponding ``BodySendMode`` in the -// :ref:`processing_mode ` -// is not set to ``FULL_DUPLEX_STREAMED``, then for every received ProcessingRequest, -// the server must send back exactly one ProcessingResponse message. -// * If it is set to ``FULL_DUPLEX_STREAMED``, the server must follow the API defined -// for this mode to send the ProcessingResponse messages. +// For every ProcessingRequest received by the server with the ``observability_mode`` field +// set to false, the server must send back exactly one ProcessingResponse message. // [#next-free-field: 11] message ProcessingResponse { - // The response type that is sent by the server. oneof response { option (validate.required) = true; @@ -216,10 +180,7 @@ message ProcessingResponse { // It is also ignored by Envoy when the ext_proc filter config // :ref:`allow_mode_override // ` - // is set to false, or - // :ref:`send_body_without_waiting_for_header_response - // ` - // is set to true. + // is set to false. envoy.extensions.filters.http.ext_proc.v3.ProcessingMode mode_override = 9; // When ext_proc server receives a request message, in case it needs more @@ -245,8 +206,12 @@ message ProcessingResponse { message HttpHeaders { // The HTTP request headers. All header keys will be // lower-cased, because HTTP header keys are case-insensitive. - // The header value is encoded in the + // The ``headers`` encoding is based on the runtime guard + // envoy_reloadable_features_send_header_raw_value setting. + // When it is true, the header value is encoded in the // :ref:`raw_value ` field. + // When it is false, the header value is encoded in the + // :ref:`value ` field. 
config.core.v3.HeaderMap headers = 1; // [#not-implemented-hide:] @@ -256,61 +221,50 @@ message HttpHeaders { map attributes = 2 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - // If ``true``, then there is no message body associated with this + // If true, then there is no message body associated with this // request or response. bool end_of_stream = 3; } -// This message is sent to the external server when the HTTP request and -// response bodies are received. +// This message contains the message body that Envoy sends to the external server. message HttpBody { - // The contents of the body in the HTTP request/response. Note that in - // streaming mode multiple ``HttpBody`` messages may be sent. bytes body = 1; - // If ``true``, this will be the last ``HttpBody`` message that will be sent and no - // trailers will be sent for the current request/response. bool end_of_stream = 2; } -// This message is sent to the external server when the HTTP request and -// response trailers are received. +// This message contains the trailers. message HttpTrailers { - // The header value is encoded in the + // The ``trailers`` encoding is based on the runtime guard + // envoy_reloadable_features_send_header_raw_value setting. + // When it is true, the header value is encoded in the // :ref:`raw_value ` field. + // When it is false, the header value is encoded in the + // :ref:`value ` field. config.core.v3.HeaderMap trailers = 1; } // The following are messages that may be sent back by the server. -// This message is sent by the external server to Envoy after ``HttpHeaders`` was -// sent to it. +// This message must be sent in response to an HttpHeaders message. message HeadersResponse { - // Details the modifications (if any) to be made by Envoy to the current - // request/response. - CommonResponse response = 1; -} - -// This message is sent by the external server to Envoy after ``HttpBody`` was -// sent to it. 
-message BodyResponse { - // Details the modifications (if any) to be made by Envoy to the current - // request/response. CommonResponse response = 1; } -// This message is sent by the external server to Envoy after ``HttpTrailers`` was -// sent to it. +// This message must be sent in response to an HttpTrailers message. message TrailersResponse { - // Details the modifications (if any) to be made by Envoy to the current - // request/response trailers. + // Instructions on how to manipulate the trailers HeaderMutation header_mutation = 1; } +// This message must be sent in response to an HttpBody message. +message BodyResponse { + CommonResponse response = 1; +} + // This message contains common fields between header and body responses. // [#next-free-field: 6] message CommonResponse { - // The status of the response. enum ResponseStatus { // Apply the mutation instructions in this message to the // request or response, and then continue processing the filter @@ -354,8 +308,12 @@ message CommonResponse { // Add new trailers to the message. This may be used when responding to either a // HttpHeaders or HttpBody message, but only if this message is returned // along with the CONTINUE_AND_REPLACE status. - // The header value is encoded in the + // The ``trailers`` encoding is based on the runtime guard + // envoy_reloadable_features_send_header_raw_value setting. + // When it is true, the header value is encoded in the // :ref:`raw_value ` field. + // When it is false, the header value is encoded in the + // :ref:`value ` field. config.core.v3.HeaderMap trailers = 4; // Clear the route cache for the current client request. This is necessary @@ -373,7 +331,7 @@ message CommonResponse { // to the downstream codec, or reset the stream. // [#next-free-field: 6] message ImmediateResponse { - // The response code to return. 
+ // The response code to return type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // Apply changes to the default headers, which will include content-type. @@ -394,7 +352,7 @@ message ImmediateResponse { // This message specifies a gRPC status for an ImmediateResponse message. message GrpcStatus { - // The actual gRPC status. + // The actual gRPC status uint32 status = 1; } @@ -404,8 +362,12 @@ message HeaderMutation { // Add or replace HTTP headers. Attempts to set the value of // any ``x-envoy`` header, and attempts to set the ``:method``, // ``:authority``, ``:scheme``, or ``host`` headers will be ignored. - // The header value is encoded in the + // The ``set_headers`` encoding is based on the runtime guard + // envoy_reloadable_features_send_header_raw_value setting. + // When it is true, the header value is encoded in the // :ref:`raw_value ` field. + // When it is false, the header value is encoded in the + // :ref:`value ` field. repeated config.core.v3.HeaderValueOption set_headers = 1; // Remove these HTTP headers. Attempts to remove system headers -- @@ -413,38 +375,14 @@ message HeaderMutation { repeated string remove_headers = 2; } -// The body response message corresponding to FULL_DUPLEX_STREAMED body mode. -message StreamedBodyResponse { - // The body response chunk that will be passed to the upstream/downstream by Envoy. - bytes body = 1; - - // The server sets this flag to true if it has received a body request with - // :ref:`end_of_stream ` set to true, - // and this is the last chunk of body responses. - bool end_of_stream = 2; -} - -// This message specifies the body mutation the server sends to Envoy. +// Replace the entire message body chunk received in the corresponding +// HttpBody message with this new body, or clear the body. message BodyMutation { - // The type of mutation for the body. oneof mutation { - // The entire body to replace. 
- // Should only be used when the corresponding ``BodySendMode`` in the - // :ref:`processing_mode ` - // is not set to ``FULL_DUPLEX_STREAMED``. + // The entire body to replace bytes body = 1; - // Clear the corresponding body chunk. - // Should only be used when the corresponding ``BodySendMode`` in the - // :ref:`processing_mode ` - // is not set to ``FULL_DUPLEX_STREAMED``. - // Clear the corresponding body chunk. + // Clear the corresponding body chunk bool clear_body = 2; - - // Must be used when the corresponding ``BodySendMode`` in the - // :ref:`processing_mode ` - // is set to ``FULL_DUPLEX_STREAMED``. - StreamedBodyResponse streamed_response = 3 - [(xds.annotations.v3.field_status).work_in_progress = true]; } } diff --git a/src/main/proto/envoy/service/extension/v3/config_discovery.proto b/src/main/proto/envoy/service/extension/v3/config_discovery.proto index 8948555..2230c8d 100644 --- a/src/main/proto/envoy/service/extension/v3/config_discovery.proto +++ b/src/main/proto/envoy/service/extension/v3/config_discovery.proto @@ -18,14 +18,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Extension config discovery service (ECDS)] // A service that supports dynamic configuration updates for a specific filter. -// Currently, ECDS is supported for network filters, HTTP filters, UDP session filters and Listener filters. +// Currently, ECDS is supported for network filters, HTTP filters and Listener filters. // Please check :ref:`Extension Config Discovery Service (ECDS) API `. // The overall extension config discovery service works as follows: // // 1. A filter (:ref:`Downstream Network `, // :ref:`Upstream Network `, -// :ref:`Listener `, -// :ref:`UDP Session `, +// :ref:`Listener ` // or :ref:`HTTP `) // contains a :ref:`config_discovery ` configuration. 
This configuration // includes a :ref:`config_source `, diff --git a/src/main/proto/envoy/service/network_ext_proc/v3/network_external_processor.proto b/src/main/proto/envoy/service/network_ext_proc/v3/network_external_processor.proto deleted file mode 100644 index 2cba2be..0000000 --- a/src/main/proto/envoy/service/network_ext_proc/v3/network_external_processor.proto +++ /dev/null @@ -1,158 +0,0 @@ -syntax = "proto3"; - -package envoy.service.network_ext_proc.v3; - -import "envoy/config/core/v3/base.proto"; - -import "google/protobuf/struct.proto"; - -import "xds/annotations/v3/status.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.network_ext_proc.v3"; -option java_outer_classname = "NetworkExternalProcessorProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/network_ext_proc/v3;network_ext_procv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; -option (xds.annotations.v3.file_status).work_in_progress = true; - -// [#protodoc-title: Network External Processing Service] - -// The Network External Processing filter allows an external service to dynamically -// interact with and modify L4 network traffic passing through Envoy. Unlike the -// HTTP External Processing filter, this service operates at the TCP/UDP level, -// providing access to raw network data. 
-// -// The filter communicates with an external gRPC service that can: -// * Inspect network traffic in both directions (client->server and server->client) -// * Modify the payload data -// * Control connection lifecycle (continue, close gracefully, or reset) -// -// Use cases include: -// * Custom protocol inspection and modification -// * Advanced traffic manipulation -// * Security scanning and filtering -// * Dynamic connection management -// -// The service uses a bidirectional gRPC stream, maintaining state throughout -// the connection lifetime while allowing asynchronous processing. - -// NetworkExternalProcessor service defines the contract between Envoy and -// external processing servers for L4 network traffic. -service NetworkExternalProcessor { - // Process establishes a bidirectional stream between Envoy and the external - // processing server. Envoy sends ProcessingRequests containing network data - // and the server responds with ProcessingResponses containing processing - // decisions and potentially modified data. - // - // The server should handle processing timeout properly to avoid blocking - // network traffic for extended periods. Any uncaught exceptions will - // be treated as errors and will terminate the stream. - // - // Implementation note: The server should process requests in the order - // they are received to maintain proper sequencing of network traffic. - rpc Process(stream ProcessingRequest) returns (stream ProcessingResponse) { - } -} - -// The payload data from network layer -message Data { - // The raw payload data - bytes data = 1; - - // Indicates whether this is the last data frame in the current direction. - // The external processor should still respond to this message even - // if there is no more data expected in this direction. - bool end_of_stream = 2; -} - -// ProcessingRequest contains data sent from Envoy to the external processing server. 
-// Each request contains either read data (from client) or write data (to client) -// along with optional metadata. -message ProcessingRequest { - // ReadData contains the network data intercepted in the request path (client to server). - // This is sent to the external processor when data arrives from the downstream client. - // If this is set, write_data should not be set. - Data read_data = 1; - - // WriteData contains the network data intercepted in the response path (server to client). - // This is sent to the external processor when data arrives from the upstream server. - // If this is set, read_data should not be set. - Data write_data = 2; - - // Optional metadata associated with the request. - // This can include connection properties, filter configuration, and any other - // contextual information that might be useful for processing decisions. - // - // The metadata is not automatically propagated from request to response. - // The external processor must include any needed metadata in its response. - config.core.v3.Metadata metadata = 3; -} - -// ProcessingResponse contains the response from the external processing server to Envoy. -// Each response corresponds to a ProcessingRequest and indicates how the network -// traffic should be handled. -// [#next-free-field: 6] -message ProcessingResponse { - // DataProcessedStatus indicates whether the data was modified by the external processor. - enum DataProcessedStatus { - // The data processed status is unknown. - UNKNOWN = 0; - - // The data remains unchanged. Envoy will use the original data. - // This is useful when the processor only wants to inspect but not modify the traffic. - UNMODIFIED = 1; - - // The data has been modified. Envoy will use the modified data provided in the response. - // This allows the processor to transform the network traffic as needed. - MODIFIED = 2; - } - - // ConnectionStatus determines what happens to the connection after processing. 
- enum ConnectionStatus { - // Continue normal processing of the connection. - // This is the default behavior and allows traffic to flow normally. - CONTINUE = 0; - - // Close the connection with normal TCP FIN. - // This allows for clean connection termination with a normal close sequence. - // Any buffered data will be sent before closing. - CLOSE = 1; - - // Immediately reset the connection with TCP RST. - // This forcibly terminates the connection without a clean shutdown, - // discarding any buffered data. Use this for security-critical situations - // or when immediate termination is required. - CLOSE_RST = 2; - } - - // The processed ReadData containing potentially modified data for the request path. - // This should be sent in response to a ProcessingRequest with read_data, and the - // previous data in ProcessingRequest will be replaced by the new data in Envoy's data plane. - // If this is set, write_data should not be set. - Data read_data = 1; - - // The processed WriteData containing potentially modified data for the response path. - // This should be sent in response to a ProcessingRequest with write_data, and the - // previous data in ProcessingRequest will be replaced by the new data in Envoy's data plane. - // If this is set, read_data should not be set. - Data write_data = 2; - - // Indicates whether the data was modified or not. - // This is mandatory and tells Envoy whether to use the original or modified data. - DataProcessedStatus data_processing_status = 3; - - // Optional: Determines the connection behavior after processing. - // If not specified, CONTINUE is assumed, and the connection proceeds normally. - // Use CLOSE or CLOSE_RST to terminate the connection based on processing results. - ConnectionStatus connection_status = 4; - - // Optional metadata associated with the request. - // This can include connection properties, filter configuration, and any other - // contextual information that might be useful for processing decisions. 
- // - // The metadata is not automatically propagated from request to response. - // The external processor must include any needed metadata in its response. - google.protobuf.Struct dynamic_metadata = 5; -} diff --git a/src/main/proto/envoy/service/ratelimit/v3/rls.proto b/src/main/proto/envoy/service/ratelimit/v3/rls.proto index 63f2477..7375ace 100644 --- a/src/main/proto/envoy/service/ratelimit/v3/rls.proto +++ b/src/main/proto/envoy/service/ratelimit/v3/rls.proto @@ -49,8 +49,6 @@ message RateLimitRequest { // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. - // This value can be overridden by setting filter state value ``envoy.ratelimit.hits_addend`` - // to the desired number. Invalid number (< 0) or number will be ignored. uint32 hits_addend = 3; } @@ -94,9 +92,6 @@ message RateLimitResponse { // The time unit representing a day. DAY = 4; - // The time unit representing a week. - WEEK = 7; - // The time unit representing a month. 
MONTH = 5; diff --git a/src/main/proto/envoy/service/redis_auth/v3/redis_external_auth.proto b/src/main/proto/envoy/service/redis_auth/v3/redis_external_auth.proto deleted file mode 100644 index 52bc3bc..0000000 --- a/src/main/proto/envoy/service/redis_auth/v3/redis_external_auth.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -package envoy.service.redis_auth.v3; - -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.service.redis_auth.v3"; -option java_outer_classname = "RedisExternalAuthProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/redis_auth/v3;redis_authv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Redis external authentication service] - -// The messages used by the redis_proxy filter when performing external authentication. - -// A generic interface for performing external password authentication on incoming AUTH commands. -service RedisProxyExternalAuth { - // Performs authentication check based on the data sent with the AUTH request. - // Returns either an OK status or an error status. - rpc Authenticate(RedisProxyExternalAuthRequest) returns (RedisProxyExternalAuthResponse) { - } -} - -message RedisProxyExternalAuthRequest { - // Username, if applicable. Otherwise, empty. - string username = 1; - - // Password sent with the AUTH command. - string password = 2; -} - -message RedisProxyExternalAuthResponse { - // Status of the authentication check. - google.rpc.Status status = 1; - - // Optional expiration time for the authentication. - // If set, the authentication will be valid until this time. - // If not set, the authentication will be valid indefinitely. - google.protobuf.Timestamp expiration = 2; - - // Optional message to be sent back to the client. 
- string message = 3; -} diff --git a/src/main/proto/envoy/service/status/v3/csds.proto b/src/main/proto/envoy/service/status/v3/csds.proto index de62fbf..1c51f2b 100644 --- a/src/main/proto/envoy/service/status/v3/csds.proto +++ b/src/main/proto/envoy/service/status/v3/csds.proto @@ -72,11 +72,6 @@ enum ClientConfigStatus { // config dump is not the NACKed version, but the most recent accepted one. If // no config is accepted yet, the attached config dump will be empty. CLIENT_NACKED = 3; - - // Client received an error from the control plane. The attached config - // dump is the most recent accepted one. If no config is accepted yet, - // the attached config dump will be empty. - CLIENT_RECEIVED_ERROR = 4; } // Request for client status of clients identified by a list of NodeMatchers. diff --git a/src/main/proto/envoy/service/trace/v2/trace_service.proto b/src/main/proto/envoy/service/trace/v2/trace_service.proto new file mode 100644 index 0000000..c4808f1 --- /dev/null +++ b/src/main/proto/envoy/service/trace/v2/trace_service.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package envoy.service.trace.v2; + +import "envoy/api/v2/core/base.proto"; + +import "opencensus/proto/trace/v1/trace.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.trace.v2"; +option java_outer_classname = "TraceServiceProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/trace/v2;tracev2"; +option (udpa.annotations.file_status).package_version_status = FROZEN; + +// [#protodoc-title: Trace service] + +// Service for streaming traces to server that consumes the trace data. It +// uses OpenCensus data model as a standard to represent trace information. +service TraceService { + // Envoy will connect and send StreamTracesMessage messages forever. 
It does + // not expect any response to be sent as nothing would be done in the case + // of failure. + rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { + } +} + +message StreamTracesResponse { +} + +message StreamTracesMessage { + message Identifier { + // The node sending the access log messages over the stream. + api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data effectively is a structured metadata. + // As a performance optimization this will only be sent in the first message + // on the stream. + Identifier identifier = 1; + + // A list of Span entries + repeated opencensus.proto.trace.v1.Span spans = 2; +} diff --git a/src/main/proto/envoy/service/trace/v3/trace_service.proto b/src/main/proto/envoy/service/trace/v3/trace_service.proto new file mode 100644 index 0000000..320f20f --- /dev/null +++ b/src/main/proto/envoy/service/trace/v3/trace_service.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package envoy.service.trace.v3; + +import "envoy/config/core/v3/base.proto"; + +import "opencensus/proto/trace/v1/trace.proto"; + +import "udpa/annotations/status.proto"; +import "udpa/annotations/versioning.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.service.trace.v3"; +option java_outer_classname = "TraceServiceProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/service/trace/v3;tracev3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Trace service] + +// Service for streaming traces to server that consumes the trace data. It +// uses OpenCensus data model as a standard to represent trace information. +service TraceService { + // Envoy will connect and send StreamTracesMessage messages forever. It does + // not expect any response to be sent as nothing would be done in the case + // of failure. 
+ rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { + } +} + +message StreamTracesResponse { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.trace.v2.StreamTracesResponse"; +} + +message StreamTracesMessage { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.trace.v2.StreamTracesMessage"; + + message Identifier { + option (udpa.annotations.versioning).previous_message_type = + "envoy.service.trace.v2.StreamTracesMessage.Identifier"; + + // The node sending the access log messages over the stream. + config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; + } + + // Identifier data effectively is a structured metadata. + // As a performance optimization this will only be sent in the first message + // on the stream. + Identifier identifier = 1; + + // A list of Span entries + repeated opencensus.proto.trace.v1.Span spans = 2; +} diff --git a/src/main/proto/envoy/type/matcher/v3/address.proto b/src/main/proto/envoy/type/matcher/v3/address.proto deleted file mode 100644 index 8a03a53..0000000 --- a/src/main/proto/envoy/type/matcher/v3/address.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package envoy.type.matcher.v3; - -import "xds/core/v3/cidr.proto"; - -import "udpa/annotations/status.proto"; - -option java_package = "io.envoyproxy.envoy.type.matcher.v3"; -option java_outer_classname = "AddressProto"; -option java_multiple_files = true; -option go_package = "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3;matcherv3"; -option (udpa.annotations.file_status).package_version_status = ACTIVE; - -// [#protodoc-title: Address Matcher] - -// Match an IP against a repeated CIDR range. This matcher is intended to be -// used in other matchers, for example in the filter state matcher to match a -// filter state object as an IP. 
-message AddressMatcher { - repeated xds.core.v3.CidrRange ranges = 1; -} diff --git a/src/main/proto/envoy/type/matcher/v3/filter_state.proto b/src/main/proto/envoy/type/matcher/v3/filter_state.proto index 8c38a51..f813178 100644 --- a/src/main/proto/envoy/type/matcher/v3/filter_state.proto +++ b/src/main/proto/envoy/type/matcher/v3/filter_state.proto @@ -2,7 +2,6 @@ syntax = "proto3"; package envoy.type.matcher.v3; -import "envoy/type/matcher/v3/address.proto"; import "envoy/type/matcher/v3/string.proto"; import "udpa/annotations/status.proto"; @@ -26,8 +25,5 @@ message FilterStateMatcher { // Matches the filter state object as a string value. StringMatcher string_match = 2; - - // Matches the filter state object as a ip Instance. - AddressMatcher address_match = 3; } } diff --git a/src/main/proto/envoy/type/v3/http_status.proto b/src/main/proto/envoy/type/v3/http_status.proto index 40d697b..ab03e1b 100644 --- a/src/main/proto/envoy/type/v3/http_status.proto +++ b/src/main/proto/envoy/type/v3/http_status.proto @@ -21,172 +21,116 @@ enum StatusCode { // `enum` type. Empty = 0; - // Continue - ``100`` status code. Continue = 100; - // OK - ``200`` status code. OK = 200; - // Created - ``201`` status code. Created = 201; - // Accepted - ``202`` status code. Accepted = 202; - // NonAuthoritativeInformation - ``203`` status code. NonAuthoritativeInformation = 203; - // NoContent - ``204`` status code. NoContent = 204; - // ResetContent - ``205`` status code. ResetContent = 205; - // PartialContent - ``206`` status code. PartialContent = 206; - // MultiStatus - ``207`` status code. MultiStatus = 207; - // AlreadyReported - ``208`` status code. AlreadyReported = 208; - // IMUsed - ``226`` status code. IMUsed = 226; - // MultipleChoices - ``300`` status code. MultipleChoices = 300; - // MovedPermanently - ``301`` status code. MovedPermanently = 301; - // Found - ``302`` status code. Found = 302; - // SeeOther - ``303`` status code. 
SeeOther = 303; - // NotModified - ``304`` status code. NotModified = 304; - // UseProxy - ``305`` status code. UseProxy = 305; - // TemporaryRedirect - ``307`` status code. TemporaryRedirect = 307; - // PermanentRedirect - ``308`` status code. PermanentRedirect = 308; - // BadRequest - ``400`` status code. BadRequest = 400; - // Unauthorized - ``401`` status code. Unauthorized = 401; - // PaymentRequired - ``402`` status code. PaymentRequired = 402; - // Forbidden - ``403`` status code. Forbidden = 403; - // NotFound - ``404`` status code. NotFound = 404; - // MethodNotAllowed - ``405`` status code. MethodNotAllowed = 405; - // NotAcceptable - ``406`` status code. NotAcceptable = 406; - // ProxyAuthenticationRequired - ``407`` status code. ProxyAuthenticationRequired = 407; - // RequestTimeout - ``408`` status code. RequestTimeout = 408; - // Conflict - ``409`` status code. Conflict = 409; - // Gone - ``410`` status code. Gone = 410; - // LengthRequired - ``411`` status code. LengthRequired = 411; - // PreconditionFailed - ``412`` status code. PreconditionFailed = 412; - // PayloadTooLarge - ``413`` status code. PayloadTooLarge = 413; - // URITooLong - ``414`` status code. URITooLong = 414; - // UnsupportedMediaType - ``415`` status code. UnsupportedMediaType = 415; - // RangeNotSatisfiable - ``416`` status code. RangeNotSatisfiable = 416; - // ExpectationFailed - ``417`` status code. ExpectationFailed = 417; - // MisdirectedRequest - ``421`` status code. MisdirectedRequest = 421; - // UnprocessableEntity - ``422`` status code. UnprocessableEntity = 422; - // Locked - ``423`` status code. Locked = 423; - // FailedDependency - ``424`` status code. FailedDependency = 424; - // UpgradeRequired - ``426`` status code. UpgradeRequired = 426; - // PreconditionRequired - ``428`` status code. PreconditionRequired = 428; - // TooManyRequests - ``429`` status code. TooManyRequests = 429; - // RequestHeaderFieldsTooLarge - ``431`` status code. 
RequestHeaderFieldsTooLarge = 431; - // InternalServerError - ``500`` status code. InternalServerError = 500; - // NotImplemented - ``501`` status code. NotImplemented = 501; - // BadGateway - ``502`` status code. BadGateway = 502; - // ServiceUnavailable - ``503`` status code. ServiceUnavailable = 503; - // GatewayTimeout - ``504`` status code. GatewayTimeout = 504; - // HTTPVersionNotSupported - ``505`` status code. HTTPVersionNotSupported = 505; - // VariantAlsoNegotiates - ``506`` status code. VariantAlsoNegotiates = 506; - // InsufficientStorage - ``507`` status code. InsufficientStorage = 507; - // LoopDetected - ``508`` status code. LoopDetected = 508; - // NotExtended - ``510`` status code. NotExtended = 510; - // NetworkAuthenticationRequired - ``511`` status code. NetworkAuthenticationRequired = 511; } diff --git a/src/main/proto/google/api/annotations.proto b/src/main/proto/google/api/annotations.proto index 84c4816..efdab3d 100644 --- a/src/main/proto/google/api/annotations.proto +++ b/src/main/proto/google/api/annotations.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/src/main/proto/google/api/expr/v1alpha1/checked.proto b/src/main/proto/google/api/expr/v1alpha1/checked.proto index c684934..930dc4f 100644 --- a/src/main/proto/google/api/expr/v1alpha1/checked.proto +++ b/src/main/proto/google/api/expr/v1alpha1/checked.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/src/main/proto/google/api/expr/v1alpha1/syntax.proto b/src/main/proto/google/api/expr/v1alpha1/syntax.proto index 7b6668d..8219ba6 100644 --- a/src/main/proto/google/api/expr/v1alpha1/syntax.proto +++ b/src/main/proto/google/api/expr/v1alpha1/syntax.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -44,24 +44,21 @@ message ParsedExpr { // operators with the exception of the '.' operator are modelled as function // calls. This makes it easy to represent new operators into the existing AST. // -// All references within expressions must resolve to a -// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an -// expression to be valid. A reference may either be a bare identifier `name` or -// a qualified identifier `google.api.name`. References may either refer to a -// value or a function declaration. +// All references within expressions must resolve to a [Decl][google.api.expr.v1alpha1.Decl] provided at +// type-check for an expression to be valid. A reference may either be a bare +// identifier `name` or a qualified identifier `google.api.name`. References +// may either refer to a value or a function declaration. // // For example, the expression `google.api.name.startsWith('expr')` references -// the declaration `google.api.name` within a -// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the -// function declaration `startsWith`. +// the declaration `google.api.name` within a [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and +// the function declaration `startsWith`. message Expr { // An identifier expression. e.g. `request`. message Ident { // Required. Holds a single, unqualified identifier, possibly preceded by a // '.'. 
// - // Qualified names are represented by the - // [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. + // Qualified names are represented by the [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. string name = 1; } @@ -184,49 +181,11 @@ message Expr { // messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the // macro tests whether the property is set to its default. For map and struct // types, the macro tests whether the property `x` is defined on `m`. - // - // Comprehensions for the standard environment macros evaluation can be best - // visualized as the following pseudocode: - // - // ``` - // let `accu_var` = `accu_init` - // for (let `iter_var` in `iter_range`) { - // if (!`loop_condition`) { - // break - // } - // `accu_var` = `loop_step` - // } - // return `result` - // ``` - // - // Comprehensions for the optional V2 macros which support map-to-map - // translation differ slightly from the standard environment macros in that - // they expose both the key or index in addition to the value for each list - // or map entry: - // - // ``` - // let `accu_var` = `accu_init` - // for (let `iter_var`, `iter_var2` in `iter_range`) { - // if (!`loop_condition`) { - // break - // } - // `accu_var` = `loop_step` - // } - // return `result` - // ``` message Comprehension { - // The name of the first iteration variable. - // When the iter_range is a list, this variable is the list element. - // When the iter_range is a map, this variable is the map entry key. + // The name of the iteration variable. string iter_var = 1; - // The name of the second iteration variable, empty if not set. - // When the iter_range is a list, this variable is the integer index. - // When the iter_range is a map, this variable is the map entry value. - // This field is only set for comprehension v2 macros. - string iter_var2 = 8; - - // The range over which the comprehension iterates. + // The range over which var iterates. 
Expr iter_range = 2; // The name of the variable used for accumulation of the result. @@ -235,13 +194,13 @@ message Expr { // The initial value of the accumulator. Expr accu_init = 4; - // An expression which can contain iter_var, iter_var2, and accu_var. + // An expression which can contain iter_var and accu_var. // // Returns false when the result has been computed and may be used as // a hint to short-circuit the remainder of the comprehension. Expr loop_condition = 5; - // An expression which can contain iter_var, iter_var2, and accu_var. + // An expression which can contain iter_var and accu_var. // // Computes the next value of accu_var. Expr loop_step = 6; @@ -291,8 +250,7 @@ message Expr { // primitives. // // Lists and structs are not included as constants as these aggregate types may -// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require -// evaluation and are thus not constant. +// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require evaluation and are thus not constant. // // Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`, // `true`, `null`. @@ -334,50 +292,6 @@ message Constant { // Source information collected at parse time. message SourceInfo { - // An extension that was requested for the source expression. - message Extension { - // Version - message Version { - // Major version changes indicate different required support level from - // the required components. - int64 major = 1; - - // Minor version changes must not change the observed behavior from - // existing implementations, but may be provided informationally. - int64 minor = 2; - } - - // CEL component specifier. - enum Component { - // Unspecified, default. - COMPONENT_UNSPECIFIED = 0; - - // Parser. Converts a CEL string to an AST. - COMPONENT_PARSER = 1; - - // Type checker. Checks that references in an AST are defined and types - // agree. - COMPONENT_TYPE_CHECKER = 2; - - // Runtime. 
Evaluates a parsed and optionally checked CEL AST against a - // context. - COMPONENT_RUNTIME = 3; - } - - // Identifier for the extension. Example: constant_folding - string id = 1; - - // If set, the listed components must understand the extension for the - // expression to evaluate correctly. - // - // This field has set semantics, repeated values should be deduplicated. - repeated Component affected_components = 2; - - // Version info. May be skipped if it isn't meaningful for the extension. - // (for example constant_folding might always be v0.0). - Version version = 3; - } - // The syntax version of the source, e.g. `cel1`. string syntax_version = 1; @@ -409,15 +323,6 @@ message SourceInfo { // in the map corresponds to the expression id of the expanded macro, and the // value is the call `Expr` that was replaced. map macro_calls = 5; - - // A list of tags for extensions that were used while parsing or type checking - // the source expression. For example, optimizations that require special - // runtime support may be specified. - // - // These are used to check feature support between components in separate - // implementations. This can be used to either skip redundant work or - // report an error if the extension is unsupported. - repeated Extension extensions = 6; } // A specific position in source. diff --git a/src/main/proto/google/api/http.proto b/src/main/proto/google/api/http.proto index e327037..113fa93 100644 --- a/src/main/proto/google/api/http.proto +++ b/src/main/proto/google/api/http.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2015 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ message Http { bool fully_decode_reserved_expansion = 2; } -// gRPC Transcoding +// # gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -82,8 +82,9 @@ message Http { // // This enables an HTTP REST to gRPC mapping as below: // -// - HTTP: `GET /v1/messages/123456` -// - gRPC: `GetMessage(name: "messages/123456")` +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -107,9 +108,11 @@ message Http { // // This enables a HTTP JSON to RPC mapping as below: // -// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` -// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: -// SubMessage(subfield: "foo"))` +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -139,8 +142,10 @@ message Http { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -163,8 +168,10 @@ message Http { // // The following HTTP JSON to RPC mapping is enabled: // -// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` -// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" 
}` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -192,32 +199,29 @@ message Http { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// - HTTP: `GET /v1/messages/123456` -// - gRPC: `GetMessage(message_id: "123456")` +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` // -// - HTTP: `GET /v1/users/me/messages/123456` -// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` -// -// Rules for HTTP mapping +// ## Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: // - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They -// are passed via the HTTP +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP // request body. // - All other fields are passed via the URL query parameters, and the // parameter name is the field path in the request message. A repeated // field can be represented as multiple query parameters under the same // name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL -// query parameter, all fields +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields // are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP -// request body, all +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all // fields are passed via URL path and URL query parameters. 
// -// Path template syntax +// ### Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -256,7 +260,7 @@ message Http { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. // -// Using gRPC API Service Configuration +// ## Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -271,14 +275,15 @@ message Http { // specified in the service config will override any matching transcoding // configuration in the proto. // -// The following example selects a gRPC method and applies an `HttpRule` to it: +// Example: // // http: // rules: +// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// Special notes +// ## Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 @@ -308,8 +313,7 @@ message Http { message HttpRule { // Selects a method to which this rule applies. // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax - // details. + // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. string selector = 1; // Determines the URL pattern is matched by this rules. This pattern can be diff --git a/src/main/proto/google/rpc/status.proto b/src/main/proto/google/rpc/status.proto index 90b70dd..923e169 100644 --- a/src/main/proto/google/rpc/status.proto +++ b/src/main/proto/google/rpc/status.proto @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/src/main/proto/opencensus/proto/agent/common/v1/common.proto b/src/main/proto/opencensus/proto/agent/common/v1/common.proto new file mode 100644 index 0000000..2bfc274 --- /dev/null +++ b/src/main/proto/opencensus/proto/agent/common/v1/common.proto @@ -0,0 +1,101 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +// NOTE: This proto is experimental and is subject to change at this point. +// Please do not use it at the moment. + +package opencensus.proto.agent.common.v1; + +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.agent.common.v1"; +option java_outer_classname = "CommonProto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"; + +option ruby_package = "OpenCensus::Proto::Agent::Common::V1"; + +// Identifier metadata of the Node that produces the span or tracing data. +// Note, this is not the metadata about the Node or service that is described by associated spans. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +message Node { + // Identifier that uniquely identifies a process within a VM/container. + ProcessIdentifier identifier = 1; + + // Information on the OpenCensus Library that initiates the stream. 
+ LibraryInfo library_info = 2; + + // Additional information on service. + ServiceInfo service_info = 3; + + // Additional attributes. + map attributes = 4; + + // TODO(songya): Add more identifiers in the future as needed, like cloud + // identifiers. +} + +// Identifier that uniquely identifies a process within a VM/container. +message ProcessIdentifier { + + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + string host_name = 1; + + // Process id. + uint32 pid = 2; + + // Start time of this ProcessIdentifier. Represented in epoch time. + google.protobuf.Timestamp start_timestamp = 3; +} + +// Information on OpenCensus Library. +message LibraryInfo { + + enum Language { + LANGUAGE_UNSPECIFIED = 0; + CPP = 1; + C_SHARP = 2; + ERLANG = 3; + GO_LANG = 4; + JAVA = 5; + NODE_JS = 6; + PHP = 7; + PYTHON = 8; + RUBY = 9; + WEB_JS = 10; + } + + // Language of OpenCensus Library. + Language language = 1; + + // Version of Agent exporter of Library. + string exporter_version = 2; + + // Version of OpenCensus Library. + string core_library_version = 3; +} + +// Additional service information. +message ServiceInfo { + + // Name of the service. + string name = 1; + + // TODO(songya): add more fields as needed. +} diff --git a/src/main/proto/opencensus/proto/agent/metrics/v1/metrics_service.proto b/src/main/proto/opencensus/proto/agent/metrics/v1/metrics_service.proto new file mode 100644 index 0000000..13d9171 --- /dev/null +++ b/src/main/proto/opencensus/proto/agent/metrics/v1/metrics_service.proto @@ -0,0 +1,58 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opencensus.proto.agent.metrics.v1; + +import "opencensus/proto/agent/common/v1/common.proto"; +import "opencensus/proto/metrics/v1/metrics.proto"; +import "opencensus/proto/resource/v1/resource.proto"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.agent.metrics.v1"; +option java_outer_classname = "MetricsServiceProto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"; + +option ruby_package = "OpenCensus::Proto::Agent::Metrics::V1"; + +// Service that can be used to push metrics between one Application +// instrumented with OpenCensus and an agent, or between an agent and a +// central collector. +service MetricsService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(stream ExportMetricsServiceRequest) returns (stream ExportMetricsServiceResponse) {} +} + +message ExportMetricsServiceRequest { + // This is required only in the first message on the stream or if the + // previous sent ExportMetricsServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Metrics from multiple Applications). + opencensus.proto.agent.common.v1.Node node = 1; + + // A list of metrics that belong to the last received Node. + repeated opencensus.proto.metrics.v1.Metric metrics = 2; + + // The resource for the metrics in this message that do not have an explicit + // resource set. 
+ // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known + // at all or when all sent metrics have an explicit resource set. + opencensus.proto.resource.v1.Resource resource = 3; +} + +message ExportMetricsServiceResponse { +} diff --git a/src/main/proto/opencensus/proto/agent/trace/v1/trace_service.proto b/src/main/proto/opencensus/proto/agent/trace/v1/trace_service.proto new file mode 100644 index 0000000..8792940 --- /dev/null +++ b/src/main/proto/opencensus/proto/agent/trace/v1/trace_service.proto @@ -0,0 +1,87 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +// NOTE: This proto is experimental and is subject to change at this point. +// Please do not use it at the moment. 
+ +package opencensus.proto.agent.trace.v1; + +import "opencensus/proto/agent/common/v1/common.proto"; +import "opencensus/proto/resource/v1/resource.proto"; +import "opencensus/proto/trace/v1/trace.proto"; +import "opencensus/proto/trace/v1/trace_config.proto"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.agent.trace.v1"; +option java_outer_classname = "TraceServiceProto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"; + +option ruby_package = "OpenCensus::Proto::Agent::Trace::V1"; + +// Service that can be used to push spans and configs between one Application +// instrumented with OpenCensus and an agent, or between an agent and a +// central collector or config service (in this case spans and configs are +// sent/received to/from multiple Applications). +service TraceService { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + rpc Config(stream CurrentLibraryConfig) returns (stream UpdatedLibraryConfig) {} + + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(stream ExportTraceServiceRequest) returns (stream ExportTraceServiceResponse) {} +} + +message CurrentLibraryConfig { + // This is required only in the first message on the stream or if the + // previous sent CurrentLibraryConfig message has a different Node (e.g. + // when the same RPC is used to configure multiple Applications). + opencensus.proto.agent.common.v1.Node node = 1; + + // Current configuration. + opencensus.proto.trace.v1.TraceConfig config = 2; +} + +message UpdatedLibraryConfig { + // This field is ignored when the RPC is used to configure only one Application. + // This is required only in the first message on the stream or if the + // previous sent UpdatedLibraryConfig message has a different Node. 
+ opencensus.proto.agent.common.v1.Node node = 1; + + // Requested updated configuration. + opencensus.proto.trace.v1.TraceConfig config = 2; +} + +message ExportTraceServiceRequest { + // This is required only in the first message on the stream or if the + // previous sent ExportTraceServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Spans from multiple Applications). + opencensus.proto.agent.common.v1.Node node = 1; + + // A list of Spans that belong to the last received Node. + repeated opencensus.proto.trace.v1.Span spans = 2; + + // The resource for the spans in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known. + opencensus.proto.resource.v1.Resource resource = 3; +} + +message ExportTraceServiceResponse { +} diff --git a/src/main/proto/opencensus/proto/metrics/v1/metrics.proto b/src/main/proto/opencensus/proto/metrics/v1/metrics.proto new file mode 100644 index 0000000..88f2261 --- /dev/null +++ b/src/main/proto/opencensus/proto/metrics/v1/metrics.proto @@ -0,0 +1,303 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package describes the Metrics data model. It is currently experimental +// but may eventually become the wire format for metrics. 
Please see +// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/Metrics.md +// for more details. + +syntax = "proto3"; + +package opencensus.proto.metrics.v1; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "opencensus/proto/resource/v1/resource.proto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.metrics.v1"; +option java_outer_classname = "MetricsProto"; + +option ruby_package = "OpenCensus::Proto::Metrics::V1"; + +// Defines a Metric which has one or more timeseries. +message Metric { + // The descriptor of the Metric. + // TODO(issue #152): consider only sending the name of descriptor for + // optimization. + MetricDescriptor metric_descriptor = 1; + + // One or more timeseries for a single metric, where each timeseries has + // one or more points. + repeated TimeSeries timeseries = 2; + + // The resource for the metric. If unset, it may be set to a default value + // provided for a sequence of messages in an RPC stream. + opencensus.proto.resource.v1.Resource resource = 3; +} + +// Defines a metric type and its schema. +message MetricDescriptor { + // The metric type, including its DNS name prefix. It must be unique. + string name = 1; + + // A detailed description of the metric, which can be used in documentation. + string description = 2; + + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + string unit = 3; + + // The kind of metric. It describes how the data is reported. + // + // A gauge is an instantaneous measurement of a value. + // + // A cumulative measurement is a value accumulated over a time interval. 
In + // a time series, cumulative measurements should have the same start time, + // increasing values and increasing end times, until an event resets the + // cumulative value to zero and sets a new start time for the following + // points. + enum Type { + // Do not use this default value. + UNSPECIFIED = 0; + + // Integer gauge. The value can go both up and down. + GAUGE_INT64 = 1; + + // Floating point gauge. The value can go both up and down. + GAUGE_DOUBLE = 2; + + // Distribution gauge measurement. The count and sum can go both up and + // down. Recorded values are always >= 0. + // Used in scenarios like a snapshot of time the current items in a queue + // have spent there. + GAUGE_DISTRIBUTION = 3; + + // Integer cumulative measurement. The value cannot decrease, if resets + // then the start_time should also be reset. + CUMULATIVE_INT64 = 4; + + // Floating point cumulative measurement. The value cannot decrease, if + // resets then the start_time should also be reset. Recorded values are + // always >= 0. + CUMULATIVE_DOUBLE = 5; + + // Distribution cumulative measurement. The count and sum cannot decrease, + // if resets then the start_time should also be reset. + CUMULATIVE_DISTRIBUTION = 6; + + // Some frameworks implemented Histograms as a summary of observations + // (usually things like request durations and response sizes). While it + // also provides a total count of observations and a sum of all observed + // values, it calculates configurable percentiles over a sliding time + // window. This is not recommended, since it cannot be aggregated. + SUMMARY = 7; + } + Type type = 4; + + // The label keys associated with the metric descriptor. + repeated LabelKey label_keys = 5; +} + +// Defines a label key associated with a metric descriptor. +message LabelKey { + // The key for the label. + string key = 1; + + // A human-readable description of what this label key represents. 
+ string description = 2; +} + +// A collection of data points that describes the time-varying values +// of a metric. +message TimeSeries { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + google.protobuf.Timestamp start_timestamp = 1; + + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + repeated LabelValue label_values = 2; + + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + repeated Point points = 3; +} + +message LabelValue { + // The value for the label. + string value = 1; + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. + bool has_value = 2; +} + +// A timestamped measurement. +message Point { + // The moment when this point was recorded. Inclusive. + // If not specified, the timestamp will be decided by the backend. + google.protobuf.Timestamp timestamp = 1; + + // The actual point value. + oneof value { + // A 64-bit integer. + int64 int64_value = 2; + + // A 64-bit double-precision floating-point number. + double double_value = 3; + + // A distribution value. + DistributionValue distribution_value = 4; + + // A summary value. This is not recommended, since it cannot be aggregated. + SummaryValue summary_value = 5; + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +message DistributionValue { + // The number of values in the population. Must be non-negative. 
This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + int64 count = 1; + + // The sum of the values in the population. If count is zero then this field + // must be zero. + double sum = 2; + + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + double sum_of_squared_deviation = 3; + + // A Distribution may optionally contain a histogram of the values in the + // population. The bucket boundaries for that histogram are described by + // BucketOptions. + // + // If bucket_options has no type, then there is no histogram associated with + // the Distribution. + message BucketOptions { + oneof type { + // Bucket with explicit bounds. + Explicit explicit = 1; + } + + // Specifies a set of buckets with arbitrary upper-bounds. + // This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, bucket_bounds[i]) for i == 0 + // [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 + // [bucket_bounds[i], +infinity) for i == N-1 + message Explicit { + // The values must be strictly increasing and > 0. + repeated double bounds = 1; + } + + // TODO: If OpenMetrics decides to support (a, b] intervals we should add + // support for these by defining a boolean value here which decides what + // type of intervals to use. + } + + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. + // TODO(issue #152): consider not required to send bucket options for + // optimization. + BucketOptions bucket_options = 4; + + message Bucket { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. 
+ int64 count = 1; + + // If the distribution does not have a histogram, then omit this field. + Exemplar exemplar = 2; + } + + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + repeated Bucket buckets = 5; + + // Exemplars are example points that may be used to annotate aggregated + // Distribution values. They are metadata that gives information about a + // particular value added to a Distribution bucket. + message Exemplar { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + double value = 1; + + // The observation (sampling) time of the above value. + google.protobuf.Timestamp timestamp = 2; + + // Contextual information about the example value. + map attachments = 3; + } +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +message SummaryValue { + // The total number of recorded values since start_time. Optional since + // some systems don't expose this. + google.protobuf.Int64Value count = 1; + + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + google.protobuf.DoubleValue sum = 2; + + // The values in this message can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + message Snapshot { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + google.protobuf.Int64Value count = 1; + + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + google.protobuf.DoubleValue sum = 2; + + // Represents the value at a given percentile of a distribution. 
+ message ValueAtPercentile { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + double percentile = 1; + + // The value at the given percentile of a distribution. + double value = 2; + } + + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. + repeated ValueAtPercentile percentile_values = 3; + } + + // Values calculated over an arbitrary time window. + Snapshot snapshot = 3; +} + diff --git a/src/main/proto/opencensus/proto/resource/v1/resource.proto b/src/main/proto/opencensus/proto/resource/v1/resource.proto new file mode 100644 index 0000000..9ce4f39 --- /dev/null +++ b/src/main/proto/opencensus/proto/resource/v1/resource.proto @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opencensus.proto.resource.v1; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.resource.v1"; +option java_outer_classname = "ResourceProto"; + +option ruby_package = "OpenCensus::Proto::Resource::V1"; + +// Resource information. +message Resource { + + // Type identifier for the resource. + string type = 1; + + // Set of labels that describe the resource. 
+  map<string, string> labels = 2;
+}
diff --git a/src/main/proto/opencensus/proto/stats/v1/stats.proto b/src/main/proto/opencensus/proto/stats/v1/stats.proto
new file mode 100644
index 0000000..ae92457
--- /dev/null
+++ b/src/main/proto/opencensus/proto/stats/v1/stats.proto
@@ -0,0 +1,138 @@
+// Copyright 2016-18, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opencensus.proto.stats.v1;
+
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/stats/v1";
+
+option java_multiple_files = true;
+option java_package = "io.opencensus.proto.stats.v1";
+option java_outer_classname = "StatsProto";
+
+option ruby_package = "OpenCensus::Proto::Stats::V1";
+
+// TODO(bdrutu): Consider if this should be moved to a "tags" directory to match the API structure.
+message Tag {
+  string key = 1;
+  string value = 2;
+}
+
+// Measure .
+message Measure {
+  // A string by which the measure will be referred to, e.g. "rpc_server_latency". Names MUST be
+  // unique within the library.
+  string name = 1;
+
+  // Describes the measure, e.g. "RPC latency in seconds".
+  string description = 2;
+
+  // Describes the unit used for the Measure. Follows the format described by
+  // http://unitsofmeasure.org/ucum.html.
+  string unit = 3;
+
+  enum Type {
+    // Unknown type.
+    TYPE_UNSPECIFIED = 0;
+    // Indicates an int64 Measure.
+    INT64 = 1;
+    // Indicates a double Measure.
+ DOUBLE = 2; + } + + // The type used for this Measure. + Type type = 4; +} + +message View { + // A string by which the View will be referred to, e.g. "rpc_latency". Names MUST be unique + // within the library. + string name = 1; + + // Describes the view, e.g. "RPC latency distribution" + string description = 2; + + // The Measure to which this view is applied. + Measure measure = 3; + + // An array of tag keys. These values associated with tags of this name form the basis by which + // individual stats will be aggregated (one aggregation per unique tag value). If none are + // provided, then all data is recorded in a single aggregation. + repeated string columns = 4; + + // The description of the aggregation used for this view which describes how data collected are + // aggregated. + oneof aggregation { + // Counts the number of measurements recorded. + CountAggregation count_aggregation = 5; + // Indicates that data collected and aggregated with this Aggregation will be summed up. + SumAggregation sum_aggregation = 6; + // Indicates that data collected and aggregated with this Aggregation will represent the last + // recorded value. This is useful to support Gauges. + LastValueAggregation last_value_aggregation = 7; + // Indicates that the desired Aggregation is a histogram distribution. A distribution + // Aggregation may contain a histogram of the values in the population. User should define the + // bucket boundaries for that histogram (see DistributionAggregation). + DistributionAggregation distribution_aggregation = 8; + } +} + +message CountAggregation {} + +message SumAggregation {} + +message LastValueAggregation {} + +message DistributionAggregation { + // A Distribution may optionally contain a histogram of the values in the + // population. The bucket boundaries for that histogram are described by + // `bucket_bounds`. This defines `size(bucket_bounds) + 1` (= N) + // buckets. 
The boundaries for bucket index i are: + // + // (-infinity, bucket_bounds[i]) for i == 0 + // [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-2 + // [bucket_bounds[i-1], +infinity) for i == N-1 + // + // i.e. an underflow bucket (number 0), zero or more finite buckets (1 + // through N - 2, and an overflow bucket (N - 1), with inclusive lower + // bounds and exclusive upper bounds. + // + // If `bucket_bounds` has no elements (zero size), then there is no + // histogram associated with the Distribution. If `bucket_bounds` has only + // one element, there are no finite buckets, and that single element is the + // common boundary of the overflow and underflow buckets. The values must + // be monotonically increasing. + repeated double bucket_bounds = 1; +} + +// Describes a data point to be collected for a Measure. +message Measurement { + repeated Tag tags = 1; + + // The name of the measure to which the value is applied. + string measure_name = 2; + + // The recorded value, MUST have the appropriate type to match the Measure. + oneof value { + double double_value = 3; + int64 int_value = 4; + } + + // The time when this measurement was recorded. If the implementation uses a async buffer to + // record measurements this may be the time when the measurement was read from the buffer. + google.protobuf.Timestamp time = 5; +} diff --git a/src/main/proto/opencensus/proto/trace/v1/trace.proto b/src/main/proto/opencensus/proto/trace/v1/trace.proto new file mode 100644 index 0000000..96d706a --- /dev/null +++ b/src/main/proto/opencensus/proto/trace/v1/trace.proto @@ -0,0 +1,422 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opencensus.proto.trace.v1; + +import "opencensus/proto/resource/v1/resource.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.trace.v1"; +option java_outer_classname = "TraceProto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"; + +option ruby_package = "OpenCensus::Proto::Trace::V1"; + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace. And form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next id is 17. +// TODO(bdrutu): Add an example. +message Span { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + bytes trace_id = 1; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + bytes span_id = 2; + + // This field conveys information about request position in multiple distributed tracing graphs. + // It is a list of Tracestate.Entry with a maximum of 32 members in the list. + // + // See the https://github.com/w3c/distributed-tracing for more details about this field. + message Tracestate { + message Entry { + // The key must begin with a lowercase letter, and can only contain + // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes + // '-', asterisks '*', and forward slashes '/'. + string key = 1; + + // The value is opaque string up to 256 characters printable ASCII + // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. + // Note that this also excludes tabs, newlines, carriage returns, etc. + string value = 2; + } + + // A list of entries that represent the Tracestate. + repeated Entry entries = 1; + } + + // The Tracestate on the span. + Tracestate tracestate = 15; + + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + bytes parent_span_id = 3; + + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // When null or empty string received - receiver may use string "name" + // as a replacement. There might be smarted algorithms implemented by + // receiver to fix the empty span name. + // + // This field is required. 
+ TruncatableString name = 4; + + // Type of span. Can be used to specify additional relationships between spans + // in addition to a parent/child relationship. + enum SpanKind { + // Unspecified. + SPAN_KIND_UNSPECIFIED = 0; + + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SERVER = 1; + + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + CLIENT = 2; + } + + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + SpanKind kind = 14; + + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to the value of end_time field if it was + // set. Or to the current time if neither was set. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + google.protobuf.Timestamp start_time = 5; + + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to start_time value. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + google.protobuf.Timestamp end_time = 6; + + // A set of attributes, each with a key and a value. + message Attributes { + // The set of attributes. 
The value can be a string, an integer, a double
+    // or the Boolean values `true` or `false`. Note, global attributes like
+    // server name can be set as tags using resource API. Examples of attributes:
+    //
+    //     "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+    //     "/http/server_latency": 300
+    //     "abc.com/myattribute": true
+    //     "abc.com/score": 10.239
+    map<string, AttributeValue> attribute_map = 1;
+
+    // The number of attributes that were discarded. Attributes can be discarded
+    // because their keys are too long or because there are too many attributes.
+    // If this value is 0, then no attributes were dropped.
+    int32 dropped_attributes_count = 2;
+  }
+
+  // A set of attributes on the span.
+  Attributes attributes = 7;
+
+  // A stack trace captured at the start of the span.
+  StackTrace stack_trace = 8;
+
+  // A time-stamped annotation or message event in the Span.
+  message TimeEvent {
+    // The time the event occurred.
+    google.protobuf.Timestamp time = 1;
+
+    // A text annotation with a set of attributes.
+    message Annotation {
+      // A user-supplied message describing the event.
+      TruncatableString description = 1;
+
+      // A set of attributes on the annotation.
+      Attributes attributes = 2;
+    }
+
+    // An event describing a message sent/received between Spans.
+    message MessageEvent {
+      // Indicates whether the message was sent or received.
+      enum Type {
+        // Unknown event type.
+        TYPE_UNSPECIFIED = 0;
+        // Indicates a sent message.
+        SENT = 1;
+        // Indicates a received message.
+        RECEIVED = 2;
+      }
+
+      // The type of MessageEvent. Indicates whether the message was sent or
+      // received.
+      Type type = 1;
+
+      // An identifier for the MessageEvent's message that can be used to match
+      // SENT and RECEIVED MessageEvents. For example, this field could
+      // represent a sequence ID for a streaming RPC. It is recommended to be
+      // unique within a Span.
+ uint64 id = 2; + + // The number of uncompressed bytes sent or received. + uint64 uncompressed_size = 3; + + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. + uint64 compressed_size = 4; + } + + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. + oneof value { + // A text annotation with a set of attributes. + Annotation annotation = 2; + + // An event describing a message sent/received between Spans. + MessageEvent message_event = 3; + } + } + + // A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation + // on the span, consisting of either user-supplied key-value pairs, or + // details of a message sent/received between Spans. + message TimeEvents { + // A collection of `TimeEvent`s. + repeated TimeEvent time_event = 1; + + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + int32 dropped_annotations_count = 2; + + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. + int32 dropped_message_events_count = 3; + } + + // The included time events. + TimeEvents time_events = 9; + + // A pointer from the current span to another span in the same trace or in a + // different trace. For example, this can be used in batching operations, + // where a single batch handler processes multiple requests from different + // traces or when the handler receives a request from a different project. + message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; + + // The relationship of the current span relative to the linked span: child, + // parent, or unspecified. 
+ enum Type { + // The relationship of the two spans is unknown, or known but other + // than parent-child. + TYPE_UNSPECIFIED = 0; + // The linked span is a child of the current span. + CHILD_LINKED_SPAN = 1; + // The linked span is a parent of the current span. + PARENT_LINKED_SPAN = 2; + } + + // The relationship of the current span relative to the linked span. + Type type = 3; + + // A set of attributes on the link. + Attributes attributes = 4; + + // The Tracestate associated with the link. + Tracestate tracestate = 5; + } + + // A collection of links, which are references from this span to a span + // in the same or different trace. + message Links { + // A collection of links. + repeated Link link = 1; + + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + int32 dropped_links_count = 2; + } + + // The included links. + Links links = 10; + + // An optional final status for this span. Semantically when Status + // wasn't set it is means span ended without errors and assume + // Status.Ok (code = 0). + Status status = 11; + + // An optional resource that is associated with this span. If not set, this span + // should be part of a batch that does include the resource information, unless resource + // information is unknown. + opencensus.proto.resource.v1.Resource resource = 16; + + // A highly recommended but not required flag that identifies when a + // trace crosses a process boundary. True when the parent_span belongs + // to the same process as the current span. This flag is most commonly + // used to indicate the need to adjust time as clocks in different + // processes may not be synchronized. + google.protobuf.BoolValue same_process_as_parent_span = 12; + + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. 
+ google.protobuf.UInt32Value child_span_count = 13; +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +message Status { + // The status code. This is optional field. It is safe to assume 0 (OK) + // when not set. + int32 code = 1; + + // A developer-facing error message, which should be in English. + string message = 2; +} + +// The value of an Attribute. +message AttributeValue { + // The type of the value. + oneof value { + // A string up to 256 bytes long. + TruncatableString string_value = 1; + // A 64-bit signed integer. + int64 int_value = 2; + // A Boolean value represented by `true` or `false`. + bool bool_value = 3; + // A double value. + double double_value = 4; + } +} + +// The call stack which originated this span. +message StackTrace { + // A single stack frame in a stack trace. + message StackFrame { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + TruncatableString function_name = 1; + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. + TruncatableString original_function_name = 2; + // The name of the source file where the function call appears. + TruncatableString file_name = 3; + // The line number in `file_name` where the function call appears. + int64 line_number = 4; + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + int64 column_number = 5; + // The binary module from where the code was loaded. + Module load_module = 6; + // The version of the deployed source code. 
+ TruncatableString source_version = 7; + } + + // A collection of stack frames, which can be truncated. + message StackFrames { + // Stack frames in this call stack. + repeated StackFrame frame = 1; + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. + int32 dropped_frames_count = 2; + } + + // Stack frames in this stack trace. + StackFrames stack_frames = 1; + + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. + // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. + uint64 stack_trace_hash_id = 2; +} + +// A description of a binary module. +message Module { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + TruncatableString module = 1; + + // A unique identifier for the module, usually a hash of its + // contents. + TruncatableString build_id = 2; +} + +// A string that might be shortened to a specified length. +message TruncatableString { + // The shortened string. For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. 
+ string value = 1; + + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + int32 truncated_byte_count = 2; +} diff --git a/src/main/proto/opencensus/proto/trace/v1/trace_config.proto b/src/main/proto/opencensus/proto/trace/v1/trace_config.proto new file mode 100644 index 0000000..037247d --- /dev/null +++ b/src/main/proto/opencensus/proto/trace/v1/trace_config.proto @@ -0,0 +1,81 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opencensus.proto.trace.v1; + +option java_multiple_files = true; +option java_package = "io.opencensus.proto.trace.v1"; +option java_outer_classname = "TraceConfigProto"; + +option go_package = "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"; + +option ruby_package = "OpenCensus::Proto::Trace::V1"; + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +message TraceConfig { + + // The global default sampler used to make decisions on span sampling. + oneof sampler { + ProbabilitySampler probability_sampler = 1; + + ConstantSampler constant_sampler = 2; + + RateLimitingSampler rate_limiting_sampler = 3; + } + + // The global default max number of attributes per span. + int64 max_number_of_attributes = 4; + + // The global default max number of annotation events per span. 
+ int64 max_number_of_annotations = 5; + + // The global default max number of message events per span. + int64 max_number_of_message_events = 6; + + // The global default max number of link entries per span. + int64 max_number_of_links = 7; +} + +// Sampler that tries to uniformly sample traces with a given probability. +// The probability of sampling a trace is equal to that of the specified probability. +message ProbabilitySampler { + + // The desired probability of sampling. Must be within [0.0, 1.0]. + double samplingProbability = 1; +} + +// Sampler that always makes a constant decision on span sampling. +message ConstantSampler { + + // How spans should be sampled: + // - Always off + // - Always on + // - Always follow the parent Span's decision (off if no parent). + enum ConstantDecision { + ALWAYS_OFF = 0; + ALWAYS_ON = 1; + ALWAYS_PARENT = 2; + } + ConstantDecision decision = 1; +} + +// Sampler that tries to sample with a rate per time window. +message RateLimitingSampler { + + // Rate per second. 
+ int64 qps = 1; +} diff --git a/src/main/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto b/src/main/proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto similarity index 91% rename from src/main/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto rename to src/main/proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto index ab2433e..d0e7894 100644 --- a/src/main/proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto +++ b/src/main/proto/opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto @@ -14,15 +14,15 @@ syntax = "proto3"; -package opentelemetry.proto.collector.profiles.v1development; +package opentelemetry.proto.collector.profiles.v1experimental; -import "opentelemetry/proto/profiles/v1development/profiles.proto"; +import "opentelemetry/proto/profiles/v1experimental/profiles.proto"; -option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Development"; +option csharp_namespace = "OpenTelemetry.Proto.Collector.Profiles.V1Experimental"; option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.collector.profiles.v1development"; +option java_package = "io.opentelemetry.proto.collector.profiles.v1experimental"; option java_outer_classname = "ProfilesServiceProto"; -option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1development"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/profiles/v1experimental"; // Service that can be used to push profiles between one Application instrumented with // OpenTelemetry and a collector, or between a collector and a central collector. @@ -38,7 +38,7 @@ message ExportProfilesServiceRequest { // element. 
Intermediary nodes (such as OpenTelemetry Collector) that receive // data from multiple origins typically batch the data before forwarding further and // in that case this array will contain multiple elements. - repeated opentelemetry.proto.profiles.v1development.ResourceProfiles resource_profiles = 1; + repeated opentelemetry.proto.profiles.v1experimental.ResourceProfiles resource_profiles = 1; } message ExportProfilesServiceResponse { diff --git a/src/main/proto/opentelemetry/proto/logs/v1/logs.proto b/src/main/proto/opentelemetry/proto/logs/v1/logs.proto index 261d229..f9b97dd 100644 --- a/src/main/proto/opentelemetry/proto/logs/v1/logs.proto +++ b/src/main/proto/opentelemetry/proto/logs/v1/logs.proto @@ -56,8 +56,7 @@ message ResourceLogs { repeated ScopeLogs scope_logs = 2; // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_logs" field which have their own schema_url field. @@ -75,8 +74,7 @@ message ScopeLogs { repeated LogRecord log_records = 2; // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all logs in the "logs" field. string schema_url = 3; @@ -210,18 +208,4 @@ message LogRecord { // - the field is not present, // - the field contains an invalid value. 
bytes span_id = 10; - - // A unique identifier of event category/type. - // All events with the same event_name are expected to conform to the same - // schema for both their attributes and their body. - // - // Recommended to be fully qualified and short (no longer than 256 characters). - // - // Presence of event_name on the log record identifies this record - // as an event. - // - // [Optional]. - // - // Status: [Development] - string event_name = 12; } diff --git a/src/main/proto/opentelemetry/proto/metrics/v1/metrics.proto b/src/main/proto/opentelemetry/proto/metrics/v1/metrics.proto index 00c5112..19bb7ff 100644 --- a/src/main/proto/opentelemetry/proto/metrics/v1/metrics.proto +++ b/src/main/proto/opentelemetry/proto/metrics/v1/metrics.proto @@ -29,24 +29,6 @@ option go_package = "go.opentelemetry.io/proto/otlp/metrics/v1"; // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP protocol. // -// MetricsData -// └─── ResourceMetrics -// ├── Resource -// ├── SchemaURL -// └── ScopeMetrics -// ├── Scope -// ├── SchemaURL -// └── Metric -// ├── Name -// ├── Description -// ├── Unit -// └── data -// ├── Gauge -// ├── Sum -// ├── Histogram -// ├── ExponentialHistogram -// └── Summary -// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -74,8 +56,7 @@ message ResourceMetrics { repeated ScopeMetrics scope_metrics = 2; // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. 
It does not apply // to the data in the "scope_metrics" field which have their own schema_url field. @@ -93,8 +74,7 @@ message ScopeMetrics { repeated Metric metrics = 2; // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all metrics in the "metrics" field. string schema_url = 3; @@ -105,6 +85,7 @@ message ScopeMetrics { // // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md // +// // The data model and relation between entities is shown in the // diagram below. Here, "DataPoint" is the term used to refer to any // one of the specific data point value types, and "points" is the term used @@ -116,7 +97,7 @@ message ScopeMetrics { // - DataPoint contains timestamps, attributes, and one of the possible value type // fields. // -// Metric +// Metric // +------------+ // |name | // |description | @@ -270,9 +251,6 @@ message ExponentialHistogram { // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. -// Summary metrics do not have an aggregation temporality field. This is -// because the count and sum fields of a SummaryDataPoint are assumed to be -// cumulative values. message Summary { repeated SummaryDataPoint data_points = 1; } @@ -452,7 +430,7 @@ message HistogramDataPoint { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram optional double sum = 5; // bucket_counts is an optional field contains the count values of histogram @@ -531,7 +509,7 @@ message ExponentialHistogramDataPoint { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram optional double sum = 5; // scale describes the resolution of the histogram. Boundaries are @@ -611,8 +589,7 @@ message ExponentialHistogramDataPoint { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. The count and sum fields represent -// cumulative values. +// time-varying values of a Summary metric. message SummaryDataPoint { reserved 1; @@ -645,7 +622,7 @@ message SummaryDataPoint { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary double sum = 5; // Represents the value at a given quantile of a distribution. 
diff --git a/src/main/proto/opentelemetry/proto/profiles/v1development/profiles.proto b/src/main/proto/opentelemetry/proto/profiles/v1experimental/pprofextended.proto similarity index 54% rename from src/main/proto/opentelemetry/proto/profiles/v1development/profiles.proto rename to src/main/proto/opentelemetry/proto/profiles/v1experimental/pprofextended.proto index 1cb20b0..bd30083 100644 --- a/src/main/proto/opentelemetry/proto/profiles/v1development/profiles.proto +++ b/src/main/proto/opentelemetry/proto/profiles/v1experimental/pprofextended.proto @@ -28,126 +28,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -syntax = "proto3"; - -package opentelemetry.proto.profiles.v1development; - -import "opentelemetry/proto/common/v1/common.proto"; -import "opentelemetry/proto/resource/v1/resource.proto"; - -option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Development"; -option java_multiple_files = true; -option java_package = "io.opentelemetry.proto.profiles.v1development"; -option java_outer_classname = "ProfilesProto"; -option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1development"; - -// Relationships Diagram -// -// ┌──────────────────┐ LEGEND -// │ ProfilesData │ -// └──────────────────┘ ─────▶ embedded -// │ -// │ 1-n ─────▷ referenced by index -// ▼ -// ┌──────────────────┐ -// │ ResourceProfiles │ -// └──────────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ ScopeProfiles │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▼ -// ┌──────────────────┐ -// │ Profile │ -// └──────────────────┘ -// │ n-1 -// │ 1-n ┌───────────────────────────────────────┐ -// ▼ │ ▽ -// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ -// │ Sample │ ──────▷ │ KeyValue │ │ Link │ -// └──────────────────┘ └──────────────┘ └──────────┘ -// │ 1-n △ △ -// │ 1-n ┌─────────────────┘ │ 1-n -// ▽ │ │ -// ┌──────────────────┐ n-1 ┌──────────────┐ -// │ Location │ ──────▷ │ Mapping │ -// 
└──────────────────┘ └──────────────┘ -// │ -// │ 1-n -// ▼ -// ┌──────────────────┐ -// │ Line │ -// └──────────────────┘ -// │ -// │ 1-1 -// ▽ -// ┌──────────────────┐ -// │ Function │ -// └──────────────────┘ -// - -// ProfilesData represents the profiles data that can be stored in persistent storage, -// OR can be embedded by other protocols that transfer OTLP profiles data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -message ProfilesData { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - repeated ResourceProfiles resource_profiles = 1; -} - - -// A collection of ScopeProfiles from a Resource. -message ResourceProfiles { - reserved 1000; - - // The resource for the profiles in this message. - // If this field is not set then no resource info is known. - opentelemetry.proto.resource.v1.Resource resource = 1; - - // A list of ScopeProfiles that originate from a resource. - repeated ScopeProfiles scope_profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. 
- string schema_url = 3; -} - -// A collection of Profiles produced by an InstrumentationScope. -message ScopeProfiles { - // The instrumentation scope information for the profiles in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - opentelemetry.proto.common.v1.InstrumentationScope scope = 1; - - // A list of Profiles that originate from an instrumentation scope. - repeated Profile profiles = 2; - - // The Schema URL, if known. This is the identifier of the Schema that the profile data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all profiles in the "profiles" field. - string schema_url = 3; -} - // Profile is a common stacktrace profile format. // // Measurements represented with this format should follow the @@ -172,15 +52,18 @@ message ScopeProfiles { // mappings. For every nonzero Location.mapping_id there must be a // unique Mapping with that index. +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + // Represents a complete profile, including sample types, samples, // mappings to binaries, locations, functions, string table, and additional metadata. -// It modifies and annotates pprof Profile with OpenTelemetry specific fields. -// -// Note that whilst fields in this message retain the name and field id from pprof in most cases -// for ease of understanding data migration, it is not intended that pprof:Profile and -// OpenTelemetry:Profile encoding be wire compatible. 
message Profile { - // A description of the samples associated with each Sample.value. // For a cpu profile this might be: // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] @@ -194,92 +77,58 @@ message Profile { repeated Sample sample = 2; // Mapping from address ranges to the image/binary/library mapped // into that address range. mapping[0] will be the main binary. - // If multiple binaries contribute to the Profile and no main - // binary can be identified, mapping[0] has no special meaning. - repeated Mapping mapping_table = 3; + repeated Mapping mapping = 3; // Locations referenced by samples via location_indices. - repeated Location location_table = 4; + repeated Location location = 4; // Array of locations referenced by samples. - repeated int32 location_indices = 5; + repeated int64 location_indices = 15; // Functions referenced by locations. - repeated Function function_table = 6; + repeated Function function = 5; // Lookup table for attributes. - repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 7; + repeated opentelemetry.proto.common.v1.KeyValue attribute_table = 16; // Represents a mapping between Attribute Keys and Units. - repeated AttributeUnit attribute_units = 8; + repeated AttributeUnit attribute_units = 17; // Lookup table for links. - repeated Link link_table = 9; + repeated Link link_table = 18; // A common table for strings referenced by various messages. // string_table[0] must always be "". - repeated string string_table = 10; - - // The following fields 9-14 are informational, do not affect + repeated string string_table = 6; + // frames with Function.function_name fully matching the following + // regexp will be dropped from the samples, along with their successors. + int64 drop_frames = 7; // Index into string table. + // frames with Function.function_name fully matching the following + // regexp will be kept, even if it matches drop_frames. + int64 keep_frames = 8; // Index into string table. 
+ + // The following fields are informational, do not affect // interpretation of results. // Time of collection (UTC) represented as nanoseconds past the epoch. - int64 time_nanos = 11; + int64 time_nanos = 9; // Duration of the profile, if a duration makes sense. - int64 duration_nanos = 12; + int64 duration_nanos = 10; // The kind of events between sampled occurrences. // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - ValueType period_type = 13; + ValueType period_type = 11; // The number of events between sampled occurrences. - int64 period = 14; + int64 period = 12; // Free-form text associated with the profile. The text is displayed as is // to the user by the tools that read profiles (e.g. by pprof). This field // should not be used to store any machine-readable information, it is only // for human-friendly content. The profile must stay functional if this field // is cleaned. - repeated int32 comment_strindices = 15; // Indices into string table. + repeated int64 comment = 13; // Indices into string table. // Index into the string table of the type of the preferred sample // value. If unset, clients should default to the last sample value. - int32 default_sample_type_strindex = 16; - - - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. - // - // This field is required. - bytes profile_id = 17; - - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - uint32 dropped_attributes_count = 19; - - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - string original_payload_format = 20; - - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. 
- // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present then equivalent converted data should be populated in other fields - // of this message as far as is practicable. - bytes original_payload = 21; - - // References to attributes in attribute_table. [optional] - // It is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). - repeated int32 attribute_indices = 22; + int64 default_sample_type = 14; } // Represents a mapping between Attribute Keys and Units. message AttributeUnit { // Index into string table. - int32 attribute_key_strindex = 1; + int64 attribute_key = 1; // Index into string table. - int32 unit_strindex = 2; + int64 unit = 2; } // A pointer from a profile Sample to a trace Span. @@ -354,7 +203,7 @@ enum AggregationTemporality { 11. A request is received, the system measures 1 request. 12. The 1 second collection cycle ends. A metric is exported for the number of requests received over the interval of time t_1 to - t_1+1 with a value of 1. + t_0+1 with a value of 1. 
Note: Even though, when reporting changes since last report time, using CUMULATIVE is valid, it is not recommended. */ @@ -363,8 +212,8 @@ enum AggregationTemporality { // ValueType describes the type and units of a value, with an optional aggregation temporality. message ValueType { - int32 type_strindex = 1; // Index into string table. - int32 unit_strindex = 2; // Index into string table. + int64 type = 1; // Index into string table. + int64 unit = 2; // Index into string table. AggregationTemporality aggregation_temporality = 3; } @@ -374,63 +223,120 @@ message ValueType { // augmented with auxiliary information like the thread-id, some // indicator of a higher level request being handled etc. message Sample { - // locations_start_index along with locations_length refers to to a slice of locations in Profile.location_indices. - int32 locations_start_index = 1; - // locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices. + // The indices recorded here correspond to locations in Profile.location. + // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] + repeated uint64 location_index = 1; + // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. // Supersedes location_index. - int32 locations_length = 2; + uint64 locations_start_index = 7; + // locations_length along with locations_start_index refers to a slice of locations in Profile.location. + // Supersedes location_index. + uint64 locations_length = 8; + // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] + uint32 stacktrace_id_index = 9; // The type and unit of each value is defined by the corresponding // entry in Profile.sample_type. All samples must have the same // number of values, the same as the length of Profile.sample_type. 
// When aggregating multiple samples into a single sample, the // result has a list of values that is the element-wise sum of the // lists of the originals. - repeated int64 value = 3; + repeated int64 value = 2; + // label includes additional context for this sample. It can include + // things like a thread id, allocation size, etc. + // + // NOTE: While possible, having multiple values for the same label key is + // strongly discouraged and should never be used. Most tools (e.g. pprof) do + // not have good (or any) support for multi-value labels. And an even more + // discouraged case is having a string label and a numeric label of the same + // name on a sample. Again, possible to express, but should not be used. + // [deprecated, superseded by attributes] + repeated Label label = 3; // References to attributes in Profile.attribute_table. [optional] - repeated int32 attribute_indices = 4; + repeated uint64 attributes = 10; // Reference to link in Profile.link_table. [optional] - optional int32 link_index = 5; + uint64 link = 12; // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected // to fall within the Profile's time range. [optional] - repeated uint64 timestamps_unix_nano = 6; + repeated uint64 timestamps_unix_nano = 13; +} + +// Provides additional context for a sample, +// such as thread ID or allocation size, with optional units. [deprecated] +message Label { + int64 key = 1; // Index into string table + + // At most one of the following must be present + int64 str = 2; // Index into string table + int64 num = 3; + + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. 
+ // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + int64 num_unit = 4; // Index into string table +} + +// Indicates the semantics of the build_id field. +enum BuildIdKind { + // Linker-generated build ID, stored in the ELF binary notes. + BUILD_ID_LINKER = 0; + // Build ID based on the content hash of the binary. Currently no particular + // hashing approach is standardized, so a given producer needs to define it + // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. + // We may choose to provide a standardized stable hash recommendation later. + BUILD_ID_BINARY_HASH = 1; } // Describes the mapping of a binary in memory, including its address range, // file offset, and metadata like build ID message Mapping { + // Unique nonzero id for the mapping. [deprecated] + uint64 id = 1; // Address at which the binary (or DLL) is loaded into memory. - uint64 memory_start = 1; + uint64 memory_start = 2; // The limit of the address range occupied by this mapping. - uint64 memory_limit = 2; + uint64 memory_limit = 3; // Offset in the binary that corresponds to the first mapped address. - uint64 file_offset = 3; + uint64 file_offset = 4; // The object this entry is loaded from. This can be a filename on // disk for the main binary and shared libraries, or virtual // abstractions like "[vdso]". - int32 filename_strindex = 4; // Index into string table + int64 filename = 5; // Index into string table + // A string that uniquely identifies a particular program version + // with high probability. E.g., for binaries generated by GNU tools, + // it could be the contents of the .note.gnu.build-id field. + int64 build_id = 6; // Index into string table + // Specifies the kind of build id. 
See BuildIdKind enum for more details [optional] + BuildIdKind build_id_kind = 11; // References to attributes in Profile.attribute_table. [optional] - repeated int32 attribute_indices = 5; + repeated uint64 attributes = 12; // The following fields indicate the resolution of symbolic info. - bool has_functions = 6; - bool has_filenames = 7; - bool has_line_numbers = 8; - bool has_inline_frames = 9; + bool has_functions = 7; + bool has_filenames = 8; + bool has_line_numbers = 9; + bool has_inline_frames = 10; } // Describes function and line table debug information. message Location { - // Reference to mapping in Profile.mapping_table. + // Unique nonzero id for the location. A profile could use + // instruction addresses or any integer sequence as ids. [deprecated] + uint64 id = 1; + // The index of the corresponding profile.Mapping for this location. // It can be unset if the mapping is unknown or not applicable for // this profile type. - optional int32 mapping_index = 1; + uint64 mapping_index = 2; // The instruction address for this location, if available. It // should be within [Mapping.memory_start...Mapping.memory_limit] // for the corresponding mapping. A non-leaf address may be in the // middle of a call instruction. It is up to display tools to find // the beginning of the instruction if necessary. - uint64 address = 2; + uint64 address = 3; // Multiple line indicates this location has inlined functions, // where the last entry represents the caller into which the // preceding entries were inlined. @@ -438,22 +344,25 @@ message Location { // E.g., if memcpy() is inlined into printf: // line[0].function_name == "memcpy" // line[1].function_name == "printf" - repeated Line line = 3; + repeated Line line = 4; // Provides an indication that multiple symbols map to this location's // address, for example due to identical code folding by the linker. In that // case the line information above represents one of the multiple // symbols. 
This field must be recomputed when the symbolization state of the // profile changes. - bool is_folded = 4; + bool is_folded = 5; + + // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. + uint32 type_index = 6; // References to attributes in Profile.attribute_table. [optional] - repeated int32 attribute_indices = 5; + repeated uint64 attributes = 7; } // Details a specific line in a source code, linked to a function. message Line { - // Reference to function in Profile.function_table. - int32 function_index = 1; + // The index of the corresponding profile.Function for this line. + uint64 function_index = 1; // Line number in source code. int64 line = 2; // Column number in source code. @@ -463,13 +372,15 @@ message Line { // Describes a function, including its human-readable name, system name, // source file, and starting line number in the source. message Function { + // Unique nonzero id for the function. [deprecated] + uint64 id = 1; // Name of the function, in human-readable form if available. - int32 name_strindex = 1; // Index into string table + int64 name = 2; // Index into string table // Name of the function, as identified by the system. // For instance, it can be a C++ mangled name. - int32 system_name_strindex = 2; // Index into string table + int64 system_name = 3; // Index into string table // Source file containing the function. - int32 filename_strindex = 3; // Index into string table + int64 filename = 4; // Index into string table // Line number in source file. 
- int64 start_line = 4; + int64 start_line = 5; } diff --git a/src/main/proto/opentelemetry/proto/profiles/v1experimental/profiles.proto b/src/main/proto/opentelemetry/proto/profiles/v1experimental/profiles.proto new file mode 100644 index 0000000..bbc2b29 --- /dev/null +++ b/src/main/proto/opentelemetry/proto/profiles/v1experimental/profiles.proto @@ -0,0 +1,191 @@ +// Copyright 2023, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.profiles.v1experimental; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; +import "opentelemetry/proto/profiles/v1experimental/pprofextended.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Profiles.V1Experimental"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.profiles.v1experimental"; +option java_outer_classname = "ProfilesProto"; +option go_package = "go.opentelemetry.io/proto/otlp/profiles/v1experimental"; + +// Relationships Diagram +// +// ┌──────────────────┐ LEGEND +// │ ProfilesData │ +// └──────────────────┘ ─────▶ embedded +// │ +// │ 1-n ─────▷ referenced by index +// ▼ +// ┌──────────────────┐ +// │ ResourceProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ScopeProfiles │ +// └──────────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ ProfileContainer │ +// └──────────────────┘ 
+// │ +// │ 1-1 +// ▼ +// ┌──────────────────┐ +// │ Profile │ +// └──────────────────┘ +// │ 1-n +// │ 1-n ┌───────────────────────────────────────┐ +// ▼ │ ▽ +// ┌──────────────────┐ 1-n ┌──────────────┐ ┌──────────┐ +// │ Sample │ ──────▷ │ KeyValue │ │ Link │ +// └──────────────────┘ └──────────────┘ └──────────┘ +// │ 1-n △ △ +// │ 1-n ┌─────────────────┘ │ 1-n +// ▽ │ │ +// ┌──────────────────┐ n-1 ┌──────────────┐ +// │ Location │ ──────▷ │ Mapping │ +// └──────────────────┘ └──────────────┘ +// │ +// │ 1-n +// ▼ +// ┌──────────────────┐ +// │ Line │ +// └──────────────────┘ +// │ +// │ 1-1 +// ▽ +// ┌──────────────────┐ +// │ Function │ +// └──────────────────┘ +// + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message ProfilesData { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceProfiles resource_profiles = 1; +} + + +// A collection of ScopeProfiles from a Resource. +message ResourceProfiles { + reserved 1000; + + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeProfiles that originate from a resource. + repeated ScopeProfiles scope_profiles = 2; + + // The Schema URL, if known. 
This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of ProfileContainers produced by an InstrumentationScope. +message ScopeProfiles { + // The instrumentation scope information for the profiles in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of ProfileContainers that originate from an instrumentation scope. + repeated ProfileContainer profiles = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. + string schema_url = 3; +} + +// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. +message ProfileContainer { + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + bytes profile_id = 1; + + // start_time_unix_nano is the start time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 2; + + // end_time_unix_nano is the end time of the profile. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. 
+ // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 3; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + string original_payload_format = 6; + + // Original payload can be stored in this field. This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. + // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. 
+ bytes original_payload = 7; + + // This is a reference to a pprof profile. Required, even when original_payload is present. + opentelemetry.proto.profiles.v1experimental.Profile profile = 8; +} diff --git a/src/main/proto/opentelemetry/proto/trace/v1/trace.proto b/src/main/proto/opentelemetry/proto/trace/v1/trace.proto index 2444285..5cb2f3c 100644 --- a/src/main/proto/opentelemetry/proto/trace/v1/trace.proto +++ b/src/main/proto/opentelemetry/proto/trace/v1/trace.proto @@ -56,8 +56,7 @@ message ResourceSpans { repeated ScopeSpans scope_spans = 2; // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -75,8 +74,7 @@ message ScopeSpans { repeated Span spans = 2; // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. Notably, the last part of the URL path is the version number of the - // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // is recorded in. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. 
string schema_url = 3; diff --git a/src/main/proto/xds/data/orca/v3/orca_load_report.proto b/src/main/proto/xds/data/orca/v3/orca_load_report.proto index 1b08475..53da75f 100644 --- a/src/main/proto/xds/data/orca/v3/orca_load_report.proto +++ b/src/main/proto/xds/data/orca/v3/orca_load_report.proto @@ -10,7 +10,7 @@ option go_package = "github.com/cncf/xds/go/xds/data/orca/v3"; import "validate/validate.proto"; // See section `ORCA load report format` of the design document in -// https://github.com/envoyproxy/envoy/issues/6614. +// :ref:`https://github.com/envoyproxy/envoy/issues/6614`. message OrcaLoadReport { // CPU utilization expressed as a fraction of available CPU resources. This diff --git a/src/main/proto/xds/type/matcher/v3/cel.proto b/src/main/proto/xds/type/matcher/v3/cel.proto index a9a4e01..b1ad1fa 100644 --- a/src/main/proto/xds/type/matcher/v3/cel.proto +++ b/src/main/proto/xds/type/matcher/v3/cel.proto @@ -2,7 +2,9 @@ syntax = "proto3"; package xds.type.matcher.v3; +import "xds/annotations/v3/status.proto"; import "xds/type/v3/cel.proto"; + import "validate/validate.proto"; option java_package = "com.github.xds.type.matcher.v3"; @@ -10,6 +12,8 @@ option java_outer_classname = "CelProto"; option java_multiple_files = true; option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; +option (xds.annotations.v3.file_status).work_in_progress = true; + // [#protodoc-title: Common Expression Language (CEL) matchers] // Performs a match by evaluating a `Common Expression Language @@ -26,7 +30,8 @@ option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; // Refer to :ref:`Unified Matcher API ` documentation // for usage details. // -// [#comment: envoy.matching.matchers.cel_matcher] +// [#comment:TODO(sergiitk): Link HttpAttributesMatchInput + usage example.] +// [#comment:TODO(sergiitk): When implemented, add the extension tag.] message CelMatcher { // Either parsed or checked representation of the CEL program. 
type.v3.CelExpression expr_match = 1 [(validate.rules).message = {required: true}]; diff --git a/src/main/proto/xds/type/matcher/v3/http_inputs.proto b/src/main/proto/xds/type/matcher/v3/http_inputs.proto index 5709d64..0dd80cd 100644 --- a/src/main/proto/xds/type/matcher/v3/http_inputs.proto +++ b/src/main/proto/xds/type/matcher/v3/http_inputs.proto @@ -2,11 +2,15 @@ syntax = "proto3"; package xds.type.matcher.v3; +import "xds/annotations/v3/status.proto"; + option java_package = "com.github.xds.type.matcher.v3"; option java_outer_classname = "HttpInputsProto"; option java_multiple_files = true; option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; +option (xds.annotations.v3.file_status).work_in_progress = true; + // [#protodoc-title: Common HTTP Inputs] // Specifies that matching should be performed on the set of :ref:`HTTP attributes @@ -18,6 +22,6 @@ option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; // Refer to :ref:`Unified Matcher API ` documentation // for usage details. // -// [#comment: envoy.matching.inputs.cel_data_input] +// [#comment:TODO(sergiitk): When implemented, add the extension tag.] message HttpAttributesCelMatchInput { } diff --git a/src/main/proto/xds/type/matcher/v3/matcher.proto b/src/main/proto/xds/type/matcher/v3/matcher.proto index da7c1f9..4966b45 100644 --- a/src/main/proto/xds/type/matcher/v3/matcher.proto +++ b/src/main/proto/xds/type/matcher/v3/matcher.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package xds.type.matcher.v3; +import "xds/annotations/v3/status.proto"; import "xds/core/v3/extension.proto"; import "xds/type/matcher/v3/string.proto"; @@ -20,6 +21,8 @@ option go_package = "github.com/cncf/xds/go/xds/type/matcher/v3"; // As an on_no_match might result in another matching tree being evaluated, this process // might repeat several times until the final OnMatch (or no match) is decided. 
message Matcher { + option (xds.annotations.v3.message_status).work_in_progress = true; + // What to do if a match is successful. message OnMatch { oneof on_match { diff --git a/tools/API_SHAS b/tools/API_SHAS index d831917..9615dfb 100644 --- a/tools/API_SHAS +++ b/tools/API_SHAS @@ -1,10 +1,11 @@ # envoy (source: SHA from https://github.com/envoyproxy/envoy) -ENVOY_SHA="d7809ba2b07fd869d49bfb122b27f6a7977b4d94" +ENVOY_SHA="dac139cd497d5c67a31d0944430e814c86ce43f3" -# dependencies (source: https://github.com/envoyproxy/envoy/blob/d7809ba2b07fd869d49bfb122b27f6a7977b4d94/api/bazel/repository_locations.bzl) -GOOGLEAPIS_SHA="fd52b5754b2b268bc3a22a10f29844f206abb327" # 2025-05-07 -PGV_VERSION="1.0.4" # 2025-05-07 -PROMETHEUS_VERSION="0.6.1" # 2025-05-07 -OPENTELEMETRY_VERSION="1.5.0" #2025-05-07 -CEL_VERSION="0.22.1" -XDS_SHA="b4127c9b8d78b77423fd25169f05b7476b6ea932" #2025-05-07 +# dependencies (source: https://github.com/envoyproxy/envoy/blob/8eef22b927682e9ff6f59cf9f26e440b41219fe6/api/bazel/repository_locations.bzl) +GOOGLEAPIS_SHA="114a745b2841a044e98cdbb19358ed29fcf4a5f1" # 2023-01-10 +PGV_VERSION="1.0.4" # 2023-06-26 +PROMETHEUS_VERSION="0.6.1" # 2023-10-03 +OPENCENSUS_VERSION="0.4.1" # 2022-09-23 +OPENTELEMETRY_VERSION="1.3.1" # 2023-07-03 +XDS_SHA="555b57ec207be86f811fb0c04752db6f85e3d7e2" # 2023-11-16 +CEL_VERSION="0.15.0" \ No newline at end of file diff --git a/tools/update-api.sh b/tools/update-api.sh index f784e79..92ca0fe 100755 --- a/tools/update-api.sh +++ b/tools/update-api.sh @@ -67,4 +67,8 @@ curl -sL https://github.com/google/cel-spec/archive/v${CEL_VERSION}.tar.gz | tar mkdir -p "${protodir}/cel/" cp -r cel-spec-*/proto/cel/* "${protodir}/cel/" +curl -sL https://github.com/census-instrumentation/opencensus-proto/archive/v${OPENCENSUS_VERSION}.tar.gz | tar xz --include '*.proto' +mkdir -p "${protodir}/opencensus/proto" +cp -r opencensus-proto-*/src/opencensus/proto/* "${protodir}/opencensus/proto" + popd >/dev/null