From a8580c30bb4aa0eb84f9923b07a873ea056953de Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Fri, 5 Apr 2019 15:18:39 -0700
Subject: [PATCH 01/76] Add CELExpression field to Trigger.Filter
---
pkg/apis/eventing/v1alpha1/trigger_types.go | 5 +++++
pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go | 5 +++++
2 files changed, 10 insertions(+)
diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go
index 8b423bec698..e14b032542c 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_types.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_types.go
@@ -65,7 +65,12 @@ type TriggerSpec struct {
}
type TriggerFilter struct {
+ // SourceAndType filters events based on exact matches on the type and source
+ // attributes.
SourceAndType *TriggerFilterSourceAndType `json:"sourceAndType,omitempty"`
+ // CELExpression filters events by evaluating the expression with the CEL
+ // runtime.
+ CELExpression *string `json:"celExpression,omitempty"`
}
// TriggerFilterSourceAndType filters events based on exact matches on the cloud event's type and
diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index 14ada61e705..cba19e2aae6 100644
--- a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -552,6 +552,11 @@ func (in *TriggerFilter) DeepCopyInto(out *TriggerFilter) {
*out = new(TriggerFilterSourceAndType)
**out = **in
}
+ if in.CELExpression != nil {
+ in, out := &in.CELExpression, &out.CELExpression
+ *out = new(string)
+ **out = **in
+ }
return
}
From 5f0b69df3ede435e34413d506895781355a5046c Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Fri, 5 Apr 2019 15:21:57 -0700
Subject: [PATCH 02/76] Add CEL filtering to Trigger receiver
Triggers with the Filter.CELExpression field populated use CEL
to filter events. The CEL expression has access to a `ce` object with the
following fields:
- `specversion`
- `type`
- `source`
- `id`
- `time`
This list of fields is defined by the dev.knative.CloudEventFilterMeta
proto in pkg/broker/dev_knative.
The following CEL expression selects events with type `com.example`:
`ce.type == "com.example"`
---
Gopkg.lock | 52 +-
Gopkg.toml | 8 +
pkg/broker/cel.go | 66 +
pkg/broker/dev_knative/ce.pb.go | 113 +
pkg/broker/dev_knative/ce.proto | 11 +
pkg/broker/receiver.go | 19 +-
third_party/VENDOR-LICENSE | 269 +-
vendor/github.com/antlr/antlr4/LICENSE.txt | 52 +
.../github.com/antlr/antlr4/contributors.txt | 212 +
.../antlr/antlr4/runtime/Go/antlr/atn.go | 152 +
.../antlr4/runtime/Go/antlr/atn_config.go | 295 ++
.../antlr4/runtime/Go/antlr/atn_config_set.go | 387 ++
.../Go/antlr/atn_deserialization_options.go | 25 +
.../runtime/Go/antlr/atn_deserializer.go | 828 ++++
.../antlr4/runtime/Go/antlr/atn_simulator.go | 50 +
.../antlr4/runtime/Go/antlr/atn_state.go | 386 ++
.../antlr/antlr4/runtime/Go/antlr/atn_type.go | 11 +
.../antlr4/runtime/Go/antlr/char_stream.go | 12 +
.../runtime/Go/antlr/common_token_factory.go | 56 +
.../runtime/Go/antlr/common_token_stream.go | 447 ++
.../antlr/antlr4/runtime/Go/antlr/dfa.go | 171 +
.../antlr4/runtime/Go/antlr/dfa_serializer.go | 152 +
.../antlr4/runtime/Go/antlr/dfa_state.go | 166 +
.../Go/antlr/diagnostic_error_listener.go | 111 +
.../antlr4/runtime/Go/antlr/error_listener.go | 108 +
.../antlr4/runtime/Go/antlr/error_strategy.go | 758 +++
.../antlr/antlr4/runtime/Go/antlr/errors.go | 241 +
.../antlr4/runtime/Go/antlr/file_stream.go | 49 +
.../antlr4/runtime/Go/antlr/input_stream.go | 113 +
.../antlr4/runtime/Go/antlr/int_stream.go | 16 +
.../antlr4/runtime/Go/antlr/interval_set.go | 296 ++
.../antlr/antlr4/runtime/Go/antlr/lexer.go | 417 ++
.../antlr4/runtime/Go/antlr/lexer_action.go | 431 ++
.../runtime/Go/antlr/lexer_action_executor.go | 170 +
.../runtime/Go/antlr/lexer_atn_simulator.go | 658 +++
.../antlr4/runtime/Go/antlr/ll1_analyzer.go | 215 +
.../antlr/antlr4/runtime/Go/antlr/parser.go | 718 +++
.../runtime/Go/antlr/parser_atn_simulator.go | 1473 ++++++
.../runtime/Go/antlr/parser_rule_context.go | 362 ++
.../runtime/Go/antlr/prediction_context.go | 756 +++
.../runtime/Go/antlr/prediction_mode.go | 553 +++
.../antlr4/runtime/Go/antlr/recognizer.go | 217 +
.../antlr4/runtime/Go/antlr/rule_context.go | 114 +
.../runtime/Go/antlr/semantic_context.go | 455 ++
.../antlr/antlr4/runtime/Go/antlr/token.go | 210 +
.../antlr4/runtime/Go/antlr/token_source.go | 17 +
.../antlr4/runtime/Go/antlr/token_stream.go | 20 +
.../runtime/Go/antlr/tokenstream_rewriter.go | 649 +++
.../antlr4/runtime/Go/antlr/trace_listener.go | 32 +
.../antlr4/runtime/Go/antlr/transition.go | 421 ++
.../antlr/antlr4/runtime/Go/antlr/tree.go | 251 +
.../antlr/antlr4/runtime/Go/antlr/trees.go | 137 +
.../antlr/antlr4/runtime/Go/antlr/utils.go | 417 ++
vendor/github.com/golang/protobuf/LICENSE | 3 -
.../golang/protobuf/descriptor/descriptor.go | 93 +
.../golang/protobuf/proto/decode.go | 1 -
.../golang/protobuf/proto/deprecated.go | 63 +
.../golang/protobuf/proto/encode.go | 18 -
.../github.com/golang/protobuf/proto/equal.go | 3 +-
.../golang/protobuf/proto/extensions.go | 78 +-
.../github.com/golang/protobuf/proto/lib.go | 100 +-
.../golang/protobuf/proto/message_set.go | 137 +-
.../golang/protobuf/proto/pointer_reflect.go | 5 +-
.../golang/protobuf/proto/pointer_unsafe.go | 15 +-
.../golang/protobuf/proto/properties.go | 47 +-
.../golang/protobuf/proto/table_marshal.go | 229 +-
.../golang/protobuf/proto/table_unmarshal.go | 196 +-
.../github.com/golang/protobuf/proto/text.go | 4 +-
.../golang/protobuf/proto/text_parser.go | 6 +-
.../protoc-gen-go/descriptor/descriptor.pb.go | 603 +--
.../github.com/golang/protobuf/ptypes/any.go | 10 +-
.../golang/protobuf/ptypes/any/any.pb.go | 47 +-
.../golang/protobuf/ptypes/duration.go | 2 +-
.../protobuf/ptypes/duration/duration.pb.go | 30 +-
.../golang/protobuf/ptypes/empty/empty.pb.go | 24 +-
.../protobuf/ptypes/struct/struct.pb.go | 336 ++
.../golang/protobuf/ptypes/timestamp.go | 6 +-
.../protobuf/ptypes/timestamp/timestamp.pb.go | 38 +-
.../protobuf/ptypes/wrappers/wrappers.pb.go | 461 ++
vendor/github.com/google/cel-go/LICENSE | 202 +
vendor/github.com/google/cel-go/cel/cel.go | 19 +
vendor/github.com/google/cel-go/cel/env.go | 270 ++
vendor/github.com/google/cel-go/cel/io.go | 66 +
.../github.com/google/cel-go/cel/options.go | 249 +
.../github.com/google/cel-go/cel/program.go | 231 +
.../google/cel-go/checker/checker.go | 603 +++
.../google/cel-go/checker/decls/decls.go | 215 +
.../google/cel-go/checker/decls/scopes.go | 115 +
.../github.com/google/cel-go/checker/env.go | 352 ++
.../google/cel-go/checker/errors.go | 123 +
.../google/cel-go/checker/mapping.go | 62 +
.../google/cel-go/checker/printer.go | 71 +
.../google/cel-go/checker/standard.go | 440 ++
.../github.com/google/cel-go/checker/types.go | 479 ++
.../google/cel-go/common/debug/debug.go | 305 ++
vendor/github.com/google/cel-go/common/doc.go | 17 +
.../github.com/google/cel-go/common/error.go | 70 +
.../github.com/google/cel-go/common/errors.go | 58 +
.../google/cel-go/common/location.go | 51 +
.../cel-go/common/operators/operators.go | 77 +
.../cel-go/common/overloads/overloads.go | 273 ++
.../google/cel-go/common/packages/packager.go | 82 +
.../github.com/google/cel-go/common/source.go | 189 +
.../google/cel-go/common/types/any_value.go | 24 +
.../google/cel-go/common/types/bool.go | 126 +
.../google/cel-go/common/types/bytes.go | 103 +
.../google/cel-go/common/types/doc.go | 17 +
.../google/cel-go/common/types/double.go | 158 +
.../google/cel-go/common/types/duration.go | 199 +
.../google/cel-go/common/types/dyn.go | 33 +
.../google/cel-go/common/types/err.go | 87 +
.../google/cel-go/common/types/int.go | 178 +
.../google/cel-go/common/types/iterator.go | 55 +
.../google/cel-go/common/types/json_list.go | 186 +
.../google/cel-go/common/types/json_struct.go | 188 +
.../google/cel-go/common/types/json_value.go | 24 +
.../google/cel-go/common/types/list.go | 450 ++
.../google/cel-go/common/types/map.go | 263 ++
.../google/cel-go/common/types/null.go | 89 +
.../google/cel-go/common/types/object.go | 196 +
.../google/cel-go/common/types/pb/checked.go | 90 +
.../google/cel-go/common/types/pb/enum.go | 36 +
.../google/cel-go/common/types/pb/file.go | 112 +
.../google/cel-go/common/types/pb/pb.go | 148 +
.../google/cel-go/common/types/pb/type.go | 326 ++
.../google/cel-go/common/types/provider.go | 383 ++
.../cel-go/common/types/ref/provider.go | 87 +
.../cel-go/common/types/ref/reference.go | 59 +
.../google/cel-go/common/types/string.go | 200 +
.../google/cel-go/common/types/timestamp.go | 284 ++
.../cel-go/common/types/traits/comparer.go | 33 +
.../cel-go/common/types/traits/container.go | 23 +
.../common/types/traits/field_tester.go | 30 +
.../cel-go/common/types/traits/indexer.go | 25 +
.../cel-go/common/types/traits/iterator.go | 36 +
.../cel-go/common/types/traits/lister.go | 27 +
.../cel-go/common/types/traits/mapper.go | 26 +
.../cel-go/common/types/traits/matcher.go | 23 +
.../google/cel-go/common/types/traits/math.go | 62 +
.../cel-go/common/types/traits/receiver.go | 24 +
.../cel-go/common/types/traits/sizer.go | 25 +
.../cel-go/common/types/traits/traits.go | 64 +
.../google/cel-go/common/types/type.go | 105 +
.../google/cel-go/common/types/uint.go | 170 +
.../google/cel-go/common/types/unknown.go | 61 +
.../google/cel-go/common/types/util.go | 29 +
.../google/cel-go/interpreter/activation.go | 189 +
.../google/cel-go/interpreter/decorators.go | 287 ++
.../google/cel-go/interpreter/dispatcher.go | 100 +
.../google/cel-go/interpreter/evalstate.go | 60 +
.../cel-go/interpreter/functions/functions.go | 58 +
.../cel-go/interpreter/functions/standard.go | 260 ++
.../google/cel-go/interpreter/interpreter.go | 144 +
.../google/cel-go/interpreter/planner.go | 1106 +++++
.../google/cel-go/interpreter/prune.go | 256 ++
.../github.com/google/cel-go/parser/errors.go | 42 +
.../cel-go/parser/gen/cel_base_listener.go | 195 +
.../cel-go/parser/gen/cel_base_visitor.go | 124 +
.../google/cel-go/parser/gen/cel_lexer.go | 319 ++
.../google/cel-go/parser/gen/cel_listener.go | 183 +
.../google/cel-go/parser/gen/cel_parser.go | 4065 +++++++++++++++++
.../google/cel-go/parser/gen/cel_visitor.go | 96 +
.../github.com/google/cel-go/parser/helper.go | 392 ++
.../github.com/google/cel-go/parser/macro.go | 387 ++
.../github.com/google/cel-go/parser/parser.go | 593 +++
.../google/cel-go/parser/unescape.go | 225 +
vendor/golang.org/x/text/width/gen.go | 115 +
vendor/golang.org/x/text/width/gen_common.go | 96 +
vendor/golang.org/x/text/width/gen_trieval.go | 34 +
vendor/golang.org/x/text/width/kind_string.go | 16 +
.../golang.org/x/text/width/tables10.0.0.go | 1318 ++++++
vendor/golang.org/x/text/width/tables9.0.0.go | 1286 ++++++
vendor/golang.org/x/text/width/transform.go | 239 +
vendor/golang.org/x/text/width/trieval.go | 30 +
vendor/golang.org/x/text/width/width.go | 206 +
.../api/expr/v1alpha1/cel_service.pb.go | 194 +
.../api/expr/v1alpha1/checked.pb.go | 1445 ++++++
.../expr/v1alpha1/conformance_service.pb.go | 807 ++++
.../googleapis/api/expr/v1alpha1/eval.pb.go | 434 ++
.../api/expr/v1alpha1/explain.pb.go | 161 +
.../googleapis/api/expr/v1alpha1/syntax.pb.go | 1588 +++++++
.../googleapis/api/expr/v1alpha1/value.pb.go | 715 +++
182 files changed, 43437 insertions(+), 691 deletions(-)
create mode 100644 pkg/broker/cel.go
create mode 100644 pkg/broker/dev_knative/ce.pb.go
create mode 100644 pkg/broker/dev_knative/ce.proto
create mode 100644 vendor/github.com/antlr/antlr4/LICENSE.txt
create mode 100644 vendor/github.com/antlr/antlr4/contributors.txt
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
create mode 100644 vendor/github.com/golang/protobuf/descriptor/descriptor.go
create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
create mode 100644 vendor/github.com/google/cel-go/LICENSE
create mode 100644 vendor/github.com/google/cel-go/cel/cel.go
create mode 100644 vendor/github.com/google/cel-go/cel/env.go
create mode 100644 vendor/github.com/google/cel-go/cel/io.go
create mode 100644 vendor/github.com/google/cel-go/cel/options.go
create mode 100644 vendor/github.com/google/cel-go/cel/program.go
create mode 100644 vendor/github.com/google/cel-go/checker/checker.go
create mode 100644 vendor/github.com/google/cel-go/checker/decls/decls.go
create mode 100644 vendor/github.com/google/cel-go/checker/decls/scopes.go
create mode 100644 vendor/github.com/google/cel-go/checker/env.go
create mode 100644 vendor/github.com/google/cel-go/checker/errors.go
create mode 100644 vendor/github.com/google/cel-go/checker/mapping.go
create mode 100644 vendor/github.com/google/cel-go/checker/printer.go
create mode 100644 vendor/github.com/google/cel-go/checker/standard.go
create mode 100644 vendor/github.com/google/cel-go/checker/types.go
create mode 100644 vendor/github.com/google/cel-go/common/debug/debug.go
create mode 100644 vendor/github.com/google/cel-go/common/doc.go
create mode 100644 vendor/github.com/google/cel-go/common/error.go
create mode 100644 vendor/github.com/google/cel-go/common/errors.go
create mode 100644 vendor/github.com/google/cel-go/common/location.go
create mode 100644 vendor/github.com/google/cel-go/common/operators/operators.go
create mode 100644 vendor/github.com/google/cel-go/common/overloads/overloads.go
create mode 100644 vendor/github.com/google/cel-go/common/packages/packager.go
create mode 100644 vendor/github.com/google/cel-go/common/source.go
create mode 100644 vendor/github.com/google/cel-go/common/types/any_value.go
create mode 100644 vendor/github.com/google/cel-go/common/types/bool.go
create mode 100644 vendor/github.com/google/cel-go/common/types/bytes.go
create mode 100644 vendor/github.com/google/cel-go/common/types/doc.go
create mode 100644 vendor/github.com/google/cel-go/common/types/double.go
create mode 100644 vendor/github.com/google/cel-go/common/types/duration.go
create mode 100644 vendor/github.com/google/cel-go/common/types/dyn.go
create mode 100644 vendor/github.com/google/cel-go/common/types/err.go
create mode 100644 vendor/github.com/google/cel-go/common/types/int.go
create mode 100644 vendor/github.com/google/cel-go/common/types/iterator.go
create mode 100644 vendor/github.com/google/cel-go/common/types/json_list.go
create mode 100644 vendor/github.com/google/cel-go/common/types/json_struct.go
create mode 100644 vendor/github.com/google/cel-go/common/types/json_value.go
create mode 100644 vendor/github.com/google/cel-go/common/types/list.go
create mode 100644 vendor/github.com/google/cel-go/common/types/map.go
create mode 100644 vendor/github.com/google/cel-go/common/types/null.go
create mode 100644 vendor/github.com/google/cel-go/common/types/object.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/checked.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/enum.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/file.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/pb.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/type.go
create mode 100644 vendor/github.com/google/cel-go/common/types/provider.go
create mode 100644 vendor/github.com/google/cel-go/common/types/ref/provider.go
create mode 100644 vendor/github.com/google/cel-go/common/types/ref/reference.go
create mode 100644 vendor/github.com/google/cel-go/common/types/string.go
create mode 100644 vendor/github.com/google/cel-go/common/types/timestamp.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/comparer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/container.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/field_tester.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/indexer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/iterator.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/lister.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/mapper.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/matcher.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/math.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/receiver.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/sizer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/traits.go
create mode 100644 vendor/github.com/google/cel-go/common/types/type.go
create mode 100644 vendor/github.com/google/cel-go/common/types/uint.go
create mode 100644 vendor/github.com/google/cel-go/common/types/unknown.go
create mode 100644 vendor/github.com/google/cel-go/common/types/util.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/activation.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/decorators.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/dispatcher.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/evalstate.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/functions.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/standard.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/interpreter.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/planner.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/prune.go
create mode 100644 vendor/github.com/google/cel-go/parser/errors.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_listener.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_parser.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
create mode 100644 vendor/github.com/google/cel-go/parser/helper.go
create mode 100644 vendor/github.com/google/cel-go/parser/macro.go
create mode 100644 vendor/github.com/google/cel-go/parser/parser.go
create mode 100644 vendor/github.com/google/cel-go/parser/unescape.go
create mode 100644 vendor/golang.org/x/text/width/gen.go
create mode 100644 vendor/golang.org/x/text/width/gen_common.go
create mode 100644 vendor/golang.org/x/text/width/gen_trieval.go
create mode 100644 vendor/golang.org/x/text/width/kind_string.go
create mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go
create mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go
create mode 100644 vendor/golang.org/x/text/width/transform.go
create mode 100644 vendor/golang.org/x/text/width/trieval.go
create mode 100644 vendor/golang.org/x/text/width/width.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/cel_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/conformance_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 0dd5d6918c9..fffcf55c424 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -25,6 +25,14 @@
revision = "ec843464b50d4c8b56403ec9d589cf41ea30e722"
version = "v1.19.0"
+[[projects]]
+ digest = "1:d04889482897652dedae6d8575b479c06fa3eb3c3abe248163b25d3df5fab43e"
+ name = "github.com/antlr/antlr4"
+ packages = ["runtime/Go/antlr"]
+ pruneopts = "NUT"
+ revision = "be58ebffde8e29c154192c019608f0a5b8e6a064"
+ version = "4.7.2"
+
[[projects]]
branch = "master"
digest = "1:f12358576cd79bba0ae626530d23cde63416744f486c8bc817802c6907eaadd7"
@@ -187,20 +195,23 @@
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
[[projects]]
- digest = "1:0f7f0d9512487860d967bd31b4a9668316e53630fd71cb57a84ccf97c852df84"
+ digest = "1:ac06172e8420ee3192527e84a3f373ada56043e6b0e27c2e765b4dd8408f2ec9"
name = "github.com/golang/protobuf"
packages = [
+ "descriptor",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
+ "ptypes/struct",
"ptypes/timestamp",
+ "ptypes/wrappers",
]
pruneopts = "NUT"
- revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
- version = "v1.1.0"
+ revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+ version = "v1.3.1"
[[projects]]
branch = "master"
@@ -218,6 +229,31 @@
pruneopts = "NUT"
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
+[[projects]]
+ digest = "1:4d49996da7ca0a062d7a136b65608c24b258673968cc162e97183171a6cb056f"
+ name = "github.com/google/cel-go"
+ packages = [
+ "cel",
+ "checker",
+ "checker/decls",
+ "common",
+ "common/debug",
+ "common/operators",
+ "common/overloads",
+ "common/packages",
+ "common/types",
+ "common/types/pb",
+ "common/types/ref",
+ "common/types/traits",
+ "interpreter",
+ "interpreter/functions",
+ "parser",
+ "parser/gen",
+ ]
+ pruneopts = "NUT"
+ revision = "2c29ff3f5d3d7856e562e821214cc9c77657b853"
+ version = "v0.2.0"
+
[[projects]]
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
name = "github.com/google/go-cmp"
@@ -766,7 +802,7 @@
revision = "c11f84a56e43e20a78cee75a7c034031ecf57d1f"
[[projects]]
- digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
+ digest = "1:e33513a825fcd765e97b5de639a2f7547542d1a8245df0cef18e1fd390b778a9"
name = "golang.org/x/text"
packages = [
"collate",
@@ -783,6 +819,7 @@
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
+ "width",
]
pruneopts = "NUT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
@@ -849,10 +886,11 @@
[[projects]]
branch = "master"
- digest = "1:5bc750ea612e0650b4095b019e70fc7530c0a30a33445fbd0b79c5a54a729908"
+ digest = "1:fcda0da85df750a2a18805181c163354328687d32073d4b7087fcf8d4da05420"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
+ "googleapis/api/expr/v1alpha1",
"googleapis/iam/v1",
"googleapis/pubsub/v1",
"googleapis/rpc/status",
@@ -1271,6 +1309,10 @@
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
"github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
+ "github.com/golang/protobuf/proto",
+ "github.com/google/cel-go/cel",
+ "github.com/google/cel-go/checker/decls",
+ "github.com/google/cel-go/common/types",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/uuid",
diff --git a/Gopkg.toml b/Gopkg.toml
index d0567a7547c..8f30bd82606 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -145,3 +145,11 @@ required = [
[[constraint]]
name = "github.com/cloudevents/sdk-go"
version = "=0.4.4"
+
+[[constraint]]
+ name = "github.com/google/cel-go"
+ version = "=0.2.0"
+
+[[constraint]]
+ name = "github.com/golang/protobuf"
+ version = "^1.3.1"
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
new file mode 100644
index 00000000000..8230f9dc64a
--- /dev/null
+++ b/pkg/broker/cel.go
@@ -0,0 +1,66 @@
+package broker
+
+import (
+ "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ celtypes "github.com/knative/eventing/pkg/broker/dev_knative"
+)
+
+func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
+ e, err := cel.NewEnv(
+ cel.Types(&celtypes.CloudEventFilterMeta{}),
+ cel.Declarations(
+ decls.NewIdent("ce", decls.NewObjectType("dev.knative.CloudEventFilterMeta"), nil),
+ //decls.NewIdent("data", types.DynType, nil),
+ ),
+ )
+ if err != nil {
+ //TODO do something with error
+ return false
+ }
+
+ p, iss := e.Parse(*ts.Filter.CELExpression)
+ if iss != nil && iss.Err() != nil {
+ //TODO do something with error
+ return false
+ }
+ c, iss := e.Check(p)
+ if iss != nil && iss.Err() != nil {
+ //TODO do something with error
+ return false
+ }
+
+ prg, err := e.Program(c)
+ if err != nil {
+ //TODO do something with error
+ return false
+ }
+
+ // Would it be useful to cache programs by trigger UID and resourceversion?
+
+ //TODO generate the variables
+ ce := &celtypes.CloudEventFilterMeta{
+ Specversion: event.SpecVersion(),
+ Type: event.Type(),
+ Source: event.Source(),
+ //TODO Is this the right way to get id? Do we even need id?
+ //Id: event.Context.AsV02().ID
+ //TODO should this use google.protobuf.Timestamp? Do we even need time?
+ Time: event.Context.AsV02().Time.String(),
+ }
+
+ out, _, err := prg.Eval(map[string]interface{}{
+ // Native values are converted to CEL values under the covers.
+ "ce": ce,
+ //"data": data,
+ })
+ if err != nil {
+ //TODO do something with error
+ return false
+ }
+
+ return out == types.True
+}
diff --git a/pkg/broker/dev_knative/ce.pb.go b/pkg/broker/dev_knative/ce.pb.go
new file mode 100644
index 00000000000..c67d0e5009a
--- /dev/null
+++ b/pkg/broker/dev_knative/ce.pb.go
@@ -0,0 +1,113 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: ce.proto
+
+package dev_knative
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CloudEventFilterMeta struct {
+ Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
+ Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"`
+ Time string `protobuf:"bytes,5,opt,name=time,proto3" json:"time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CloudEventFilterMeta) Reset() { *m = CloudEventFilterMeta{} }
+func (m *CloudEventFilterMeta) String() string { return proto.CompactTextString(m) }
+func (*CloudEventFilterMeta) ProtoMessage() {}
+func (*CloudEventFilterMeta) Descriptor() ([]byte, []int) {
+ return fileDescriptor_85884d17b902dd0c, []int{0}
+}
+
+func (m *CloudEventFilterMeta) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CloudEventFilterMeta.Unmarshal(m, b)
+}
+func (m *CloudEventFilterMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CloudEventFilterMeta.Marshal(b, m, deterministic)
+}
+func (m *CloudEventFilterMeta) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudEventFilterMeta.Merge(m, src)
+}
+func (m *CloudEventFilterMeta) XXX_Size() int {
+ return xxx_messageInfo_CloudEventFilterMeta.Size(m)
+}
+func (m *CloudEventFilterMeta) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudEventFilterMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudEventFilterMeta proto.InternalMessageInfo
+
+func (m *CloudEventFilterMeta) GetSpecversion() string {
+ if m != nil {
+ return m.Specversion
+ }
+ return ""
+}
+
+func (m *CloudEventFilterMeta) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *CloudEventFilterMeta) GetSource() string {
+ if m != nil {
+ return m.Source
+ }
+ return ""
+}
+
+func (m *CloudEventFilterMeta) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
+func (m *CloudEventFilterMeta) GetTime() string {
+ if m != nil {
+ return m.Time
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CloudEventFilterMeta)(nil), "dev.knative.CloudEventFilterMeta")
+}
+
+func init() { proto.RegisterFile("ce.proto", fileDescriptor_85884d17b902dd0c) }
+
+var fileDescriptor_85884d17b902dd0c = []byte{
+ // 155 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4e, 0xd5, 0x2b,
+ 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0x49, 0x2d, 0xd3, 0xcb, 0xce, 0x4b, 0x2c, 0xc9, 0x2c,
+ 0x4b, 0x55, 0xea, 0x60, 0xe4, 0x12, 0x71, 0xce, 0xc9, 0x2f, 0x4d, 0x71, 0x2d, 0x4b, 0xcd, 0x2b,
+ 0x71, 0xcb, 0xcc, 0x29, 0x49, 0x2d, 0xf2, 0x4d, 0x2d, 0x49, 0x14, 0x52, 0xe0, 0xe2, 0x2e, 0x2e,
+ 0x48, 0x4d, 0x2e, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x93, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c,
+ 0x42, 0x16, 0x12, 0x12, 0xe2, 0x62, 0x29, 0xa9, 0x2c, 0x48, 0x95, 0x60, 0x02, 0x4b, 0x81, 0xd9,
+ 0x42, 0x62, 0x5c, 0x6c, 0xc5, 0xf9, 0xa5, 0x45, 0xc9, 0xa9, 0x12, 0xcc, 0x60, 0x51, 0x28, 0x4f,
+ 0x88, 0x8f, 0x8b, 0x29, 0x33, 0x45, 0x82, 0x05, 0x2c, 0xc6, 0x94, 0x99, 0x02, 0xd6, 0x9b, 0x99,
+ 0x9b, 0x2a, 0xc1, 0x0a, 0xd5, 0x9b, 0x99, 0x9b, 0x9a, 0xc4, 0x06, 0x76, 0x9e, 0x31, 0x20, 0x00,
+ 0x00, 0xff, 0xff, 0x94, 0x85, 0x6f, 0x90, 0xaa, 0x00, 0x00, 0x00,
+}
diff --git a/pkg/broker/dev_knative/ce.proto b/pkg/broker/dev_knative/ce.proto
new file mode 100644
index 00000000000..401e4e616c0
--- /dev/null
+++ b/pkg/broker/dev_knative/ce.proto
@@ -0,0 +1,11 @@
+syntax = "proto3";
+
+package dev.knative;
+
+message CloudEventFilterMeta {
+ string specversion = 1;
+ string type = 2;
+ string source = 3;
+ string id = 4;
+ string time = 5;
+}
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index 632cc024b56..cda1306f88f 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -212,11 +212,28 @@ func (r *Receiver) getTrigger(ctx context.Context, ref provisioners.ChannelRefer
// shouldSendMessage determines whether message 'm' should be sent based on the triggerSpec 'ts'.
// Currently it supports exact matching on type and/or source of events.
+// TODO: this should return an error as well, so that CEL evaluation
+// failures can be surfaced to the Trigger (e.g. on its status).
func (r *Receiver) shouldSendMessage(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
- if ts.Filter == nil || ts.Filter.SourceAndType == nil {
+ if ts.Filter == nil {
r.logger.Error("No filter specified")
return false
}
+
+ // TODO(filter semantics): when both CELExpression and SourceAndType are
+ // set, CELExpression currently wins; decide on AND/OR/precedence rules.
+ if ts.Filter.CELExpression != nil {
+ return filterEventByCEL(ts, event)
+ }
+
+ if ts.Filter.SourceAndType != nil {
+ return r.filterEventBySourceAndType(ts, event)
+ }
+ // TODO: no recognized filter fields are set; we default to delivering the event — confirm this is intended.
+ return true
+}
+
+func (r *Receiver) filterEventBySourceAndType(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
filterType := ts.Filter.SourceAndType.Type
if filterType != eventingv1alpha1.TriggerAnyFilter && filterType != event.Type() {
r.logger.Debug("Wrong type", zap.String("trigger.spec.filter.sourceAndType.type", filterType), zap.String("event.Type()", event.Type()))
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index bdfec23c2a0..ec2c358b8b3 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -233,6 +233,64 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/antlr/antlr4
+
+[The "BSD 3-clause license"]
+Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=====
+
+MIT License for codepointat.js from https://git.io/codepointat
+MIT License for fromcodepoint.js from https://git.io/vDW1m
+
+Copyright Mathias Bynens
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/beorn7/perks
@@ -1584,10 +1642,7 @@ third-party archives.
===========================================================
Import: github.com/knative/eventing/vendor/github.com/golang/protobuf
-Go support for Protocol Buffers - Google's data interchange format
-
Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -1859,6 +1914,214 @@ Import: github.com/knative/eventing/vendor/github.com/google/btree
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/google/cel-go
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/google/go-cmp
diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt
new file mode 100644
index 00000000000..2042d1bda6c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/LICENSE.txt
@@ -0,0 +1,52 @@
+[The "BSD 3-clause license"]
+Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=====
+
+MIT License for codepointat.js from https://git.io/codepointat
+MIT License for fromcodepoint.js from https://git.io/vDW1m
+
+Copyright Mathias Bynens
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/antlr/antlr4/contributors.txt b/vendor/github.com/antlr/antlr4/contributors.txt
new file mode 100644
index 00000000000..230e494f22f
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/contributors.txt
@@ -0,0 +1,212 @@
+ANTLR Project Contributors Certification of Origin and Rights
+
+All contributors to ANTLR v4 must formally agree to abide by this
+certificate of origin by signing on the bottom with their github
+userid, full name, email address (you can obscure your e-mail, but it
+must be computable by human), and date.
+
+By signing this agreement, you are warranting and representing that
+you have the right to release code contributions or other content free
+of any obligations to third parties and are granting Terence Parr and
+ANTLR project contributors, henceforth referred to as The ANTLR
+Project, a license to incorporate it into The ANTLR Project tools
+(such as ANTLRWorks and StringTemplate) or related works under the BSD
+license. You understand that The ANTLR Project may or may not
+incorporate your contribution and you warrant and represent the
+following:
+
+1. I am the creator of all my contributions. I am the author of all
+ contributed work submitted and further warrant and represent that
+ such work is my original creation and I have the right to license
+ it to The ANTLR Project for release under the 3-clause BSD
+ license. I hereby grant The ANTLR Project a nonexclusive,
+ irrevocable, royalty-free, worldwide license to reproduce,
+ distribute, prepare derivative works, and otherwise use this
+ contribution as part of the ANTLR project, associated
+ documentation, books, and tools at no cost to The ANTLR Project.
+
+2. I have the right to submit. This submission does not violate the
+ rights of any person or entity and that I have legal authority over
+ this submission and to make this certification.
+
+3. If I violate another's rights, liability lies with me. I agree to
+ defend, indemnify, and hold The ANTLR Project and ANTLR users
+ harmless from any claim or demand, including reasonable attorney
+ fees, made by any third party due to or arising out of my violation
+ of these terms and conditions or my violation of the rights of
+ another person or entity.
+
+4. I understand and agree that this project and the contribution are
+ public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license indicated in the file.
+
+I have read this agreement and do so certify by adding my signoff to
+the end of the following contributors list.
+
+CONTRIBUTORS:
+
+YYYY/MM/DD, github id, Full name, email
+2012/07/12, parrt, Terence Parr, parrt@antlr.org
+2012/09/18, sharwell, Sam Harwell, sam@tunnelvisionlabs.com
+2012/10/10, stephengaito, Stephen Gaito, stephen@percepitsys.co.uk
+2012/11/23, maguro, Alan Cabrera, adc@toolazydogs.com
+2013/01/29, metadave, Dave Parfitt, diparfitt@gmail.com
+2013/03/06, bkiers, Bart Kiers, bkiers@gmail.com
+2013/08/20, cayhorstmann, Cay Horstmann, cay@horstmann.com
+2014/03/18, aphyr, Kyle Kingsbury, aphyr@aphyr.com
+2014/06/07, ericvergnaud, Eric Vergnaud, eric.vergnaud@wanadoo.fr
+2014/07/04, jimidle, Jim Idle, jimi@Idle.ws
+2014/01/01, danmclaughlin, Dan McLaughlin, dan.mclaughlin@gmail.com
+2014/09/04. jeduden, Jan-Eric Duden, jeduden@gmail.com
+2014/09/27, petrbel, Petr Bělohlávek, antlr@petrbel.cz
+2014/10/18, sergiusignacius, Sérgio Silva, serge.a.silva@gmail.com
+2014/10/26, bdkearns, Brian Kearns, bdkearns@gmail.com
+2014/10/27, michaelpj, Michael Peyton Jones, michaelpj@gmail.com
+2015/01/29, TomLottermann, Thomas Lottermann, tomlottermann@gmail.com
+2015/02/15, pavlo, Pavlo Lysov, pavlikus@gmail.com
+2015/03/07, RedTailedHawk, Lawrence Parker, larry@answerrocket.com
+2015/04/03, rljacobson, Robert Jacobson, rljacobson@gmail.com
+2015/04/06, ojakubcik, Ondrej Jakubcik, ojakubcik@gmail.com
+2015/04/29, jszheng, Jinshan Zheng, zheng_js@hotmail.com
+2015/05/08, ViceIce, Michael Kriese, michael.kriese@gmx.de
+2015/05/09, lkraz, Luke Krasnoff, luke.krasnoff@gmail.com
+2015/05/12, Pursuit92, Josh Chase, jcjoshuachase@gmail.com
+2015/05/20, peturingi, Pétur Ingi Egilsson, petur@petur.eu
+2015/05/27, jcbrinfo, Jean-Christophe Beaupré, jcbrinfo@users.noreply.github.com
+2015/06/29, jvanzyl, Jason van Zyl, jason@takari.io
+2015/08/18, krzkaczor, Krzysztof Kaczor, krzysztof@kaczor.io
+2015/09/18, worsht, Rajiv Subrahmanyam, rajiv.public@gmail.com
+2015/09/24, HSorensen, Henrik Sorensen, henrik.b.sorensen@gmail.com
+2015/10/06, brwml, Bryan Wilhelm, bryan.wilhelm@microsoft.com
+2015/10/08, fedotovalex, Alex Fedotov, me@alexfedotov.com
+2015/10/12, KvanTTT, Ivan Kochurkin, ivan.kochurkin@gmail.com
+2015/10/21, martin-probst, Martin Probst, martin-probst@web.de
+2015/10/21, hkff, Walid Benghabrit, walid.benghabrit@mines-nantes.fr
+2015/11/12, cooperra, Robbie Cooper, cooperra@users.noreply.github.com
+2015/11/25, abego, Udo Borkowski, ub@abego.org
+2015/12/17, sebadur, Sebastian Badur, sebadur@users.noreply.github.com
+2015/12/23, pboyer, Peter Boyer, peter.b.boyer@gmail.com
+2015/12/24, dtymon, David Tymon, david.tymon@gmail.com
+2016/02/18, reitzig, Raphael Reitzig, reitzig[at]cs.uni-kl.de
+2016/03/10, mike-lischke, Mike Lischke, mike@lischke-online.de
+2016/03/27, beardlybread, Bradley Steinbacher, bradley.j.steinbacher@gmail.com
+2016/03/29, msteiger, Martin Steiger, antlr@martin-steiger.de
+2016/03/28, gagern, Martin von Gagern, gagern@ma.tum.de
+2016/07/10, twz123, Tom Wieczorek, tom.wieczorek@zalando.de
+2016/07/20, chrisheller, Chris Heller, chris.heller.greyheller@gmail.com
+2016/07/20, nburles, Nathan Burles, nburles@gmail.com
+2016/07/20, kosl90, Li Liqiang, kos1990l@gmail.com
+2016/07/27, timoc, Tim O'Callaghan, timo@linux.com
+2016/07/26, nic30, Michal Orsák, michal.o.socials@gmail.com
+2016/07/18, willfaught, Will Faught, will.faught@gmail.com
+2016/08/08, wjkohnen, Wolfgang Johannes Kohnen, wjkohnen-go-antlr@ko-sys.com
+2016/08/11, BurtHarris, Ralph "Burt" Harris, Burt_Harris_antlr4@azxs.33mail.com
+2016/08/19, andjo403, Andreas Jonson, andjo403@hotmail.com
+2016/09/27, harriman, Kurt Harriman, harriman@acm.org
+2016/10/13, cgudrian, Christian Gudrian, christian.gudrian@gmx.de
+2016/10/13, nielsbasjes, Niels Basjes, niels@basjes.nl
+2016/10/21, FloorGoddijn, Floor Goddijn, floor.goddijn[at]aimms.com
+2016/11/01, RYDB3RG, Kai Stammerjohann, RYDB3RG@users.noreply.github.com
+2016/11/05, runner-mei, meifakun, runner.mei@gmail.com
+2016/11/15, hanjoes, Hanzhou Shi, hanzhou87@gmail.com
+2016/11/16, sridharxp, Sridharan S, aurosridhar@gmail.com
+2016/11/06, NoodleOfDeath, Thom Morgan, github@bytemeapp.com
+2016/11/01, sebkur, Sebastian Kürten, sebastian@topobyte.de
+2016/04/13, renatahodovan, Renata Hodovan, reni@inf.u-szeged.hu
+2016/11/05, ewanmellor, Ewan Mellor, github@ewanmellor.org
+2016/11/06, janyou, Janyou, janyou.antlr@outlook.com
+2016/11/20, marcohu, Marco Hunsicker, antlr@hunsicker.de
+2016/09/02, lygav, Vladimir (Vladi) Lyga, lyvladi@gmail.com
+2016/09/23, ghosthope, Dmitry Shakhtanov, sudstrike@gmail.com
+2016/11/25, MrSampson, Oliver Sampson, olsam@quickaudio.com
+2016/11/29, millergarym, Gary Miller, miller.garym@gmail.com
+2016/11/29, wxio, Gary Miller, gm@wx.io
+2016/11/29, Naios, Denis Blank, naios@users.noreply.github.com
+2016/12/01, samtatasurya, Samuel Tatasurya, xemradiant@gmail.com
+2016/12/03, redxdev, Samuel Bloomberg, sam@redxdev.com
+2016/12/11, Gaulouis, Gaulouis, gaulouis.com@gmail.com
+2016/12/22, akosthekiss, Akos Kiss, akiss@inf.u-szeged.hu
+2016/12/24, adrpo, Adrian Pop, adrian.pop@liu.se
+2017/01/11, robertbrignull, Robert Brignull, robertbrignull@gmail.com
+2017/01/13, marcelo-rocha, Marcelo Rocha, mcrocha@gmail.com
+2017/01/23, bhamiltoncx, Ben Hamilton, bhamiltoncx+antlr@gmail.com
+2017/01/18, mshockwave, Bekket McClane, yihshyng223@gmail.com
+2017/02/10, lionelplessis, Lionel Plessis, lionelplessis@users.noreply.github.com
+2017/02/14, lecode-official, David Neumann, david.neumann@lecode.de
+2017/02/14, xied75, Dong Xie, xied75@gmail.com
+2017/02/20, Thomasb81, Thomas Burg, thomasb81@gmail.com
+2017/02/26, jvasileff, John Vasileff, john@vasileff.com
+2017/03/08, harry-tallbelt, Igor Vysokopoyasny, harry.tallbelt@gmail.com
+2017/03/09, teverett, Tom Everett, tom@khubla.com
+2017/03/03, chund, Christian Hund, christian.hund@gmail.com
+2017/03/15, robertvanderhulst, Robert van der Hulst, robert@xsharp.eu
+2017/03/28, cmd-johnson, Jonas Auer, jonas.auer.94@gmail.com
+2017/04/12, lys0716, Yishuang Lu, luyscmu@gmail.com
+2017/04/30, shravanrn, Shravan Narayan, shravanrn@gmail.com
+2017/05/11, jimallman, Jim Allman, jim@ibang.com
+2017/05/26, waf, Will Fuqua, wafuqua@gmail.com
+2017/05/29, kosak, Corey Kosak, kosak@kosak.com
+2017/06/11, erikbra, Erik A. Brandstadmoen, erik@brandstadmoen.net
+2017/06/10, jm-mikkelsen, Jan Martin Mikkelsen, janm@transactionware.com
+2017/06/25, alimg, Alim Gökkaya, alim.gokkaya@gmail.com
+2017/06/28, jBugman, Sergey Parshukov, codedby@bugman.me
+2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
+2017/07/11, dhalperi, Daniel Halperin, daniel@halper.in
+2017/07/17, vaibhavaingankar09, Vaibhav Vaingankar, vbhvvaingankar9@gmail.com
+2017/07/23, venkatperi, Venkat Peri, venkatperi@gmail.com
+2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
+2017/07/09, neatnerd, Mike Arshinskiy, neatnerd@users.noreply.github.com
+2017/07/27, matthauck, Matt Hauck, matthauck@gmail.com
+2017/07/27, shirou, WAKAYAMA Shirou, shirou.faw@gmail.com
+2017/08/20, tiagomazzutti, Tiago Mazzutti, tiagomzt@gmail.com
+2017/08/20, milanaleksic, Milan Aleksic, milanaleksic@gmail.com
+2017/08/29, Eddy Reyes, eddy@mindsight.io
+2017/09/09, brauliobz, Bráulio Bezerra, brauliobezerra@gmail.com
+2017/09/11, sachinjain024, Sachin Jain, sachinjain024@gmail.com
+2017/09/25, kaedvann, Rostislav Listerenko, r.listerenko@gmail.com
+2017/10/06, bramp, Andrew Brampton, brampton@gmail.com
+2017/10/15, simkimsia, Sim Kim Sia, kimcity@gmail.com
+2017/10/27, Griffon26, Maurice van der Pot, griffon26@kfk4ever.com
+2017/05/29, rlfnb, Ralf Neeb, rlfnb@rlfnb.de
+2017/10/29, gendalph, Максим Прохоренко, Maxim\dotProhorenko@gm@il.com
+2017/11/02, jasonmoo, Jason Mooberry, jason.mooberry@gmail.com
+2017/11/05, ajaypanyala, Ajay Panyala, ajay.panyala@gmail.com
+2017/11/24, zqlu.cn, Zhiqiang Lu, zqlu.cn@gmail.com
+2017/11/28, niccroad, Nicolas Croad, nic.croad@gmail.com
+2017/12/01, DavidMoraisFerreira, David Morais Ferreira, david.moraisferreira@gmail.com
+2017/12/01, SebastianLng, Sebastian Lang, sebastian.lang@outlook.com
+2017/12/03, oranoran, Oran Epelbaum, oran / epelbaum me
+2017/12/12, janlinde, Jan Lindemann, jan@janware.com
+2017/12/13, enessoylu, Enes Soylu, enessoylutr@gmail.com
+2017/12/20, kbsletten, Kyle Sletten, kbsletten@gmail.com
+2017/12/27, jkmar, Jakub Marciniszyn, marciniszyn.jk@gmail.com
+2018/03/08, dannoc, Daniel Clifford, danno@google.com
+2018/03/10, uvguy, kangjoni76@gmail.com
+2018/01/06, kasbah, Kaspar Emanuel, kaspar@monostable.co.uk
+2018/01/15, xgcssch, Sönke Schau, xgcssch@users.noreply.github.com
+2018/02/08, razfriman, Raz Friman, raz@razfriman.com
+2018/02/11, io7m, Mark Raynsford, code@io7m.com
+2018/04/24, solussd, Joe Smith, joe@uwcreations.com
+2018/15/05, johnvanderholt, jan dillingh johnvanderholte@gmail.com
+2018/06/14, scadgek, Sergey Chupov, scadgek@live.com
+2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com
+2018/06/27, wu-sheng, Wu Sheng, wu.sheng@foxmail.com
+2018/02/25, chaseoxide, Marcus Ong, taccs97[at]gmail[dot]com
+2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com
+2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com
+2018/05/15, johnvanderholt, jan dillingh johnvanderholte@gmail.com
+2018/05/17, sinopsysHK, Eric Bardes, sinofwd@gmail.com
+2018/05/23, srvance, Stephen Vance, steve@vance.com
+2018/06/14, alecont, Alessandro Contenti, alecontenti@hotmail.com
+2018/06/16, EternalPhane, Zongyuan Zuo, eternalphane@gmail.com
+2018/07/03, jgoppert, James Goppert, james.goppert@gmail.com
+2018/07/27, Maksim Novikov, mnovikov.work@gmail.com
+2018/07/31, Lucas Henrqiue, lucashenrique580@gmail.com
+2018/08/03, ENDOH takanao, djmchl@gmail.com
+2018/10/29, chrisaycock, Christopher Aycock, chris[at]chrisaycock[dot]com
+2018/11/12, vinoski, Steve Vinoski, vinoski@ieee.org
+2018/11/14, nxtstep, Adriaan (Arjan) Duz, codewithadriaan[et]gmail[dot]com
+2018/11/15, amykyta3, Alex Mykyta, amykyta3@users.noreply.github.com
+2018/11/29, hannemann-tamas, Ralf Hannemann-Tamas, ralf.ht@gmail.com
\ No newline at end of file
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
new file mode 100644
index 00000000000..1592212e146
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNInvalidAltNumber int
+
+type ATN struct {
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+ states []ATNState
+}
+
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes the set of valid tokens that can occur starting
+// in s and staying in same rule. Token.EPSILON is in set if we reach end of
+// rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ if s.GetNextTokenWithinRule() != nil {
+ return s.GetNextTokenWithinRule()
+ }
+
+ s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
+ s.GetNextTokenWithinRule().readOnly = true
+
+ return s.GetNextTokenWithinRule()
+}
+
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
new file mode 100644
index 00000000000..0535d5246c5
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
@@ -0,0 +1,295 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type comparable interface {
+ equals(other interface{}) bool
+}
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ comparable
+
+ hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) equals(o interface{}) bool {
+ if b == o {
+ return true
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+func (b *BaseATNConfig) hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func (l *LexerATNConfig) hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.hash())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.hash())
+ h = murmurUpdate(h, l.semanticContext.hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+func (l *LexerATNConfig) equals(other interface{}) bool {
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.equals(othert.BaseATNConfig)
+}
+
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
new file mode 100644
index 00000000000..d9f74755fa7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
@@ -0,0 +1,387 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type ATNConfigSet interface {
+ hash() int
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() *Set
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Equals(other interface{}) bool
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *Set
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from a.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It not protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewSet(nil, equalATNConfigs),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing := b.configLookup.add(config).(ATNConfig)
+
+ if existing == config {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() *Set {
+ states := NewSet(nil, nil)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.add(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.length() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+func (b *BaseATNConfigSet) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := murmurInit(1)
+ for _, c := range b.configs {
+ if c != nil {
+ h = murmurUpdate(h, c.hash())
+ }
+ }
+ return murmurFinish(h, len(b.configs))
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewSet(nil, equalATNConfigs)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ b.configLookup = NewSet(nil, nil)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber()
+ alts := ai.GetAlt() == bi.GetAlt()
+ cons := ai.GetSemanticContext().equals(bi.GetSemanticContext())
+
+ return nums && alts && cons
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
new file mode 100644
index 00000000000..18b89efafb2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+
+ if CopyFrom != nil {
+ o.readOnly = CopyFrom.readOnly
+ o.verifyATN = CopyFrom.verifyATN
+ o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
+ }
+
+ return o
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
new file mode 100644
index 00000000000..884d39cf7cd
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
@@ -0,0 +1,828 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "encoding/hex"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf16"
+)
+
+// This is the earliest supported serialized UUID.
+// stick to serialized version for now, we don't need a UUID instance
+var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
+var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
+
+// This list contains all of the currently supported UUIDs, ordered by when
+// the feature first appeared in this branch.
+var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}
+
+var SerializedVersion = 3
+
+// This is the current serialized UUID.
+var SerializedUUID = AddedUnicodeSMP
+
+type LoopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type BlockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ deserializationOptions *ATNDeserializationOptions
+ data []rune
+ pos int
+ uuid string
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = ATNDeserializationOptionsdefaultOptions
+ }
+
+ return &ATNDeserializer{deserializationOptions: options}
+}
+
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+// isFeatureSupported determines if a particular serialized representation of an
+// ATN supports a particular feature, identified by the UUID used for
+// serializing the ATN at the time the feature was first introduced. Feature is
+// the UUID marking the first time the feature was supported in the serialized
+// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently
+// being deserialized. It returns true if actualUuid represents a serialized ATN
+// at or after the feature identified by feature was introduced, and otherwise
+// false.
+func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
+ idx1 := stringInSlice(feature, SupportedUUIDs)
+
+ if idx1 < 0 {
+ return false
+ }
+
+ idx2 := stringInSlice(actualUUID, SupportedUUIDs)
+
+ return idx2 >= idx1
+}
+
+func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
+ a.reset(utf16.Decode(data))
+ a.checkVersion()
+ a.checkUUID()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := make([]*IntervalSet, 0)
+
+ // First, deserialize sets with 16-bit arguments <= U+FFFF.
+ sets = a.readSets(atn, sets, a.readInt)
+ // Next, if the ATN was serialized with the Unicode SMP feature,
+ // deserialize sets with 32-bit arguments <= U+10FFFF.
+ if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) {
+ sets = a.readSets(atn, sets, a.readInt32)
+ }
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+
+}
+
+func (a *ATNDeserializer) reset(data []rune) {
+ temp := make([]rune, len(data))
+
+ for i, c := range data {
+ // Don't adjust the first value since that's the version number
+ if i == 0 {
+ temp[i] = c
+ } else if c > 1 {
+ temp[i] = c - 2
+ } else {
+ temp[i] = c + 65533
+ }
+ }
+
+ a.data = temp
+ a.pos = 0
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != SerializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) checkUUID() {
+ uuid := a.readUUID()
+
+ if stringInSlice(uuid, SupportedUUIDs) < 0 {
+ panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
+ }
+
+ a.uuid = uuid
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
+ endStateNumbers := make([]BlockStartStateIntPair, 0)
+
+ nstates := a.readInt()
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ if ruleIndex == 0xFFFF {
+ ruleIndex = -1
+ }
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for j := 0; j < len(loopBackStateNumbers); j++ {
+ pair := loopBackStateNumbers[j]
+
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for j := 0; j < len(endStateNumbers); j++ {
+ pair := endStateNumbers[j]
+
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)
+
+ for i := 0; i < nrules; i++ {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ if tokenType == 0xFFFF {
+ tokenType = TokenEOF
+ }
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+
+ for i := 0; i < nmodes; i++ {
+ s := a.readInt()
+
+ atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
+ }
+}
+
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
+ m := a.readInt()
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := readUnicode()
+ i2 := readUnicode()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
// readEdges deserializes the explicit transitions and then derives the
// implicit ones: a return edge out of each rule's stop state for every rule
// invocation, plus the start/loop-back cross links for block and loop states.
func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
	nedges := a.readInt()

	for i := 0; i < nedges; i++ {
		var (
			src      = a.readInt()
			trg      = a.readInt()
			ttype    = a.readInt()
			arg1     = a.readInt()
			arg2     = a.readInt()
			arg3     = a.readInt()
			trans    = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
			srcState = atn.states[src]
		)

		srcState.AddTransition(trans, -1)
	}

	// Edges for rule stop states can be derived, so they are not serialized:
	// every RuleTransition implies an epsilon edge from the called rule's stop
	// state back to the caller's follow state.
	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		for j := 0; j < len(state.GetTransitions()); j++ {
			var t, ok = state.GetTransitions()[j].(*RuleTransition)

			if !ok {
				continue
			}

			outermostPrecedenceReturn := -1

			// Only a zero-precedence call into a precedence rule is tagged as
			// the outermost precedence return.
			if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
				if t.precedence == 0 {
					outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
				}
			}

			trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)

			atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
		}
	}

	for i := 0; i < len(atn.states); i++ {
		state := atn.states[i]

		// NOTE(review): states are stored as concrete types such as
		// *BasicBlockStartState, so this assertion to the embedded
		// *BaseBlockStartState may never match — confirm against how
		// stateFactory constructs block start states.
		if s2, ok := state.(*BaseBlockStartState); ok {
			// We need to know the end state to set its start state
			if s2.endState == nil {
				panic("IllegalState")
			}

			// Block end states can only be associated to a single block start state
			if s2.endState.startState != nil {
				panic("IllegalState")
			}

			s2.endState.startState = state
		}

		if s2, ok := state.(*PlusLoopbackState); ok {
			// Link each PlusBlockStartState target back to this loop-back state.
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*PlusBlockStartState); ok {
					t2.loopBackState = state
				}
			}
		} else if s2, ok := state.(*StarLoopbackState); ok {
			// Link each StarLoopEntryState target back to this loop-back state.
			for j := 0; j < len(s2.GetTransitions()); j++ {
				target := s2.GetTransitions()[j].getTarget()

				if t2, ok := target.(*StarLoopEntryState); ok {
					t2.loopBackState = state
				}
			}
		}
	}
}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)
+
+ for i := 0; i < count; i++ {
+ actionType := a.readInt()
+ data1 := a.readInt()
+
+ if data1 == 0xFFFF {
+ data1 = -1
+ }
+
+ data2 := a.readInt()
+
+ if data2 == 0xFFFF {
+ data2 = -1
+ }
+
+ lexerAction := a.lexerActionFactory(actionType, data1, data2)
+
+ atn.lexerActions[i] = lexerAction
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+ for count > 0 {
+ bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+ ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]})
+ }
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the ATN to determine if a ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.deserializationOptions.verifyATN {
+ return
+ }
+
+ // Verify assumptions
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2 := state.(type) {
+ case *StarBlockStartState:
+ var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok2, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok2, "")
+ a.checkCondition(s3.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok2, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case *BaseBlockStartState:
+ a.checkCondition(s2.endState != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v)
+}
+
+func (a *ATNDeserializer) readInt32() int {
+ var low = a.readInt()
+ var high = a.readInt()
+ return low | (high << 16)
+}
+
+//TODO
+//func (a *ATNDeserializer) readLong() int64 {
+// panic("Not implemented")
+// var low = a.readInt32()
+// var high = a.readInt32()
+// return (low & 0x00000000FFFFFFFF) | (high << int32)
+//}
+
// createByteToHex builds a lookup table mapping every byte value 0-255 to its
// two-character uppercase hexadecimal representation ("00" through "FF").
func createByteToHex() []string {
	table := make([]string, 256)

	for b := 0; b < 256; b++ {
		table[b] = strings.ToUpper(hex.EncodeToString([]byte{byte(b)}))
	}

	return table
}

// byteToHex is the shared byte-to-uppercase-hex lookup table used by readUUID.
var byteToHex = createByteToHex()
+
+func (a *ATNDeserializer) readUUID() string {
+ bb := make([]int, 16)
+
+ for i := 7; i >= 0; i-- {
+ integer := a.readInt()
+
+ bb[(2*i)+1] = integer & 0xFF
+ bb[2*i] = (integer >> 8) & 0xFF
+ }
+
+ return byteToHex[bb[0]] + byteToHex[bb[1]] +
+ byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
+ byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
+ byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
+ byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
+ byteToHex[bb[10]] + byteToHex[bb[11]] +
+ byteToHex[bb[12]] + byteToHex[bb[13]] +
+ byteToHex[bb[14]] + byteToHex[bb[15]]
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
new file mode 100644
index 00000000000..d5454d6d5dd
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// ATNSimulatorError is a shared sentinel DFA state with the maximum state
// number (0x7FFFFFFF), used by the simulators as an error marker.
var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
+
// IATNSimulator is the common interface of ATN simulators, exposing the ATN,
// the shared prediction-context cache, and the per-decision DFA cache.
type IATNSimulator interface {
	SharedContextCache() *PredictionContextCache
	ATN() *ATN
	DecisionToDFA() []*DFA
}
+
// BaseATNSimulator holds the state shared by ATN simulator implementations:
// the ATN itself, the shared prediction-context cache, and the per-decision
// DFAs.
type BaseATNSimulator struct {
	atn                *ATN
	sharedContextCache *PredictionContextCache
	decisionToDFA      []*DFA
}
+
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ visited := make(map[PredictionContext]PredictionContext)
+
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
// SharedContextCache returns the prediction-context cache shared by this
// simulator.
func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
	return b.sharedContextCache
}

// ATN returns the ATN this simulator runs over.
func (b *BaseATNSimulator) ATN() *ATN {
	return b.atn
}

// DecisionToDFA returns the per-decision DFA cache.
func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
	return b.decisionToDFA
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
new file mode 100644
index 00000000000..563d5db38d4
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
@@ -0,0 +1,386 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
// Constants for serialization. Each value tags an ATN state with its concrete
// type in the serialized form; stateFactory maps them back to implementations.
const (
	ATNStateInvalidType    = 0
	ATNStateBasic          = 1
	ATNStateRuleStart      = 2
	ATNStateBlockStart     = 3
	ATNStatePlusBlockStart = 4
	ATNStateStarBlockStart = 5
	ATNStateTokenStart     = 6
	ATNStateRuleStop       = 7
	ATNStateBlockEnd       = 8
	ATNStateStarLoopBack   = 9
	ATNStateStarLoopEntry  = 10
	ATNStatePlusLoopBack   = 11
	ATNStateLoopEnd        = 12

	// ATNStateInvalidStateNumber marks a state that has not yet been numbered.
	ATNStateInvalidStateNumber = -1
)

// ATNStateInitialNumTransitions is the initial capacity hint for a state's
// transition list.
var ATNStateInitialNumTransitions = 4
+
// ATNState is the interface implemented by every node in the augmented
// transition network. It exposes the state's identity (number, type, rule
// index), its outgoing transitions, and a slot for cached lookahead.
type ATNState interface {
	// GetEpsilonOnlyTransitions reports whether every outgoing transition is
	// an epsilon transition.
	GetEpsilonOnlyTransitions() bool

	// Rule membership.
	GetRuleIndex() int
	SetRuleIndex(int)

	// Cached lookahead set (parse-time only).
	GetNextTokenWithinRule() *IntervalSet
	SetNextTokenWithinRule(*IntervalSet)

	// Owning ATN.
	GetATN() *ATN
	SetATN(*ATN)

	// GetStateType returns one of the ATNState* serialization constants.
	GetStateType() int

	// State number within the ATN.
	GetStateNumber() int
	SetStateNumber(int)

	// Outgoing transitions.
	GetTransitions() []Transition
	SetTransitions([]Transition)
	AddTransition(Transition, int)

	String() string
	hash() int
}
+
// BaseATNState supplies the fields and default behavior shared by all ATN
// state implementations.
type BaseATNState struct {
	// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
	NextTokenWithinRule *IntervalSet

	// atn is the current ATN.
	atn *ATN

	// epsilonOnlyTransitions stays true only while every transition added via
	// AddTransition is an epsilon transition.
	epsilonOnlyTransitions bool

	// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
	ruleIndex int

	// stateNumber is this state's number within the ATN.
	stateNumber int

	// stateType is one of the ATNState* serialization constants.
	stateType int

	// Track the transitions emanating from this ATN state.
	transitions []Transition
}
+
// NewBaseATNState returns a BaseATNState with an invalid state number and
// type; callers assign real values via SetStateNumber and the concrete
// constructors.
func NewBaseATNState() *BaseATNState {
	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
}

// GetRuleIndex returns the index of the rule this state belongs to.
func (as *BaseATNState) GetRuleIndex() int {
	return as.ruleIndex
}

// SetRuleIndex records the index of the rule this state belongs to.
func (as *BaseATNState) SetRuleIndex(v int) {
	as.ruleIndex = v
}

// GetEpsilonOnlyTransitions reports whether every outgoing transition is an
// epsilon transition.
func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
	return as.epsilonOnlyTransitions
}

// GetATN returns the ATN this state belongs to.
func (as *BaseATNState) GetATN() *ATN {
	return as.atn
}

// SetATN records the ATN this state belongs to.
func (as *BaseATNState) SetATN(atn *ATN) {
	as.atn = atn
}

// GetTransitions returns the outgoing transitions of this state.
func (as *BaseATNState) GetTransitions() []Transition {
	return as.transitions
}

// SetTransitions replaces the outgoing transitions of this state.
func (as *BaseATNState) SetTransitions(t []Transition) {
	as.transitions = t
}

// GetStateType returns the serialized state type (an ATNState* constant).
func (as *BaseATNState) GetStateType() int {
	return as.stateType
}

// GetStateNumber returns this state's number within the ATN.
func (as *BaseATNState) GetStateNumber() int {
	return as.stateNumber
}

// SetStateNumber records this state's number within the ATN.
func (as *BaseATNState) SetStateNumber(stateNumber int) {
	as.stateNumber = stateNumber
}

// GetNextTokenWithinRule returns the cached lookahead set, if any.
func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
	return as.NextTokenWithinRule
}

// SetNextTokenWithinRule caches a lookahead set for this state.
func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
	as.NextTokenWithinRule = v
}

// hash returns the state number, which identifies a state within its ATN.
func (as *BaseATNState) hash() int {
	return as.stateNumber
}

// String returns the state number in decimal.
func (as *BaseATNState) String() string {
	return strconv.Itoa(as.stateNumber)
}

// equals reports whether other is an ATNState with the same state number.
func (as *BaseATNState) equals(other interface{}) bool {
	if ot, ok := other.(ATNState); ok {
		return as.stateNumber == ot.GetStateNumber()
	}

	return false
}

// isNonGreedyExitState reports whether this state exits a non-greedy loop;
// the base implementation always reports false.
func (as *BaseATNState) isNonGreedyExitState() bool {
	return false
}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+ as.epsilonOnlyTransitions = false
+ }
+
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+}
+
+type BasicState struct {
+ *BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
+}
+
// DecisionState is an ATN state at which alternatives diverge; it carries a
// decision number and the non-greedy flag set during deserialization.
type DecisionState interface {
	ATNState

	getDecision() int
	setDecision(int)

	getNonGreedy() bool
	setNonGreedy(bool)
}
+
// BaseDecisionState adds a decision number and a non-greedy flag to
// BaseATNState; it backs every state that participates in decision making.
type BaseDecisionState struct {
	*BaseATNState
	decision  int
	nonGreedy bool
}

// NewBaseDecisionState returns a decision state with no decision number
// assigned (-1).
func NewBaseDecisionState() *BaseDecisionState {
	return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
}

// getDecision returns the decision number, or -1 if none has been assigned.
func (s *BaseDecisionState) getDecision() int {
	return s.decision
}

// setDecision assigns the decision number.
func (s *BaseDecisionState) setDecision(b int) {
	s.decision = b
}

// getNonGreedy reports whether this state was marked non-greedy during
// deserialization.
func (s *BaseDecisionState) getNonGreedy() bool {
	return s.nonGreedy
}

// setNonGreedy sets the non-greedy flag.
func (s *BaseDecisionState) setNonGreedy(b bool) {
	s.nonGreedy = b
}
+
// BlockStartState is the interface of states that open a block and know
// their matching BlockEndState.
type BlockStartState interface {
	DecisionState

	getEndState() *BlockEndState
	setEndState(*BlockEndState)
}

// BaseBlockStartState is the start of a regular (...) block.
type BaseBlockStartState struct {
	*BaseDecisionState
	endState *BlockEndState
}

// NewBlockStartState returns a block start state with no end state attached.
func NewBlockStartState() *BaseBlockStartState {
	return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
}

// getEndState returns the matching block end state, if one has been attached.
func (s *BaseBlockStartState) getEndState() *BlockEndState {
	return s.endState
}

// setEndState attaches the matching block end state.
func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
	s.endState = b
}
+
+type BasicBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
+}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ *BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ *BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
+}
+
+type RuleStartState struct {
+ *BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ *BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ *BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
+}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
+}
+
+type StarLoopbackState struct {
+ *BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
+}
+
+type StarLoopEntryState struct {
+ *BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
+ // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
+ return &StarLoopEntryState{BaseDecisionState: b}
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ *BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ *BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
new file mode 100644
index 00000000000..a7b48976b31
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// Represent the type of recognizer an ATN applies to.
const (
	// ATNTypeLexer marks an ATN generated for a lexer grammar.
	ATNTypeLexer = 0
	// ATNTypeParser marks an ATN generated for a parser grammar.
	ATNTypeParser = 1
)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
new file mode 100644
index 00000000000..70c1207f7ff
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// CharStream extends IntStream with random access to the underlying
// character data by index range, token span, or interval.
type CharStream interface {
	IntStream
	GetText(int, int) string
	GetTextFromTokens(start, end Token) string
	GetTextFromInterval(*Interval) string
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
new file mode 100644
index 00000000000..330ff8f31f8
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// TokenFactory creates CommonToken objects.
type TokenFactory interface {
	// Create builds a token of the given type spanning [start, stop] in the
	// source pair, on the given channel, at the given line/column. A non-empty
	// text argument overrides text taken from the source.
	Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
}
+
// CommonTokenFactory is the default TokenFactory implementation.
type CommonTokenFactory struct {
	// copyText indicates whether CommonToken.setText should be called after
	// constructing tokens to explicitly set the text. This is useful for cases
	// where the input stream might not be able to provide arbitrary substrings
	// of text from the input after the lexer creates a token (e.g. the
	// implementation of CharStream.GetText in UnbufferedCharStream panics with
	// an UnsupportedOperationException). Explicitly setting the token text
	// allows Token.GetText to be called at any time regardless of the input
	// stream implementation.
	//
	// The default value is false to avoid the performance and memory overhead
	// of copying text for every token unless explicitly requested.
	copyText bool
}
+
// NewCommonTokenFactory returns a factory; copyText controls whether token
// text is eagerly copied from the input stream (see the copyText field).
func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
	return &CommonTokenFactory{copyText: copyText}
}

// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
// explicitly copy token text when constructing tokens.
var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
new file mode 100644
index 00000000000..c90e9b8904c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. Lookahead and consumption skip tokens that
+// are not on the stream's configured channel, such as TokenDefaultChannel or
+// TokenHiddenChannel, while the buffer itself keeps every fetched token.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index indexes into tokens of the current token (next token to consume).
+ // tokens[p] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of IntStream for a description of initializing methods.
+ index int
+
+ // tokenSource is the TokenSource from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens is all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+// NewCommonTokenStream creates a stream that pulls tokens from lexer and
+// restricts lookahead/consumption to tokens on the given channel. The token
+// buffer starts empty and index starts at -1 (nothing fetched yet).
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ stream := new(CommonTokenStream)
+ stream.channel = channel
+ stream.index = -1
+ stream.tokenSource = lexer
+ stream.tokens = make([]Token, 0)
+ return stream
+}
+
+// GetAllTokens returns the backing token buffer (not a copy).
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+// Mark is a no-op for a buffered stream; the returned marker is meaningless.
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+// Release is the no-op counterpart to Mark.
+func (c *CommonTokenStream) Release(marker int) {}
+
+// reset rewinds the stream to the first on-channel token.
+func (c *CommonTokenStream) reset() {
+ c.Seek(0)
+}
+
+// Seek moves the current position to the first on-channel token at or after
+// index, fetching tokens as needed.
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+// Get returns the buffered token at index; it panics (slice bounds) when the
+// index has not been fetched.
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+// Consume advances the stream past the current token to the next on-channel
+// token, fetching from the source as needed. It panics when asked to consume
+// the EOF symbol.
+func (c *CommonTokenStream) Consume() {
+ skipEOFCheck := false
+
+ if c.index >= 0 {
+  if c.fetchedEOF {
+   // EOF is already buffered as the last token: the expensive LA check can
+   // be skipped unless we are positioned on that final token.
+   skipEOFCheck = c.index < len(c.tokens)-1
+  } else {
+   // EOF not buffered yet: the check can be skipped while the position is
+   // inside the fetched region.
+   skipEOFCheck = c.index < len(c.tokens)
+  }
+ }
+
+ if !skipEOFCheck && c.LA(1) == TokenEOF {
+  panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+  c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i and otherwise false.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // Number of additional tokens needed to reach i.
+
+ if n > 0 {
+  fetched := c.fetch(n)
+  // fetch may return fewer than n when EOF is reached first.
+  return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds up to n elements to the buffer and returns the actual number of
+// elements added. Fetching stops early (and permanently, via fetchedEOF) once
+// the source emits EOF; after that, fetch always returns 0.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+  return 0
+ }
+
+ for i := 0; i < n; i++ {
+  t := c.tokenSource.NextToken()
+
+  // Record the token's buffer position before appending it.
+  t.SetTokenIndex(len(c.tokens))
+  c.tokens = append(c.tokens, t)
+
+  if t.GetTokenType() == TokenEOF {
+   c.fetchedEOF = true
+
+   return i + 1
+  }
+ }
+
+ return n
+}
+
+// GetTokens gets all tokens from start to stop inclusive, optionally
+// restricted to the types in the given set (nil means all types). The scan
+// stops early at EOF, and stop is clamped to the end of the buffer. Returns
+// nil when either index is negative.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+  return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+  stop = len(c.tokens) - 1
+ }
+
+ // The bound is inclusive so the token at stop is returned, matching the
+ // documented contract and the reference Java implementation; the previous
+ // "i < stop" silently dropped the last token.
+ for i := start; i <= stop; i++ {
+  t := c.tokens[i]
+
+  if t.GetTokenType() == TokenEOF {
+   break
+  }
+
+  if types == nil || types.contains(t.GetTokenType()) {
+   subset = append(subset, t)
+  }
+ }
+
+ return subset
+}
+
+// LA returns the token type of the k-th lookahead token (see LT).
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+// lazyInit performs the deferred first fetch; index == -1 means setup has not
+// run yet.
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+  c.setup()
+ }
+}
+
+// setup fetches the first token and positions the stream on the first
+// on-channel token.
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+// GetTokenSource returns the underlying TokenSource.
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the stream to read from tokenSource: the buffer is
+// emptied and index returns to -1 so the next access re-initializes lazily.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between i and EOF.
+//
+// NOTE(review): the channel parameter is not consulted — the loop compares
+// against c.channel instead. Callers appear to always pass a channel matching
+// the stream's; confirm before relying on the parameter.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+  return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+  if token.GetTokenType() == TokenEOF {
+   return -1
+  }
+
+  // Fetch lazily as we walk forward.
+  i++
+  c.Sync(i)
+  token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel, and -1 when no
+// token on channel exists between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for ; i >= 0; i-- {
+  if c.tokens[i].GetChannel() == channel {
+   break
+  }
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token. Panics when
+// tokenIndex is outside the buffered range.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+  panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
+ var to int
+
+ if nextOnChannel == -1 {
+  to = len(c.tokens) - 1
+ } else {
+  to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non default channel token. Returns nil when the adjacent
+// token to the left is already on the default channel. Panics when tokenIndex
+// is outside the buffered range.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+  panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+  return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// filterForChannel returns the buffered tokens in [left, right] that are on
+// channel; when channel is -1 it returns every token NOT on the lexer's
+// default channel. Returns nil when nothing matches.
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i <= right; i++ {
+  t := c.tokens[i]
+
+  switch {
+  case channel == -1:
+   if t.GetChannel() != LexerDefaultTokenChannel {
+    hidden = append(hidden, t)
+   }
+  case t.GetChannel() == channel:
+   hidden = append(hidden, t)
+  }
+ }
+
+ if len(hidden) == 0 {
+  return nil
+ }
+
+ return hidden
+}
+
+// GetSourceName delegates to the underlying token source.
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+// Size returns the number of tokens fetched so far.
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+// Index returns the current token position (next token to consume).
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+// GetAllText returns the text of the entire stream.
+func (c *CommonTokenStream) GetAllText() string {
+ return c.GetTextFromInterval(nil)
+}
+
+// GetTextFromTokens returns the text spanned by the two tokens (inclusive),
+// or "" when either token is nil.
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+  return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+// GetTextFromRuleContext returns the text covered by the rule context's
+// source interval.
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+ c.lazyInit()
+ c.Fill()
+
+ if interval == nil {
+ interval = NewInterval(0, len(c.tokens)-1)
+ }
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF, in batches of 1000.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ // A short batch (fetch < 1000) means EOF was reached.
+ for c.fetch(1000) == 1000 {
+  continue
+ }
+}
+
+// adjustSeekIndex maps a raw index to the nearest on-channel token at or
+// after it.
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+// LB returns the k-th on-channel token behind the current position, or nil
+// when k is 0 or the position underflows the buffer.
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+  return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+  // Skip off-channel tokens
+  i = c.previousTokenOnChannel(i-1, c.channel)
+  n++
+ }
+
+ if i < 0 {
+  return nil
+ }
+
+ return c.tokens[i]
+}
+
+// LT returns the k-th lookahead token on the stream's channel. k == 0 yields
+// nil; negative k looks backward via LB. LT(1) is the current token.
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+  return nil
+ }
+
+ if k < 0 {
+  return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+  // Skip off-channel tokens, but make sure to not look past EOF
+  if c.Sync(i + 1) {
+   i = c.NextTokenOnChannel(i+1, c.channel)
+  }
+
+  n++
+ }
+
+ return c.tokens[i]
+}
+
+// getNumberOfOnChannelTokens counts EOF once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
new file mode 100644
index 00000000000..d6079aa203b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "sort"
+ "sync"
+)
+
+// DFA is a deterministic finite automaton cached per decision during
+// adaptive prediction. Access to states and s0 is guarded by the
+// corresponding mutexes so simulators on multiple goroutines can share it.
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there.
+ states map[int]*DFAState
+ statesMu sync.RWMutex
+
+ // s0 is the start state; guarded by s0Mu.
+ s0 *DFAState
+ s0Mu sync.RWMutex
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+// NewDFA returns an empty DFA for the given decision, rooted at atnStartState.
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := new(DFA)
+ dfa.atnStartState = atnStartState
+ dfa.decision = decision
+ dfa.states = make(map[int]*DFAState)
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA (panics otherwise). Takes the s0 read lock.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.precedenceDfa {
+  panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ d.s0Mu.RLock()
+ defer d.s0Mu.RUnlock()
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.s0.edges) {
+  return nil
+ }
+
+ return d.s0.edges[precedence]
+}
+
+// setPrecedenceStartState sets the start state for the given precedence,
+// growing s0.edges as needed. d must be a precedence DFA (panics otherwise);
+// negative precedences are silently ignored. Takes the s0 write lock.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.precedenceDfa {
+  panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+  return
+ }
+
+ d.s0Mu.Lock()
+ defer d.s0Mu.Unlock()
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ if precedence >= len(d.s0.edges) {
+  d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...)
+ }
+
+ d.s0.edges[precedence] = startState
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+//
+// NOTE(review): unlike the accessors, this method mutates states and s0
+// without taking the mutexes — presumably only called before the DFA is
+// shared; confirm against callers.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.precedenceDfa != precedenceDfa {
+  d.states = make(map[int]*DFAState)
+
+  if precedenceDfa {
+   precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
+   precedenceState.edges = make([]*DFAState, 0)
+   precedenceState.isAcceptState = false
+   precedenceState.requiresFullContext = false
+   d.s0 = precedenceState
+  } else {
+   d.s0 = nil
+  }
+
+  d.precedenceDfa = precedenceDfa
+ }
+}
+
+// getS0 returns the start state under the read lock.
+func (d *DFA) getS0() *DFAState {
+ d.s0Mu.RLock()
+ defer d.s0Mu.RUnlock()
+ return d.s0
+}
+
+// setS0 replaces the start state under the write lock.
+func (d *DFA) setS0(s *DFAState) {
+ d.s0Mu.Lock()
+ defer d.s0Mu.Unlock()
+ d.s0 = s
+}
+
+// getState looks up a cached DFA state by its hash under the read lock.
+func (d *DFA) getState(hash int) (*DFAState, bool) {
+ d.statesMu.RLock()
+ defer d.statesMu.RUnlock()
+ s, ok := d.states[hash]
+ return s, ok
+}
+
+// setState caches a DFA state under its hash, under the write lock.
+func (d *DFA) setState(hash int, state *DFAState) {
+ d.statesMu.Lock()
+ defer d.statesMu.Unlock()
+ d.states[hash] = state
+}
+
+// numStates returns the number of cached DFA states under the read lock.
+func (d *DFA) numStates() int {
+ d.statesMu.RLock()
+ defer d.statesMu.RUnlock()
+ return len(d.states)
+}
+
+// dfaStateList adapts []*DFAState to sort.Interface, ordering by stateNumber.
+type dfaStateList []*DFAState
+
+func (d dfaStateList) Len() int { return len(d) }
+func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
+func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
+
+// sortedStates returns the states in d sorted by their state number.
+func (d *DFA) sortedStates() []*DFAState {
+ vs := make([]*DFAState, 0, len(d.states))
+
+ for _, v := range d.states {
+ vs = append(vs, v)
+ }
+
+ sort.Sort(dfaStateList(vs))
+
+ return vs
+}
+
+// String serializes the DFA for debugging, using the given token display
+// names. Returns "" when the DFA has no start state.
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.s0 == nil {
+  return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+// ToLexerString serializes a lexer DFA (edge labels rendered as characters).
+// Returns "" when the DFA has no start state.
+func (d *DFA) ToLexerString() string {
+ if d.s0 == nil {
+  return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
new file mode 100644
index 00000000000..4c0f690229a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// DFASerializer is a DFA walker that knows how to dump them to serialized
+// strings, labelling edges with literal or symbolic token names.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+// NewDFASerializer builds a serializer for dfa. Nil name slices are
+// normalized to empty slices so lookups only need bounds checks.
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ s := new(DFASerializer)
+ s.dfa = dfa
+
+ s.literalNames = literalNames
+ if s.literalNames == nil {
+  s.literalNames = make([]string, 0)
+ }
+
+ s.symbolicNames = symbolicNames
+ if s.symbolicNames == nil {
+  s.symbolicNames = make([]string, 0)
+ }
+
+ return s
+}
+
+// String renders every populated, reachable edge of the DFA as
+// "<state>-<label>-><target>\n" lines, in state-number order. Returns ""
+// when there is no start state or no printable edges.
+func (d *DFASerializer) String() string {
+ if d.dfa.s0 == nil {
+  return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+  if s.edges != nil {
+   n := len(s.edges)
+
+   for j := 0; j < n; j++ {
+    t := s.edges[j]
+
+    // 0x7FFFFFFF marks an error/sentinel state; skip it.
+    if t != nil && t.stateNumber != 0x7FFFFFFF {
+     buf += d.GetStateString(s)
+     buf += "-"
+     buf += d.getEdgeLabel(j)
+     buf += "->"
+     buf += d.GetStateString(t)
+     buf += "\n"
+    }
+   }
+  }
+ }
+
+ if len(buf) == 0 {
+  return ""
+ }
+
+ return buf
+}
+
+// getEdgeLabel names edge index i. Edge 0 is EOF (edges are shifted by one so
+// Token.EOF == -1 maps to slot 0); otherwise the literal name, then the
+// symbolic name, then the raw token type are tried in order.
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+  return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+  return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+  return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+// LexerDFASerializer is a DFASerializer variant for lexer DFAs: edge labels
+// are rendered as quoted characters instead of token names.
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+// NewLexerDFASerializer wraps dfa in a serializer with no token name tables.
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+// getEdgeLabel renders edge index i as a quoted character. The conversion
+// goes through rune explicitly: string(i) on an integer is a Unicode
+// code-point conversion, not decimal formatting, and the implicit form is
+// flagged by go vet. Behavior is identical to string(i).
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ return "'" + string(rune(i)) + "'"
+}
+
+// String renders every populated, reachable edge of the lexer DFA as
+// "<state>-<char>-><target>\n" lines, in state-number order. Returns "" when
+// there is no start state or no printable edges.
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.s0 == nil {
+  return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+  s := states[i]
+
+  if s.edges != nil {
+   n := len(s.edges)
+
+   for j := 0; j < n; j++ {
+    t := s.edges[j]
+
+    // 0x7FFFFFFF marks an error/sentinel state; skip it.
+    if t != nil && t.stateNumber != 0x7FFFFFFF {
+     buf += l.GetStateString(s)
+     buf += "-"
+     buf += l.getEdgeLabel(j)
+     buf += "->"
+     buf += l.GetStateString(t)
+     buf += "\n"
+    }
+   }
+  }
+ }
+
+ if len(buf) == 0 {
+  return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
new file mode 100644
index 00000000000..38e918ad91e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
@@ -0,0 +1,166 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+// NewPredPrediction pairs a semantic predicate with the alternative it
+// predicts.
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+// String renders the pair as "(pred, alt)".
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1a2..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the ttype we match or alt we predict if the state is accept.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump to doing
+ // full context prediction if true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly. If
+ // this state is
+ // not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+// NewDFAState builds a DFA state for the given configuration set, substituting
+// an empty non-full-context set when configs is nil.
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+ if configs == nil {
+  configs = NewBaseATNConfigSet(false)
+ }
+
+ s := new(DFAState)
+ s.configs = configs
+ s.stateNumber = stateNumber
+ return s
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d,
+// or nil when there are none.
+func (d *DFAState) GetAltSet() *Set {
+ alts := NewSet(nil, nil)
+
+ if d.configs != nil {
+  for _, c := range d.configs.GetItems() {
+   alts.add(c.GetAlt())
+  }
+ }
+
+ if alts.length() == 0 {
+  return nil
+ }
+
+ return alts
+}
+
+// setPrediction records the predicted alternative (or matched ttype).
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+// equals reports whether d and other are the same DFA state: either the very
+// same object, or two *DFAState values whose ATN configuration sets are equal.
+// The state number is deliberately ignored — ParserATNSimulator.addDFAState
+// needs to find an existing state with the same configurations regardless of
+// numbering. Because the number of alternatives and configurations is finite,
+// this equality gives a finite state space, which is what guarantees the
+// construction terminates.
+func (d *DFAState) equals(other interface{}) bool {
+ if d == other {
+  return true
+ }
+
+ o, ok := other.(*DFAState)
+ if !ok {
+  return false
+ }
+
+ return d.configs.Equals(o.configs)
+}
+
+// String renders the state as "<stateNumber>:<configs>" plus, for accept
+// states, "=>" followed by the predicates or the predicted alternative.
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+  if d.predicates != nil {
+   s = "=>" + fmt.Sprint(d.predicates)
+  } else {
+   s = "=>" + fmt.Sprint(d.prediction)
+  }
+ }
+
+ // The %d verb needs the state number as its own argument. The previous call
+ // passed only two values for three verbs and fed a string to %d, producing
+ // "%!d(string=...)%!s(MISSING)" output.
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+// hash folds the accept-state data (predicates or prediction) and the
+// configuration set into a murmur hash; c tracks the number of updates for
+// the finishing step.
+func (d *DFAState) hash() int {
+ h := murmurInit(11)
+
+ c := 1
+ if d.isAcceptState {
+  if d.predicates != nil {
+   for _, p := range d.predicates {
+    h = murmurUpdate(h, p.alt)
+    h = murmurUpdate(h, p.pred.hash())
+    c += 2
+   }
+  } else {
+   h = murmurUpdate(h, d.prediction)
+   c += 1
+  }
+ }
+
+ h = murmurUpdate(h, d.configs.hash())
+ return murmurFinish(h, c)
+}
\ No newline at end of file
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
new file mode 100644
index 00000000000..1fec43d9dca
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+//
+// This implementation of {@link ANTLRErrorListener} can be used to identify
+// certain potential correctness and performance problems in grammars. "reports"
+// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
+// message.
+//
+//
+// Ambiguities : These are cases where more than one path through the
+// grammar can Match the input.
+// Weak context sensitivity : These are cases where full-context
+// prediction resolved an SLL conflict to a unique alternative which equaled the
+// minimum alternative of the SLL conflict.
+// Strong (forced) context sensitivity : These are cases where the
+// full-context prediction resolved an SLL conflict to a unique alternative,
+// and the minimum alternative of the SLL conflict was found to not be
+// a truly viable alternative. Two-stage parsing cannot be used for inputs where
+// this situation occurs.
+//
+
+// DiagnosticErrorListener reports ambiguities and context sensitivities (see
+// the comment above) through Parser.NotifyErrorListeners.
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ // exactOnly limits ambiguity reports to exact ambiguities when true.
+ exactOnly bool
+}
+
+// NewDiagnosticErrorListener returns a listener. When exactOnly is true only
+// exact ambiguities are reported; otherwise all ambiguities are.
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+ return &DiagnosticErrorListener{exactOnly: exactOnly}
+}
+
+// ReportAmbiguity reports an ambiguous decision (skipped for inexact
+// ambiguities when exactOnly is set), including the conflicting alts and the
+// offending input slice.
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if d.exactOnly && !exact {
+  return
+ }
+ msg := "reportAmbiguity d=" +
+  d.getDecisionDescription(recognizer, dfa) +
+  ": ambigAlts=" +
+  d.getConflictingAlts(ambigAlts, configs).String() +
+  ", input='" +
+  recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+// ReportAttemptingFullContext reports that SLL prediction hit a conflict and
+// full-context prediction is being attempted.
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+  d.getDecisionDescription(recognizer, dfa) +
+  ", input='" +
+  recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+// ReportContextSensitivity reports that full-context prediction resolved the
+// decision.
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+  d.getDecisionDescription(recognizer, dfa) +
+  ", input='" +
+  recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+// getDecisionDescription labels a decision as "<n>" or "<n> (ruleName)" when
+// the DFA's rule index resolves to a named rule.
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := strconv.Itoa(dfa.decision)
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+ ruleNames := recognizer.GetRuleNames()
+
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) || ruleNames[ruleIndex] == "" {
+  return decision
+ }
+
+ return decision + " (" + ruleNames[ruleIndex] + ")"
+}
+
+// getConflictingAlts computes the set of conflicting or ambiguous
+// alternatives from a configuration set, if that information was not already
+// provided by the parser: it returns ReportedAlts when non-nil, otherwise the
+// set of alternatives represented in set.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+  return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.GetItems() {
+  result.add(c.GetAlt())
+ }
+
+ return result
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
new file mode 100644
index 00000000000..028e1a9d7f0
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// ErrorListener receives syntax errors and prediction diagnostics from a
+// recognizer. DefaultErrorListener below provides no-op implementations of
+// every method so implementations can override only what they need.
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
+}
+
+// DefaultErrorListener is a no-op ErrorListener intended for embedding.
+type DefaultErrorListener struct {
+}
+
+// NewDefaultErrorListener returns a listener that ignores every event.
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+// SyntaxError is a no-op.
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+}
+
+// ReportAmbiguity is a no-op.
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+}
+
+// ReportAttemptingFullContext is a no-op.
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+}
+
+// ReportContextSensitivity is a no-op.
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+}
+
+// ConsoleErrorListener prints syntax errors to stderr.
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+// NewConsoleErrorListener returns a stderr-printing listener.
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE is a shared default ConsoleErrorListener.
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError writes one line to stderr in the format:
+//
+// line <line>:<column> <msg>
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+// ProxyErrorListener fans every event out to a list of delegate listeners.
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+// NewProxyErrorListener wraps delegates; a nil slice is a programming error
+// and panics.
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+  panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+// SyntaxError forwards the event to every delegate in order.
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+  d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+// ReportAmbiguity forwards the event to every delegate in order.
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+  d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+// ReportAttemptingFullContext forwards the event to every delegate in order.
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+  d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+// ReportContextSensitivity forwards the event to every delegate in order.
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+  d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
new file mode 100644
index 00000000000..977a6e45496
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
@@ -0,0 +1,758 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// ErrorStrategy is the interface a Parser consults to report and recover
+// from syntax errors during parsing.
+type ErrorStrategy interface {
+	// reset returns the strategy to its initial (non-recovering) state.
+	reset(Parser)
+	// RecoverInline attempts single-token recovery at a Match failure and
+	// returns the token to use as the Match result.
+	RecoverInline(Parser) Token
+	// Recover resynchronizes the parser after a recognition exception.
+	Recover(Parser, RecognitionException)
+	// Sync checks ahead of subrules/loops that the lookahead is viable.
+	Sync(Parser)
+	// inErrorRecoveryMode reports whether the strategy is mid-recovery.
+	inErrorRecoveryMode(Parser) bool
+	// ReportError reports the given recognition exception to listeners.
+	ReportError(Parser, RecognitionException)
+	// ReportMatch signals a successful Match, ending any error condition.
+	ReportMatch(Parser)
+}
+
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
+//
+type DefaultErrorStrategy struct {
+	// errorRecoveryMode suppresses duplicate reports while recovering.
+	errorRecoveryMode bool
+	// lastErrorIndex is the input index where the last error occurred (-1 if none).
+	lastErrorIndex int
+	// lastErrorStates holds ATN state numbers where previous errors occurred.
+	lastErrorStates *IntervalSet
+}
+
+// Compile-time assertion that DefaultErrorStrategy satisfies ErrorStrategy.
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+// NewDefaultErrorStrategy returns a strategy in its initial, non-recovering state.
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+	d := new(DefaultErrorStrategy)
+
+	// Indicates whether the error strategy is currently "recovering from an
+	// error". This is used to suppress Reporting multiple error messages while
+	// attempting to recover from a detected syntax error.
+	//
+	// @see //inErrorRecoveryMode
+	//
+	d.errorRecoveryMode = false
+
+	// The index into the input stream where the last error occurred.
+	// This is used to prevent infinite loops where an error is found
+	// but no token is consumed during recovery...another error is found,
+	// ad nauseum. This is a failsafe mechanism to guarantee that at least
+	// one token/tree node is consumed for two errors.
+	//
+	d.lastErrorIndex = -1
+	d.lastErrorStates = nil
+	return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+	d.endErrorCondition(recognizer)
+}
+
+//
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
+	d.errorRecoveryMode = true
+}
+
+// inErrorRecoveryMode reports whether the strategy is currently recovering
+// from a previously reported error.
+func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
+	return d.errorRecoveryMode
+}
+
+//
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception. It also clears the loop-protection bookkeeping
+// (lastErrorStates / lastErrorIndex).
+//
+// @param recognizer
+//
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
+	d.errorRecoveryMode = false
+	d.lastErrorStates = nil
+	d.lastErrorIndex = -1
+}
+
+//
+// {@inheritDoc}
+//
+// The default implementation simply calls {@link //endErrorCondition}.
+//
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+	d.endErrorCondition(recognizer)
+}
+
+//
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table.
+//
+//
+// {@link NoViableAltException}: Dispatches the call to
+// {@link //ReportNoViableAlternative}
+// {@link InputMisMatchException}: Dispatches the call to
+// {@link //ReportInputMisMatch}
+// {@link FailedPredicateException}: Dispatches the call to
+// {@link //ReportFailedPredicate}
+// All other types: calls {@link Parser//NotifyErrorListeners} to Report
+// the exception
+//
+//
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+	// if we've already Reported an error and have not Matched a token
+	// yet successfully, don't Report any errors.
+	if d.inErrorRecoveryMode(recognizer) {
+		return // don't Report spurious errors
+	}
+	d.beginErrorCondition(recognizer)
+
+	// Dispatch on the dynamic type of the exception; unknown types fall
+	// through to a generic listener notification.
+	switch t := e.(type) {
+	default:
+		fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+		// fmt.Println(e.stack)
+		recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+	case *NoViableAltException:
+		d.ReportNoViableAlternative(recognizer, t)
+	case *InputMisMatchException:
+		d.ReportInputMisMatch(recognizer, t)
+	case *FailedPredicateException:
+		d.ReportFailedPredicate(recognizer, t)
+	}
+}
+
+// {@inheritDoc}
+//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+//
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+
+	if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+		d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+		// uh oh, another error at same token index and previously-Visited
+		// state in ATN must be a case where LT(1) is in the recovery
+		// token set so nothing got consumed. Consume a single token
+		// at least to prevent an infinite loop d is a failsafe.
+		recognizer.Consume()
+	}
+	// Remember where this error happened so the failsafe above can detect
+	// a repeat error with no progress.
+	d.lastErrorIndex = recognizer.GetInputStream().Index()
+	if d.lastErrorStates == nil {
+		d.lastErrorStates = NewIntervalSet()
+	}
+	d.lastErrorStates.addOne(recognizer.GetState())
+	followSet := d.getErrorRecoverySet(recognizer)
+	d.consumeUntil(recognizer, followSet)
+}
+
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what were expecting
+// at d point in the ATN. You can call d anytime but ANTLR only
+// generates code to check before subrules/loops and each iteration.
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+//
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+//
+// At the start of a sub rule upon error, {@link //Sync} performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// ORIGINS
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatch token or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule
+//
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality cost a little bit of effort because the parser has to
+// compare token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off d
+// functionality by simply overriding d method as a blank { }.
+//
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+	// If already recovering, don't try to Sync
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+
+	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+	la := recognizer.GetTokenStream().LA(1)
+
+	// try cheaper subset first might get lucky. seems to shave a wee bit off
+	nextTokens := recognizer.GetATN().NextTokens(s, nil)
+	if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+		return
+	}
+
+	// Lookahead is not viable here; how we react depends on the kind of
+	// ATN decision state we are sitting at.
+	switch s.GetStateType() {
+	case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+		// Report error and recover if possible
+		if d.SingleTokenDeletion(recognizer) != nil {
+			return
+		}
+		panic(NewInputMisMatchException(recognizer))
+	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+		d.ReportUnwantedToken(recognizer)
+		expecting := NewIntervalSet()
+		expecting.addSet(recognizer.GetExpectedTokens())
+		whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+	default:
+		// do nothing if we can't identify the exact kind of ATN state
+	}
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+	tokens := recognizer.GetTokenStream()
+	var input string
+	if tokens != nil {
+		if e.startToken.GetTokenType() == TokenEOF {
+			// Placeholder text for end-of-file, matching the other runtimes.
+			input = "<EOF>"
+		} else {
+			input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+		}
+	} else {
+		// No token stream available; we cannot show the offending text.
+		input = "<unknown input>"
+	}
+	msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+//
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}. It notifies the error listeners with a
+// "mismatched input ... expecting ..." message.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+// Receiver renamed from "this" to "d" for consistency with every other
+// DefaultErrorStrategy method; Go convention discourages "this"/"self".
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+	msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+//
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}. The message is prefixed with the name
+// of the rule in which the predicate failed.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+	msg := "rule " + ruleName + " " + e.message
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This method is called to Report a syntax error which requires the removal
+// of a token from the input stream. At the time d method is called, the
+// erroneous symbol is current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When d method returns,
+// {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenDeletion} identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	tokenName := d.GetTokenErrorDisplay(t)
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "extraneous input " + tokenName + " expecting " +
+		expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time d
+// method is called, the missing token has not yet been inserted. When d
+// method returns, {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenInsertion} identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+		" at " + d.GetTokenErrorDisplay(t)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, d method panics an
+// {@link InputMisMatchException}.
+//
+// EXTRA TOKEN (single token deletion)
+//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
+//
+// MISSING TOKEN (single token insertion)
+//
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
+//
+// EXAMPLE
+//
+// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
+//
+//
+// stat &rarr expr &rarr atom
+//
+//
+// and it will be trying to Match the {@code ')'} at d point in the
+// derivation:
+//
+//
+// => ID '=' '(' INT ')' ('+' atom)* ''
+// ^
+//
+//
+// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+//
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+	// SINGLE TOKEN DELETION
+	MatchedSymbol := d.SingleTokenDeletion(recognizer)
+	if MatchedSymbol != nil {
+		// we have deleted the extra token.
+		// now, move past ttype token as if all were ok
+		recognizer.Consume()
+		return MatchedSymbol
+	}
+	// SINGLE TOKEN INSERTION
+	if d.SingleTokenInsertion(recognizer) {
+		// Conjure up the missing token rather than consuming input.
+		return d.GetMissingSymbol(recognizer)
+	}
+	// even that didn't work must panic the exception
+	panic(NewInputMisMatchException(recognizer))
+}
+
+//
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If d method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce d behavior.
+//
+// @param recognizer the parser instance
+// @return {@code true} if single-token insertion is a viable recovery
+// strategy for the current mismatched input, otherwise {@code false}
+//
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+	currentSymbolType := recognizer.GetTokenStream().LA(1)
+	// if current token is consistent with what could come after current
+	// ATN state, then we know we're missing a token error recovery
+	// is free to conjure up and insert the missing token
+	atn := recognizer.GetInterpreter().atn
+	currentState := atn.states[recognizer.GetState()]
+	// Follow the (single) outgoing transition of the current state to see
+	// what would be expected after the missing token.
+	next := currentState.GetTransitions()[0].getTarget()
+	expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+	if expectingAtLL2.contains(currentSymbolType) {
+		d.ReportMissingToken(recognizer)
+		return true
+	}
+
+	return false
+}
+
+// This method implements the single-token deletion inline error recovery
+// strategy. It is called by {@link //recoverInline} to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// {@code recognizer} will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, d method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning {@link //ReportMatch} is called to signal a successful
+// Match.
+//
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
+//
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+	// Deletion is viable only if the token after the current one is expected.
+	NextTokenType := recognizer.GetTokenStream().LA(2)
+	expecting := d.GetExpectedTokens(recognizer)
+	if expecting.contains(NextTokenType) {
+		d.ReportUnwantedToken(recognizer)
+		// print("recoverFromMisMatchedToken deleting " \
+		// + str(recognizer.GetTokenStream().LT(1)) \
+		// + " since " + str(recognizer.GetTokenStream().LT(2)) \
+		// + " is what we want", file=sys.stderr)
+		recognizer.Consume() // simply delete extra token
+		// we want to return the token we're actually Matching
+		MatchedSymbol := recognizer.GetCurrentToken()
+		d.ReportMatch(recognizer) // we know current token is correct
+		return MatchedSymbol
+	}
+
+	return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want we assume that
+// d token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override d method to create the appropriate tokens.
+//
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+	currentSymbol := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	expectedTokenType := expecting.first()
+	var tokenText string
+
+	// Synthesize placeholder text for the conjured token. The literal
+	// "<missing ...>" strings match the other ANTLR runtimes; they had been
+	// stripped to empty strings here.
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO matches the JS impl
+		}
+	}
+	// Anchor the conjured token at the previous real token when we are at
+	// EOF, so its line/column information is meaningful.
+	current := currentSymbol
+	lookback := recognizer.GetTokenStream().LT(-1)
+	if current.GetTokenType() == TokenEOF && lookback != nil {
+		current = lookback
+	}
+
+	tf := recognizer.GetTokenFactory()
+
+	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+// GetExpectedTokens returns the set of token types the parser expects at
+// its current state, delegating to the recognizer.
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+	return recognizer.GetExpectedTokens()
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
+//
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		// Placeholder matching the other ANTLR runtimes; it had been
+		// stripped to an empty string here.
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	return d.escapeWSAndQuote(s)
+}
+
+// escapeWSAndQuote makes whitespace visible (tab, newline, carriage return
+// become \t, \n, \r) and wraps the result in single quotes for display in
+// error messages.
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+	return "'" + s + "'"
+}
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack d amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r//or* any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+//
+// b : c '^' INT
+// c : ID
+// | INT
+//
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+// a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+// depth follow set start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets--the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
+//
+// Later, Josef Grosch had some good ideas:
+//
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+//
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
+	atn := recognizer.GetInterpreter().atn
+	ctx := recognizer.GetParserRuleContext()
+	recoverSet := NewIntervalSet()
+	// Walk the rule-invocation stack, unioning what can follow each
+	// invoking rule reference.
+	for ctx != nil && ctx.GetInvokingState() >= 0 {
+		// compute what follows who invoked us
+		invokingState := atn.states[ctx.GetInvokingState()]
+		rt := invokingState.GetTransitions()[0]
+		follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+		recoverSet.addSet(follow)
+		ctx = ctx.GetParent().(ParserRuleContext)
+	}
+	recoverSet.removeOne(TokenEpsilon)
+	return recoverSet
+}
+
+// Consume tokens until one Matches the given token set (or EOF is reached).
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+	ttype := recognizer.GetTokenStream().LA(1)
+	for ttype != TokenEOF && !set.contains(ttype) {
+		recognizer.Consume()
+		ttype = recognizer.GetTokenStream().LA(1)
+	}
+}
+
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
+// by immediately canceling the parse operation with a
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+//
+// This error strategy is useful in the following scenarios.
+//
+//
+// Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+// Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+//
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
+//
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
+// BailErrorStrategy embeds DefaultErrorStrategy and overrides only the
+// recovery entry points, so all reporting helpers are inherited.
+type BailErrorStrategy struct {
+	*DefaultErrorStrategy
+}
+
+// Compile-time assertion that BailErrorStrategy satisfies ErrorStrategy.
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+// NewBailErrorStrategy returns a strategy that aborts the parse on the
+// first syntax error instead of attempting recovery.
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+	b := new(BailErrorStrategy)
+
+	b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+	return b
+}
+
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not caught by the
+// rule func catches. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
+//
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+	// Record the exception on every context up the invocation stack so the
+	// partial parse tree reflects where the failure happened.
+	context := recognizer.GetParserRuleContext()
+	for context != nil {
+		context.SetException(e)
+		context = context.GetParent().(ParserRuleContext)
+	}
+	panic(NewParseCancellationException()) // TODO we don't emit e properly
+}
+
+// Make sure we don't attempt to recover inline if the parser
+// successfully recovers, it won't panic an exception.
+//
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+	b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+	// Unreachable: Recover always panics.
+	return nil
+}
+
+// Make sure we don't attempt to recover from problems in subrules.
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+	// pass
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
new file mode 100644
index 00000000000..2ef74926ecb
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+// RecognitionException is the interface implemented by all ANTLR
+// recognition errors.
+type RecognitionException interface {
+	GetOffendingToken() Token
+	GetMessage() string
+	GetInputStream() IntStream
+}
+
+// BaseRecognitionException is the common state shared by all concrete
+// recognition exceptions.
+type BaseRecognitionException struct {
+	message        string
+	recognizer     Recognizer
+	offendingToken Token
+	// offendingState is the ATN state number at the time of the error (-1 if unknown).
+	offendingState int
+	ctx            RuleContext
+	input          IntStream
+}
+
+// NewBaseRecognitionException builds the shared exception state; recognizer
+// may be nil, in which case the offending ATN state is left at -1.
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+	// todo
+	//	Error.call(this)
+	//
+	//	if (!!Error.captureStackTrace) {
+	//        Error.captureStackTrace(this, RecognitionException)
+	//	} else {
+	//		stack := NewError().stack
+	//	}
+	// TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+	t := new(BaseRecognitionException)
+
+	t.message = message
+	t.recognizer = recognizer
+	t.input = input
+	t.ctx = ctx
+	// The current {@link Token} when an error occurred. Since not all streams
+	// support accessing symbols by index, we have to track the {@link Token}
+	// instance itself.
+	t.offendingToken = nil
+	// Get the ATN state number the parser was in at the time the error
+	// occurred. For {@link NoViableAltException} and
+	// {@link LexerNoViableAltException} exceptions, this is the
+	// {@link DecisionState} number. For others, it is the state whose outgoing
+	// edge we couldn't Match.
+	t.offendingState = -1
+	if t.recognizer != nil {
+		t.offendingState = t.recognizer.GetState()
+	}
+
+	return t
+}
+
+// GetMessage returns the human-readable error message.
+func (b *BaseRecognitionException) GetMessage() string {
+	return b.message
+}
+
+// GetOffendingToken returns the token at which the error occurred (may be nil).
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+	return b.offendingToken
+}
+
+// GetInputStream returns the input stream the error occurred in.
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+	return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+//
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was thrown.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+//
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+	if b.recognizer != nil {
+		return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+	}
+
+	return nil
+}
+
+// String returns the error message.
+func (b *BaseRecognitionException) String() string {
+	return b.message
+}
+
+// LexerNoViableAltException is thrown when the lexer cannot match any rule
+// at the current input position.
+type LexerNoViableAltException struct {
+	*BaseRecognitionException
+
+	// startIndex is the input position where the failed match began.
+	startIndex int
+	// deadEndConfigs are the ATN configurations active when the match failed.
+	deadEndConfigs ATNConfigSet
+}
+
+// NewLexerNoViableAltException records the start index and the dead-end ATN
+// configurations for the failed lexer match.
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
+
+	l := new(LexerNoViableAltException)
+
+	l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+	l.startIndex = startIndex
+	l.deadEndConfigs = deadEndConfigs
+
+	return l
+}
+
+// String includes the character at the failure position when available.
+func (l *LexerNoViableAltException) String() string {
+	symbol := ""
+	if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+		symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+	}
+	return "LexerNoViableAltException" + symbol
+}
+
+// NoViableAltException signals that the parser could not choose among two
+// or more alternatives for the remaining input.
+type NoViableAltException struct {
+	*BaseRecognitionException
+
+	// startToken is the first token of the unmatchable input span.
+	startToken     Token
+	offendingToken Token
+	ctx            ParserRuleContext
+	// deadEndConfigs are the ATN configurations tried at the failure point.
+	deadEndConfigs ATNConfigSet
+}
+
+// Indicates that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error. Reported by ReportNoViableAlternative()
+//
+// Any of ctx, offendingToken, startToken, or input may be nil; they default
+// to the recognizer's current values.
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+	if ctx == nil {
+		ctx = recognizer.GetParserRuleContext()
+	}
+
+	if offendingToken == nil {
+		offendingToken = recognizer.GetCurrentToken()
+	}
+
+	if startToken == nil {
+		startToken = recognizer.GetCurrentToken()
+	}
+
+	if input == nil {
+		input = recognizer.GetInputStream().(TokenStream)
+	}
+
+	n := new(NoViableAltException)
+	n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+	// Which configurations did we try at input.Index() that couldn't Match
+	// input.LT(1)?//
+	n.deadEndConfigs = deadEndConfigs
+	// The token object at the start index the input stream might
+	// not be buffering tokens so get a reference to it. (At the
+	// time the error occurred, of course the stream needs to keep a
+	// buffer all of the tokens but later we might not have access to those.)
+	n.startToken = startToken
+	n.offendingToken = offendingToken
+
+	return n
+}
+
+// InputMisMatchException signals that the current input does not match the
+// expected token.
+type InputMisMatchException struct {
+	*BaseRecognitionException
+}
+
+// This signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+//
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+	i := new(InputMisMatchException)
+	i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+	// The mismatch happened at the parser's current token.
+	i.offendingToken = recognizer.GetCurrentToken()
+
+	return i
+
+}
+
+// A semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+
+type FailedPredicateException struct {
+	*BaseRecognitionException
+
+	// ruleIndex and predicateIndex identify the failing predicate transition
+	// in the ATN (both 0 when the transition is not a PredicateTransition).
+	ruleIndex      int
+	predicateIndex int
+	// predicate is the source text of the failing predicate.
+	predicate string
+}
+
+// NewFailedPredicateException captures the failing predicate's location in
+// the ATN and the parser's current token.
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+	f := new(FailedPredicateException)
+
+	f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+	trans := s.GetTransitions()[0]
+	if trans2, ok := trans.(*PredicateTransition); ok {
+		f.ruleIndex = trans2.ruleIndex
+		f.predicateIndex = trans2.predIndex
+	} else {
+		f.ruleIndex = 0
+		f.predicateIndex = 0
+	}
+	f.predicate = predicate
+	f.offendingToken = recognizer.GetCurrentToken()
+
+	return f
+}
+
+// formatMessage returns message if provided, otherwise a default message
+// naming the failed predicate.
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+	if message != "" {
+		return message
+	}
+
+	return "failed predicate: {" + predicate + "}?"
+}
+
+// ParseCancellationException is panicked by BailErrorStrategy to abort the
+// parse on the first syntax error. It carries no state.
+type ParseCancellationException struct {
+}
+
+// NewParseCancellationException returns a new, empty cancellation marker.
+func NewParseCancellationException() *ParseCancellationException {
+	//	Error.call(this)
+	//	Error.captureStackTrace(this, ParseCancellationException)
+	return new(ParseCancellationException)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
new file mode 100644
index 00000000000..842170c086c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "io"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ *InputStream
+
+ filename string
+}
+
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ buf := bytes.NewBuffer(nil)
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ _, err = io.Copy(buf, f)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := new(FileStream)
+
+ fs.filename = fileName
+ s := string(buf.Bytes())
+
+ fs.InputStream = NewInputStream(s)
+
+ return fs, nil
+
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
new file mode 100644
index 00000000000..5ff270f5368
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// InputStream is a CharStream backed by an in-memory buffer of runes,
// typically built from a string.
type InputStream struct {
	name  string
	index int
	data  []rune
	size  int
}

// NewInputStream builds an InputStream over the runes of data. The stream
// starts at index 0 and its size is measured in runes, not bytes.
func NewInputStream(data string) *InputStream {
	runes := []rune(data)
	return &InputStream{
		name:  "",
		index: 0,
		data:  runes,
		size:  len(runes), // number of runes
	}
}
+
// reset rewinds the stream to the first rune.
func (is *InputStream) reset() {
	is.index = 0
}

// Consume advances past the current rune. It panics when the stream is
// already exhausted.
func (is *InputStream) Consume() {
	if is.index >= is.size {
		// assert is.LA(1) == TokenEOF
		panic("cannot consume EOF")
	}
	is.index++
}

// LA returns the rune at the given lookahead offset without consuming it.
// Offset 1 is the current rune, 2 the next, and so on; negative offsets look
// backwards (LA(-1) is the most recently consumed rune). Offset 0 is
// undefined and yields 0; out-of-range positions yield TokenEOF.
func (is *InputStream) LA(offset int) int {

	if offset == 0 {
		return 0 // nil
	}
	if offset < 0 {
		offset++ // e.g., translate LA(-1) to use offset=0
	}
	pos := is.index + offset - 1

	if pos < 0 || pos >= is.size { // invalid
		return TokenEOF
	}

	return int(is.data[pos])
}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing we have entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
new file mode 100644
index 00000000000..438e0ea6e75
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
// IntStream is the minimal stream contract shared by character and token
// streams: sequential consumption with arbitrary lookahead (LA), plus
// Mark/Release bookkeeping for buffering implementations and Seek/Index for
// repositioning.
type IntStream interface {
	Consume()
	LA(int) int
	Mark() int
	Release(marker int)
	Index() int
	Seek(index int)
	Size() int
	GetSourceName() string
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
new file mode 100644
index 00000000000..510d9091141
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
@@ -0,0 +1,296 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
// Interval is a half-open range of integers: Start is included, Stop is not.
type Interval struct {
	Start int
	Stop  int
}

// NewInterval returns the interval [start, stop). Note that stop itself is
// not part of the range.
func NewInterval(start, stop int) *Interval {
	return &Interval{Start: start, Stop: stop}
}

// Contains reports whether item falls inside the interval.
func (i *Interval) Contains(item int) bool {
	return i.Start <= item && item < i.Stop
}

// String renders a single-element interval as its value and a wider one as
// "first..last", with both ends shown inclusively.
func (i *Interval) String() string {
	first := strconv.Itoa(i.Start)
	if i.length() == 1 {
		return first
	}
	return first + ".." + strconv.Itoa(i.Stop-1)
}

// length returns the number of integers covered by the interval.
func (i *Interval) length() int {
	return i.Stop - i.Start
}
+
+type IntervalSet struct {
+ intervals []*Interval
+ readOnly bool
+}
+
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
// addInterval merges v into the ordered, disjoint interval list: the list
// stays sorted by Start and overlapping or adjacent intervals are coalesced.
func (i *IntervalSet) addInterval(v *Interval) {
	if i.intervals == nil {
		i.intervals = make([]*Interval, 0)
		i.intervals = append(i.intervals, v)
	} else {
		// find insert pos
		for k, interval := range i.intervals {
			// distinct range -> insert before the first interval past v
			if v.Stop < interval.Start {
				i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
				return
			} else if v.Stop == interval.Start {
				// v abuts this interval on the left: extend it in place
				i.intervals[k].Start = v.Start
				return
			} else if v.Start <= interval.Stop {
				// overlap: widen the stored interval to cover both
				i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))

				// if not applying to end, merge potential overlaps
				// with the next interval created by the widening
				if k < len(i.intervals)-1 {
					l := i.intervals[k]
					r := i.intervals[k+1]
					// if r contained in l
					if l.Stop >= r.Stop {
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					} else if l.Stop >= r.Start { // partial overlap
						i.intervals[k] = NewInterval(l.Start, r.Stop)
						i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
					}
				}
				return
			}
		}
		// greater than any existing interval: append at the end
		i.intervals = append(i.intervals, v)
	}
}
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ len := 0
+
+ for _, v := range i.intervals {
+ len += v.length()
+ }
+
+ return len
+}
+
// removeRange subtracts the interval v from the set, splitting, trimming or
// deleting stored intervals as needed. Single-element ranges are delegated
// to removeOne.
func (i *IntervalSet) removeRange(v *Interval) {
	if v.Start == v.Stop-1 {
		i.removeOne(v.Start)
	} else if i.intervals != nil {
		k := 0
		// NOTE(review): n only counts iterations; k tracks the slice slot
		// and is decremented after a deletion so the shifted element is
		// revisited on the next pass.
		for n := 0; n < len(i.intervals); n++ {
			ni := i.intervals[k]
			// intervals are ordered; nothing past this point can overlap v
			if v.Stop <= ni.Start {
				return
			} else if v.Start > ni.Start && v.Stop < ni.Stop {
				// v lies strictly inside ni: split ni into two pieces
				i.intervals[k] = NewInterval(ni.Start, v.Start)
				x := NewInterval(v.Stop, ni.Stop)
				// i.intervals.splice(k, 0, x)
				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
				return
			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
				// v swallows ni entirely: delete it
				// i.intervals.splice(k, 1)
				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
				k = k - 1 // need another pass
			} else if v.Start < ni.Stop {
				// v overlaps the tail of ni: trim the right side
				i.intervals[k] = NewInterval(ni.Start, v.Start)
			} else if v.Stop < ni.Stop {
				// v overlaps the head of ni: trim the left side
				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
			}
			k++
		}
	}
}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals i ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, len(i.intervals))
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, ("'" + string(v.Start) + "'"))
+ }
+ } else {
+ names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
// elementName maps token type a to a printable name, preferring the literal
// name and falling back to the symbolic name. EOF and epsilon get fixed
// renderings.
// NOTE(review): an a beyond both name tables would panic on the
// symbolicNames index — callers appear to pass only valid token types;
// confirm before reusing elsewhere.
func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
	if a == TokenEOF {
		return ""
	} else if a == TokenEpsilon {
		return ""
	} else {
		if a < len(literalNames) && literalNames[a] != "" {
			return literalNames[a]
		}

		return symbolicNames[a]
	}
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
new file mode 100644
index 00000000000..02deaf99cdf
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
@@ -0,0 +1,417 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
// A lexer is recognizer that draws input symbols from a character stream.
// lexer grammars result in a subclass of this object. A Lexer object
// uses simplified Match() and error recovery mechanisms in the interest
// of speed.
///

// Lexer is the interface implemented by generated lexers: a token source
// and recognizer that additionally exposes the mutating commands (channel,
// mode, token type) invoked by lexer actions.
type Lexer interface {
	TokenSource
	Recognizer

	Emit() Token

	SetChannel(int)
	PushMode(int)
	PopMode() int
	SetType(int)
	SetMode(int)
}
+
// BaseLexer is the runtime base for generated lexers. It drives an
// ILexerATNSimulator over a CharStream and assembles the resulting tokens.
type BaseLexer struct {
	*BaseRecognizer

	Interpreter         ILexerATNSimulator // the ATN simulator doing the Matching
	TokenStartCharIndex int                // char index where the current token started
	TokenStartLine      int                // line where the current token started
	TokenStartColumn    int                // column where the current token started
	ActionType          int
	Virt                Lexer // The most derived lexer implementation. Allows virtual method calls.

	input                  CharStream
	factory                TokenFactory
	tokenFactorySourcePair *TokenSourceCharStreamPair
	token                  Token // token built by the most recent rule Match, if any
	hitEOF                 bool  // once set, NextToken only emits EOF
	channel                int   // channel for the current token
	thetype                int   // token type for the current token
	modeStack              IntStack
	mode                   int
	text                   string // explicit text override for the current token
}
+
// NewBaseLexer builds a BaseLexer over input with the default token factory.
// The Interpreter field is left nil; generated subclasses must install their
// own simulator before use.
func NewBaseLexer(input CharStream) *BaseLexer {

	lexer := new(BaseLexer)

	lexer.BaseRecognizer = NewBaseRecognizer()

	lexer.input = input
	lexer.factory = CommonTokenFactoryDEFAULT
	lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}

	lexer.Virt = lexer

	lexer.Interpreter = nil // child classes must populate it

	// The goal of all lexer rules/methods is to create a token object.
	// l is an instance variable as multiple rules may collaborate to
	// create a single token. NextToken will return l object after
	// Matching lexer rule(s). If you subclass to allow multiple token
	// emissions, then set l to the last token to be Matched or
	// something nonnil so that the auto token emit mechanism will not
	// emit another token.
	lexer.token = nil

	// What character index in the stream did the current token start at?
	// Needed, for example, to get the text for current token. Set at
	// the start of NextToken.
	lexer.TokenStartCharIndex = -1

	// The line on which the first character of the token resides///
	lexer.TokenStartLine = -1

	// The character position of first character within the line///
	lexer.TokenStartColumn = -1

	// Once we see EOF on char stream, next token will be EOF.
	// If you have DONE : EOF then you see DONE EOF.
	lexer.hitEOF = false

	// The channel number for the current token///
	lexer.channel = TokenDefaultChannel

	// The token type for the current token///
	lexer.thetype = TokenInvalidType

	lexer.modeStack = make([]int, 0)
	lexer.mode = LexerDefaultMode

	// You can set the text for the current token to override what is in
	// the input char buffer. Use setText() or can set l instance var.
	// /
	lexer.text = ""

	return lexer
}
+
// Lexer mode and command sentinels.
const (
	LexerDefaultMode = 0  // the mode every lexer starts in
	LexerMore        = -2 // sentinel type: keep Matching, extend the current token
	LexerSkip        = -3 // sentinel type: discard the current token
)

// Channel aliases and the range of valid character values.
const (
	LexerDefaultTokenChannel = TokenDefaultChannel
	LexerHidden              = TokenHiddenChannel
	LexerMinCharValue        = 0x0000
	LexerMaxCharValue        = 0x10FFFF // maximum Unicode code point
)
+
// reset returns the lexer (and its simulator) to its initial state,
// rewinding the input stream when one is attached.
func (b *BaseLexer) reset() {
	// wack Lexer state variables
	if b.input != nil {
		b.input.Seek(0) // rewind the input
	}
	b.token = nil
	b.thetype = TokenInvalidType
	b.channel = TokenDefaultChannel
	b.TokenStartCharIndex = -1
	b.TokenStartColumn = -1
	b.TokenStartLine = -1
	b.text = ""

	b.hitEOF = false
	b.mode = LexerDefaultMode
	b.modeStack = make([]int, 0)

	b.Interpreter.reset()
}
+
// GetInterpreter returns the ATN simulator driving this lexer.
func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
	return b.Interpreter
}

// GetInputStream returns the character stream being lexed.
func (b *BaseLexer) GetInputStream() CharStream {
	return b.input
}

// GetSourceName reports the grammar file name as the source name.
func (b *BaseLexer) GetSourceName() string {
	return b.GrammarFileName
}

// SetChannel sets the channel for the token currently being Matched.
func (b *BaseLexer) SetChannel(v int) {
	b.channel = v
}

// GetTokenFactory returns the factory used to build tokens.
func (b *BaseLexer) GetTokenFactory() TokenFactory {
	return b.factory
}

// setTokenFactory installs a different token factory.
func (b *BaseLexer) setTokenFactory(f TokenFactory) {
	b.factory = f
}
+
// safeMatch runs one simulator Match, converting a panicked
// RecognitionException into listener notification plus recovery. The named
// return lets the deferred recover substitute LexerSkip as the result in
// that case; any other panic is re-raised implicitly.
func (b *BaseLexer) safeMatch() (ret int) {
	defer func() {
		if e := recover(); e != nil {
			if re, ok := e.(RecognitionException); ok {
				b.notifyListeners(re) // Report error
				b.Recover(re)
				ret = LexerSkip // default
			}
		}
	}()

	return b.Interpreter.Match(b.input, b.mode)
}
+
// Return a token from l source i.e., Match a token on the char stream.
// The outer loop restarts after a skip command; the inner loop re-Matches
// while a more command asks for the current token to be extended.
func (b *BaseLexer) NextToken() Token {
	if b.input == nil {
		panic("NextToken requires a non-nil input stream.")
	}

	tokenStartMarker := b.input.Mark()

	// previously in finally block
	defer func() {
		// make sure we release marker after Match or
		// unbuffered char stream will keep buffering
		b.input.Release(tokenStartMarker)
	}()

	for {
		if b.hitEOF {
			b.EmitEOF()
			return b.token
		}
		// Reset per-token state and record where this token starts.
		b.token = nil
		b.channel = TokenDefaultChannel
		b.TokenStartCharIndex = b.input.Index()
		b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
		b.TokenStartLine = b.Interpreter.GetLine()
		b.text = ""
		continueOuter := false
		for {
			b.thetype = TokenInvalidType
			ttype := LexerSkip

			ttype = b.safeMatch()

			if b.input.LA(1) == TokenEOF {
				b.hitEOF = true
			}
			if b.thetype == TokenInvalidType {
				b.thetype = ttype
			}
			if b.thetype == LexerSkip {
				// skip command: throw this token away and Match another
				continueOuter = true
				break
			}
			// more command: keep Matching to extend the current token
			if b.thetype != LexerMore {
				break
			}
		}

		if continueOuter {
			continue
		}
		if b.token == nil {
			// no rule emitted explicitly; auto-emit the Matched token
			b.Virt.Emit()
		}
		return b.token
	}

	// unreachable: the loop above always returns; kept as in the original
	return nil
}
+
// Instruct the lexer to Skip creating a token for current lexer rule
// and look for another token. NextToken() knows to keep looking when
// a lexer rule finishes with token set to SKIPTOKEN. Recall that
// if token==nil at end of any token rule, it creates one for you
// and emits it.
// /
func (b *BaseLexer) Skip() {
	b.thetype = LexerSkip
}

// More tells NextToken to keep Matching and extend the current token.
func (b *BaseLexer) More() {
	b.thetype = LexerMore
}

// SetMode switches the lexer to mode m without touching the mode stack.
func (b *BaseLexer) SetMode(m int) {
	b.mode = m
}
+
// PushMode saves the current mode on the mode stack and switches to m.
func (b *BaseLexer) PushMode(m int) {
	if LexerATNSimulatorDebug {
		fmt.Println("pushMode " + strconv.Itoa(m))
	}
	b.modeStack.Push(b.mode)
	b.mode = m
}

// PopMode restores the most recently pushed mode and returns it. It panics
// when the mode stack is empty.
func (b *BaseLexer) PopMode() int {
	if len(b.modeStack) == 0 {
		panic("Empty Stack")
	}
	if LexerATNSimulatorDebug {
		fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
	}
	i, _ := b.modeStack.Pop()
	b.mode = i
	return b.mode
}
+
// inputStream returns the current character source.
func (b *BaseLexer) inputStream() CharStream {
	return b.input
}

// setInputStream swaps in a new character source. The input is deliberately
// set to nil first so reset() skips Seeking on the old stream, then the
// factory source pair is rebuilt around the new stream.
func (b *BaseLexer) setInputStream(input CharStream) {
	b.input = nil
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
	b.reset()
	b.input = input
	b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
}

// GetTokenSourceCharStreamPair returns the (token source, char stream) pair
// handed to the token factory.
func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
	return b.tokenFactorySourcePair
}
+
// By default does not support multiple emits per NextToken invocation
// for efficiency reasons. Subclass and override l method, NextToken,
// and GetToken (to push tokens into a list and pull from that list
// rather than a single variable as l implementation does).
// /
func (b *BaseLexer) EmitToken(token Token) {
	b.token = token
}

// The standard method called to automatically emit a token at the
// outermost lexical rule. The token object should point into the
// char buffer start..stop. If there is a text override in 'text',
// use that to set the token's text. Override l method to emit
// custom Token objects or provide a Newfactory.
// /
func (b *BaseLexer) Emit() Token {
	t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
	b.EmitToken(t)
	return t
}

// EmitEOF builds and emits the zero-width EOF token at the current input
// position.
func (b *BaseLexer) EmitEOF() Token {
	cpos := b.GetCharPositionInLine()
	lpos := b.GetLine()
	eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
	b.EmitToken(eof)
	return eof
}
+
// GetCharPositionInLine reports the simulator's current column.
func (b *BaseLexer) GetCharPositionInLine() int {
	return b.Interpreter.GetCharPositionInLine()
}

// GetLine reports the simulator's current line.
func (b *BaseLexer) GetLine() int {
	return b.Interpreter.GetLine()
}

// GetType returns the token type assigned to the current token.
func (b *BaseLexer) GetType() int {
	return b.thetype
}

// SetType sets the token type for the current token.
func (b *BaseLexer) SetType(t int) {
	b.thetype = t
}

// What is the index of the current character of lookahead?///
func (b *BaseLexer) GetCharIndex() int {
	return b.input.Index()
}

// Return the text Matched so far for the current token, or the explicit
// text override when one has been set via SetText.
func (b *BaseLexer) GetText() string {
	if b.text != "" {
		return b.text
	}

	return b.Interpreter.GetText(b.input)
}

// SetText overrides the text of the current token, wiping any previous
// override.
func (b *BaseLexer) SetText(text string) {
	b.text = text
}

// GetATN returns the ATN the simulator is running over.
func (b *BaseLexer) GetATN() *ATN {
	return b.Interpreter.ATN()
}
+
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+// /
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
// notifyListeners reports a token recognition error covering the text from
// the start of the current token up to the present input position.
func (b *BaseLexer) notifyListeners(e RecognitionException) {
	start := b.TokenStartCharIndex
	stop := b.input.Index()
	text := b.input.GetTextFromInterval(NewInterval(start, stop))
	msg := "token recognition error at: '" + text + "'"
	listener := b.GetErrorListenerDispatch()
	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
// Lexers can normally Match any char in it's vocabulary after Matching
// a token, so do the easy thing and just kill a character and hope
// it all works out. You can instead use the rule invocation stack
// to do sophisticated error recovery if you are in a fragment rule.
// /
func (b *BaseLexer) Recover(re RecognitionException) {
	if b.input.LA(1) != TokenEOF {
		if _, ok := re.(*LexerNoViableAltException); ok {
			// Skip a char and try again
			b.Interpreter.Consume(b.input)
		} else {
			// TODO: Do we lose character or line position information?
			b.input.Consume()
		}
	}
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
new file mode 100644
index 00000000000..20df84f9437
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
@@ -0,0 +1,431 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
// Action type codes identifying the concrete LexerAction implementations
// when lexer actions are serialized in the ATN.
const (
	LexerActionTypeChannel  = 0 //The type of a {@link LexerChannelAction} action.
	LexerActionTypeCustom   = 1 //The type of a {@link LexerCustomAction} action.
	LexerActionTypeMode     = 2 //The type of a {@link LexerModeAction} action.
	LexerActionTypeMore     = 3 //The type of a {@link LexerMoreAction} action.
	LexerActionTypePopMode  = 4 //The type of a {@link LexerPopModeAction} action.
	LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
	LexerActionTypeSkip     = 6 //The type of a {@link LexerSkipAction} action.
	LexerActionTypeType     = 7 //The type of a {@link LexerTypeAction} action.
)
+
// LexerAction is a command executed against the lexer while a token is
// being Matched (set channel/mode/type, run custom code, etc.). Actions are
// hashable and comparable so they can be cached inside DFA states.
type LexerAction interface {
	getActionType() int
	getIsPositionDependent() bool
	execute(lexer Lexer)
	hash() int
	equals(other LexerAction) bool
}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(lexer Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) hash() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) equals(other LexerAction) bool {
+ return b == other
+}
+
+//
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+//
+// The {@code Skip} command does not have any parameters, so l action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// Provides a singleton instance of l parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+//
+// The {@code popMode} command does not have any parameters, so l action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+ lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+ return "popMode"
+}
+
+// Implements the {@code more} lexer action by calling {@link Lexer//more}.
+//
+// The {@code more} command does not have any parameters, so l action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+
+type LexerMoreAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+ l := new(LexerMoreAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+
+ return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+// Constructs a New{@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
+type LexerChannelAction struct {
+ *BaseLexerAction
+
+ channel int
+}
+
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
// This implementation of {@link LexerAction} is used for tracking input offsets
// for position-dependent actions within a {@link LexerActionExecutor}.
//
// This action is not serialized as part of the ATN, and is only required for
// position-dependent lexer actions which appear at a location other than the
// end of a rule. For more information about DFA optimizations employed for
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.

// Constructs a Newindexed custom action by associating a character offset
// with a {@link LexerAction}.
//
// Note: This class is only required for lexer actions for which
// {@link LexerAction//isPositionDependent} returns {@code true}.
//
// @param offset The offset into the input {@link CharStream}, relative to
// the token start index, at which the specified lexer action should be
// executed.
// @param action The lexer action to execute at a particular offset in the
// input {@link CharStream}.

type LexerIndexedCustomAction struct {
	*BaseLexerAction

	offset      int
	lexerAction LexerAction
	// NOTE(review): this field shadows the embedded BaseLexerAction's
	// isPositionDependent; getIsPositionDependent is promoted from the
	// embedded struct and reads the embedded field — confirm intent.
	isPositionDependent bool
}

func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {

	l := new(LexerIndexedCustomAction)
	// The wrapper reuses the wrapped action's type code.
	l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())

	l.offset = offset
	l.lexerAction = lexerAction
	l.isPositionDependent = true

	return l
}

// This method calls {@link //execute} on the result of {@link //getAction}
// using the provided {@code lexer}.
func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
	// assume the input stream position was properly set by the calling code
	l.lexerAction.execute(lexer)
}

func (l *LexerIndexedCustomAction) hash() int {
	h := murmurInit(0)
	h = murmurUpdate(h, l.actionType)
	h = murmurUpdate(h, l.offset)
	h = murmurUpdate(h, l.lexerAction.hash())
	return murmurFinish(h, 3)
}

func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
	if l == other {
		return true
	} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
		return false
	} else {
		// NOTE(review): lexerAction is compared with interface ==
		// (identity/value equality), not the LexerAction.equals method.
		return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
	}
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
new file mode 100644
index 00000000000..80b949a1a54
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represents an executor for a sequence of lexer actions which traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+	// lexerActions is the ordered sequence of actions to execute for a token.
+	lexerActions []LexerAction
+	// cachedHash memoizes the murmur hash over lexerActions, computed once in
+	// NewLexerActionExecutor for the hot LexerATNConfig hashing path.
+	cachedHash int
+}
+
+// NewLexerActionExecutor builds an executor over the given action sequence.
+// A nil slice is treated as an empty sequence.
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+	if lexerActions == nil {
+		lexerActions = make([]LexerAction, 0)
+	}
+
+	l := new(LexerActionExecutor)
+
+	l.lexerActions = lexerActions
+
+	// Caches the result of {@link //hashCode} since the hash code is an element
+	// of the performance-critical {@link LexerATNConfig//hashCode} operation.
+	l.cachedHash = murmurInit(57)
+	for _, a := range lexerActions {
+		l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
+	}
+
+	return l
+}
+
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combine actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+	if lexerActionExecutor == nil {
+		return NewLexerActionExecutor([]LexerAction{lexerAction})
+	}
+
+	// NOTE(review): append may write into the existing executor's backing
+	// array when it has spare capacity, aliasing the two executors' slices —
+	// verify against upstream, which copies before appending.
+	return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// Creates a {@link LexerActionExecutor} which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the ATN, the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the DFA representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
+//
+// @param offset The current offset to assign to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// @return A {@link LexerActionExecutor} which stores input stream offsets
+// for all position-dependent lexer actions.
+// /
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+	var updatedLexerActions []LexerAction
+	for i := 0; i < len(l.lexerActions); i++ {
+		// Only wrap actions that are position-dependent and not already
+		// wrapped in a LexerIndexedCustomAction.
+		_, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+		if l.lexerActions[i].getIsPositionDependent() && !ok {
+			// Copy-on-write: clone the action slice the first time a wrap
+			// is needed, so unmodified executors are returned unchanged.
+			if updatedLexerActions == nil {
+				updatedLexerActions = make([]LexerAction, 0)
+
+				for _, a := range l.lexerActions {
+					updatedLexerActions = append(updatedLexerActions, a)
+				}
+			}
+
+			updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+		}
+	}
+	// No position-dependent action needed wrapping: return the receiver as-is.
+	if updatedLexerActions == nil {
+		return l
+	}
+
+	return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// Execute the actions encapsulated by l executor within the context of a
+// particular {@link Lexer}.
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When l method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+// /
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+	requiresSeek := false
+	stopIndex := input.Index()
+
+	// Restore the input position on exit if any indexed action moved it.
+	defer func() {
+		if requiresSeek {
+			input.Seek(stopIndex)
+		}
+	}()
+
+	for i := 0; i < len(l.lexerActions); i++ {
+		lexerAction := l.lexerActions[i]
+		if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+			// Indexed action: seek to its recorded offset, then run the
+			// wrapped action; remember whether we left stopIndex.
+			offset := la.offset
+			input.Seek(startIndex + offset)
+			lexerAction = la.lexerAction
+			requiresSeek = (startIndex + offset) != stopIndex
+		} else if lexerAction.getIsPositionDependent() {
+			// Unindexed position-dependent action runs at the token end.
+			input.Seek(stopIndex)
+			requiresSeek = false
+		}
+		lexerAction.execute(lexer)
+	}
+}
+
+// hash returns the memoized hash of the action sequence; a nil receiver
+// hashes to the fixed sentinel 61.
+func (l *LexerActionExecutor) hash() int {
+	if l == nil {
+		return 61
+	}
+	return l.cachedHash
+}
+
+// equals reports whether other is a LexerActionExecutor equivalent to l.
+func (l *LexerActionExecutor) equals(other interface{}) bool {
+	if l == other {
+		return true
+	} else if _, ok := other.(*LexerActionExecutor); !ok {
+		return false
+	} else {
+		// NOTE(review): comparing &l.lexerActions with &other.lexerActions
+		// compares the addresses of the slice headers, which differ for any
+		// two distinct executors — so this branch effectively requires the
+		// same object. Confirm against upstream, which compares elements.
+		return l.cachedHash == other.(*LexerActionExecutor).cachedHash &&
+			&l.lexerActions == &other.(*LexerActionExecutor).lexerActions
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
new file mode 100644
index 00000000000..131364f75cc
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
@@ -0,0 +1,658 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+var (
+	// LexerATNSimulatorDebug and LexerATNSimulatorDFADebug enable verbose
+	// tracing output from the simulator when set to true.
+	LexerATNSimulatorDebug    = false
+	LexerATNSimulatorDFADebug = false
+
+	// Min/MaxDFAEdge bound which input symbols get cached DFA edges.
+	LexerATNSimulatorMinDFAEdge = 0
+	LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+	// NOTE(review): this package-level counter appears unused here; Match
+	// increments the per-simulator MatchCalls field instead.
+	LexerATNSimulatorMatchCalls = 0
+)
+
+// ILexerATNSimulator is the interface implemented by the lexer's ATN
+// interpreter: token matching plus access to line/column tracking state.
+type ILexerATNSimulator interface {
+	IATNSimulator
+
+	reset()
+	Match(input CharStream, mode int) int
+	GetCharPositionInLine() int
+	GetLine() int
+	GetText(input CharStream) string
+	Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+	*BaseATNSimulator
+
+	// recog is the owning lexer; may be nil (predicates then default to true).
+	recog              Lexer
+	predictionMode     int
+	mergeCache         DoubleDict
+	// startIndex is the current token's start index into the character
+	// stream; -1 until the first Match call.
+	startIndex         int
+	// Line is the 1-based line number within the input.
+	Line               int
+	// CharPositionInLine is the 0-based character index within the line.
+	CharPositionInLine int
+	// mode is the current lexer mode, indexing decisionToDFA.
+	mode               int
+	// prevAccept records the most recent accept state seen during DFA/ATN exec.
+	prevAccept         *SimState
+	// MatchCalls counts invocations of Match on this simulator.
+	MatchCalls         int
+}
+
+// NewLexerATNSimulator constructs a simulator over the given ATN and
+// per-mode DFA cache for the (possibly nil) recognizer.
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+	l := new(LexerATNSimulator)
+
+	l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+	l.decisionToDFA = decisionToDFA
+	l.recog = recog
+	// The current token's starting index into the character stream.
+	// Shared across DFA to ATN simulation in case the ATN fails and the
+	// DFA did not have a previous accept state. In l case, we use the
+	// ATN-generated exception object.
+	l.startIndex = -1
+	// line number 1..n within the input///
+	l.Line = 1
+	// The index of the character relative to the beginning of the line
+	// 0..n-1///
+	l.CharPositionInLine = 0
+	l.mode = LexerDefaultMode
+	// Used during DFA/ATN exec to record the most recent accept configuration
+	// info
+	l.prevAccept = NewSimState()
+	// done
+	return l
+}
+
+// copyState copies the position-tracking fields (column, line, mode, token
+// start index) from another simulator into l.
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+	l.CharPositionInLine = simulator.CharPositionInLine
+	l.Line = simulator.Line
+	l.mode = simulator.mode
+	l.startIndex = simulator.startIndex
+}
+
+// Match returns the token type matched starting at the input's current
+// index in the given lexer mode, using the cached DFA when one exists and
+// falling back to full ATN simulation otherwise.
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+	l.MatchCalls++
+	l.mode = mode
+	mark := input.Mark()
+
+	// Release the stream mark regardless of how matching exits.
+	defer func() {
+		input.Release(mark)
+	}()
+
+	l.startIndex = input.Index()
+	l.prevAccept.reset()
+
+	dfa := l.decisionToDFA[mode]
+
+	// No DFA start state cached for this mode yet: simulate the ATN directly.
+	if dfa.s0 == nil {
+		return l.MatchATN(input)
+	}
+
+	return l.execATN(input, dfa.s0)
+}
+
+// reset restores the simulator to its initial state: no pending accept,
+// no token in progress, line 1 column 0, default mode.
+func (l *LexerATNSimulator) reset() {
+	l.prevAccept.reset()
+	l.startIndex = -1
+	l.Line = 1
+	l.CharPositionInLine = 0
+	l.mode = LexerDefaultMode
+}
+
+// MatchATN performs full ATN simulation from the current mode's start state,
+// seeding (and, absent semantic context, caching) the DFA start state before
+// delegating to execATN.
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+	startState := l.atn.modeToStartState[l.mode]
+
+	if LexerATNSimulatorDebug {
+		fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+	}
+	oldMode := l.mode
+	s0Closure := l.computeStartState(input, startState)
+	// A closure containing semantic context must not be cached as the DFA
+	// start state, since predicates would not be re-evaluated.
+	suppressEdge := s0Closure.hasSemanticContext
+	s0Closure.hasSemanticContext = false
+
+	next := l.addDFAState(s0Closure)
+
+	if !suppressEdge {
+		l.decisionToDFA[l.mode].setS0(next)
+	}
+
+	predict := l.execATN(input, next)
+
+	if LexerATNSimulatorDebug {
+		fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+	}
+	return predict
+}
+
+// execATN drives the main matching loop from DFA state ds0: for each input
+// symbol it reuses a cached DFA edge when available, otherwise computes the
+// target state from the ATN, recording the most recent accept state until no
+// further progress is possible.
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+	if LexerATNSimulatorDebug {
+		fmt.Println("start state closure=" + ds0.configs.String())
+	}
+	if ds0.isAcceptState {
+		// allow zero-length tokens
+		l.captureSimState(l.prevAccept, input, ds0)
+	}
+	t := input.LA(1)
+	s := ds0 // s is current/from DFA state
+
+	for { // while more work
+		if LexerATNSimulatorDebug {
+			fmt.Println("execATN loop starting closure: " + s.configs.String())
+		}
+
+		// As we move src->trg, src->trg, we keep track of the previous trg to
+		// avoid looking up the DFA state again, which is expensive.
+		// If the previous target was already part of the DFA, we might
+		// be able to avoid doing a reach operation upon t. If s!=nil,
+		// it means that semantic predicates didn't prevent us from
+		// creating a DFA state. Once we know s!=nil, we check to see if
+		// the DFA state has an edge already for t. If so, we can just reuse
+		// it's configuration set there's no point in re-computing it.
+		// This is kind of like doing DFA simulation within the ATN
+		// simulation because DFA simulation is really just a way to avoid
+		// computing reach/closure sets. Technically, once we know that
+		// we have a previously added DFA state, we could jump over to
+		// the DFA simulator. But, that would mean popping back and forth
+		// a lot and making things more complicated algorithmically.
+		// This optimization makes a lot of sense for loops within DFA.
+		// A character will take us back to an existing DFA state
+		// that already has lots of edges out of it. e.g., .* in comments.
+		target := l.getExistingTargetState(s, t)
+		if target == nil {
+			target = l.computeTargetState(input, s, t)
+			// print("Computed:" + str(target))
+		}
+		if target == ATNSimulatorError {
+			break
+		}
+		// If l is a consumable input element, make sure to consume before
+		// capturing the accept state so the input index, line, and char
+		// position accurately reflect the state of the interpreter at the
+		// end of the token.
+		if t != TokenEOF {
+			l.Consume(input)
+		}
+		if target.isAcceptState {
+			l.captureSimState(l.prevAccept, input, target)
+			if t == TokenEOF {
+				break
+			}
+		}
+		t = input.LA(1)
+		s = target // flip current DFA target becomes Newsrc/from state
+	}
+
+	// Report the best accept seen, or fail with LexerNoViableAltException.
+	return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// l method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for l edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+	// Symbols outside [MinDFAEdge, MaxDFAEdge] are never cached as edges.
+	if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+		return nil
+	}
+
+	target := s.edges[t-LexerATNSimulatorMinDFAEdge]
+	if LexerATNSimulatorDebug && target != nil {
+		fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+	}
+	return target
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, l method
+// returns {@link //ERROR}.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+	reach := NewOrderedATNConfigSet()
+
+	// if we don't find an existing DFA state
+	// Fill reach starting from closure, following t transitions
+	l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+
+	if len(reach.configs) == 0 { // we got nowhere on t from s
+		if !reach.hasSemanticContext {
+			// we got nowhere on t, don't panic out l knowledge it'd
+			// cause a failover from DFA later.
+			// Cache the dead-end edge so the DFA short-circuits next time.
+			l.addDFAEdge(s, t, ATNSimulatorError, nil)
+		}
+		// stop when we can't Match any more char
+		return ATNSimulatorError
+	}
+	// Add an edge from s to target DFA found/created for reach
+	return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+}
+
+// failOrAccept finishes a match attempt: if an accept state was recorded it
+// runs that state's lexer actions and returns its token type; otherwise it
+// returns EOF (when EOF was the first symbol) or panics with a
+// LexerNoViableAltException.
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+	if l.prevAccept.dfaState != nil {
+		lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+		l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+		return prevAccept.dfaState.prediction
+	}
+
+	// if no accept and EOF is first char, return EOF
+	if t == TokenEOF && input.Index() == l.startIndex {
+		return TokenEOF
+	}
+
+	panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+	// l is used to Skip processing for configs which have a lower priority
+	// than a config that already reached an accept state for the same rule
+	SkipAlt := ATNInvalidAltNumber
+
+	for _, cfg := range closure.GetItems() {
+		currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+		// Skip lower-priority configs once their alt has already accepted,
+		// unless the config is part of a non-greedy decision path.
+		if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+			continue
+		}
+
+		if LexerATNSimulatorDebug {
+
+			fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+		}
+
+		for _, trans := range cfg.GetState().GetTransitions() {
+			target := l.getReachableTarget(trans, t)
+			if target != nil {
+				lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+				if lexerActionExecutor != nil {
+					// Pin position-dependent actions to the current offset
+					// within the token before the Match transition.
+					lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+				}
+				treatEOFAsEpsilon := (t == TokenEOF)
+				config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+				if l.closure(input, config, reach,
+					currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+					// any remaining configs for l alt have a lower priority
+					// than the one that just reached an accept state.
+					SkipAlt = cfg.GetAlt()
+				}
+			}
+		}
+	}
+}
+
+// accept finalizes a matched token: it seeks the input to the token end,
+// restores the recorded line/column, and runs the accumulated lexer actions
+// (when a recognizer is present).
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+	if LexerATNSimulatorDebug {
+		fmt.Printf("ACTION %s\n", lexerActionExecutor)
+	}
+	// seek to after last char in token
+	input.Seek(index)
+	l.Line = line
+	l.CharPositionInLine = charPos
+	if lexerActionExecutor != nil && l.recog != nil {
+		lexerActionExecutor.execute(l.recog, input, startIndex)
+	}
+}
+
+// getReachableTarget returns the transition's target state when the
+// transition matches input symbol t (within the lexer's char range),
+// otherwise nil.
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+	if trans.Matches(t, 0, LexerMaxCharValue) {
+		return trans.getTarget()
+	}
+
+	return nil
+}
+
+// computeStartState builds the initial configuration set for ATN state p by
+// taking the closure of each outgoing transition's target; alt numbers are
+// assigned 1..n in transition order.
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+	configs := NewOrderedATNConfigSet()
+	for i := 0; i < len(p.GetTransitions()); i++ {
+		target := p.GetTransitions()[i].getTarget()
+		cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+		l.closure(input, cfg, configs, false, false, false)
+	}
+
+	return configs
+}
+
+// Since the alternatives within any lexer decision are ordered by
+// preference, l method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from {@code config}, all other (potentially reachable) states for
+// l rule would have a lower priority.
+//
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+	currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+	if LexerATNSimulatorDebug {
+		fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+	}
+
+	// Rule stop state: record the accept and "pop" back to any return states
+	// encoded in the prediction context.
+	_, ok := config.state.(*RuleStopState)
+	if ok {
+
+		if LexerATNSimulatorDebug {
+			if l.recog != nil {
+				fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+			} else {
+				fmt.Printf("closure at rule stop %s\n", config)
+			}
+		}
+
+		if config.context == nil || config.context.hasEmptyPath() {
+			if config.context == nil || config.context.isEmpty() {
+				configs.Add(config, nil)
+				return true
+			}
+
+			configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+			currentAltReachedAcceptState = true
+		}
+		if config.context != nil && !config.context.isEmpty() {
+			for i := 0; i < config.context.length(); i++ {
+				if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+					newContext := config.context.GetParent(i) // "pop" return state
+					returnState := l.atn.states[config.context.getReturnState(i)]
+					cfg := NewLexerATNConfig2(config, returnState, newContext)
+					currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+				}
+			}
+		}
+		return currentAltReachedAcceptState
+	}
+	// optimization
+	if !config.state.GetEpsilonOnlyTransitions() {
+		if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+			configs.Add(config, nil)
+		}
+	}
+	// Recurse through every epsilon-reachable target of this state.
+	for j := 0; j < len(config.state.GetTransitions()); j++ {
+		trans := config.state.GetTransitions()[j]
+		cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+		if cfg != nil {
+			currentAltReachedAcceptState = l.closure(input, cfg, configs,
+				currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+		}
+	}
+	return currentAltReachedAcceptState
+}
+
+// getEpsilonTarget computes the configuration reached by following a single
+// epsilon-like transition (rule, predicate, action, epsilon, or — when
+// treatEOFAsEpsilon — an EOF-matching atom/range/set), or nil if the
+// transition is not traversable.
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+	configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+
+	var cfg *LexerATNConfig
+
+	if trans.getSerializationType() == TransitionRULE {
+
+		// Rule call: push the follow state onto the prediction context.
+		rt := trans.(*RuleTransition)
+		newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+		cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+	} else if trans.getSerializationType() == TransitionPRECEDENCE {
+		panic("Precedence predicates are not supported in lexers.")
+	} else if trans.getSerializationType() == TransitionPREDICATE {
+		// Track traversing semantic predicates. If we traverse,
+		// we cannot add a DFA state for l "reach" computation
+		// because the DFA would not test the predicate again in the
+		// future. Rather than creating collections of semantic predicates
+		// like v3 and testing them on prediction, v4 will test them on the
+		// fly all the time using the ATN not the DFA. This is slower but
+		// semantically it's not used that often. One of the key elements to
+		// l predicate mechanism is not adding DFA states that see
+		// predicates immediately afterwards in the ATN. For example,
+
+		// a : ID {p1}? | ID {p2}?
+
+		// should create the start state for rule 'a' (to save start state
+		// competition), but should not create target of ID state. The
+		// collection of ATN states the following ID references includes
+		// states reached by traversing predicates. Since l is when we
+		// test them, we cannot cash the DFA state target of ID.
+
+		pt := trans.(*PredicateTransition)
+
+		if LexerATNSimulatorDebug {
+			fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+		}
+		configs.SetHasSemanticContext(true)
+		if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+			cfg = NewLexerATNConfig4(config, trans.getTarget())
+		}
+	} else if trans.getSerializationType() == TransitionACTION {
+		if config.context == nil || config.context.hasEmptyPath() {
+			// execute actions anywhere in the start rule for a token.
+			//
+			// TODO: if the entry rule is invoked recursively, some
+			// actions may be executed during the recursive call. The
+			// problem can appear when hasEmptyPath() is true but
+			// isEmpty() is false. In l case, the config needs to be
+			// split into two contexts - one with just the empty path
+			// and another with everything but the empty path.
+			// Unfortunately, the current algorithm does not allow
+			// getEpsilonTarget to return two configurations, so
+			// additional modifications are needed before we can support
+			// the split operation.
+			lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+			cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+		} else {
+			// ignore actions in referenced rules
+			cfg = NewLexerATNConfig4(config, trans.getTarget())
+		}
+	} else if trans.getSerializationType() == TransitionEPSILON {
+		cfg = NewLexerATNConfig4(config, trans.getTarget())
+	} else if trans.getSerializationType() == TransitionATOM ||
+		trans.getSerializationType() == TransitionRANGE ||
+		trans.getSerializationType() == TransitionSET {
+		// Consuming transitions are only epsilon-like when matching EOF.
+		if treatEOFAsEpsilon {
+			if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+				cfg = NewLexerATNConfig4(config, trans.getTarget())
+			}
+		}
+	}
+	return cfg
+}
+
+// Evaluate a predicate specified in the lexer.
+//
+// If {@code speculative} is {@code true}, l method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//getcolumn}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}.
+//
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+// /
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+	// assume true if no recognizer was provided
+	if l.recog == nil {
+		return true
+	}
+	if !speculative {
+		return l.recog.Sempred(nil, ruleIndex, predIndex)
+	}
+	// Speculative path: consume one char so position-sensitive values are
+	// current, then roll everything back via the deferred restore below.
+	savedcolumn := l.CharPositionInLine
+	savedLine := l.Line
+	index := input.Index()
+	marker := input.Mark()
+
+	defer func() {
+		l.CharPositionInLine = savedcolumn
+		l.Line = savedLine
+		input.Seek(index)
+		input.Release(marker)
+	}()
+
+	l.Consume(input)
+	return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+// captureSimState snapshots the current input index, line, column, and DFA
+// accept state into settings for later use by failOrAccept.
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+	settings.index = input.Index()
+	settings.line = l.Line
+	settings.column = l.CharPositionInLine
+	settings.dfaState = dfaState
+}
+
+// addDFAEdge adds (or creates then adds) the DFA state reached from "from"
+// on symbol tk, wiring the edge into the DFA unless the edge is
+// predicate-dependent or tk falls outside the cached edge range.
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+	if to == nil && cfgs != nil {
+		// leading to l call, ATNConfigSet.hasSemanticContext is used as a
+		// marker indicating dynamic predicate evaluation makes l edge
+		// dependent on the specific input sequence, so the static edge in the
+		// DFA should be omitted. The target DFAState is still created since
+		// execATN has the ability to reSynchronize with the DFA state cache
+		// following the predicate evaluation step.
+		//
+		// TJP notes: next time through the DFA, we see a pred again and eval.
+		// If that gets us to a previously created (but dangling) DFA
+		// state, we can continue in pure DFA mode from there.
+		// /
+		suppressEdge := cfgs.HasSemanticContext()
+		cfgs.SetHasSemanticContext(false)
+
+		to = l.addDFAState(cfgs)
+
+		if suppressEdge {
+			return to
+		}
+	}
+	// add the edge
+	if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+		// Only track edges within the DFA bounds
+		return to
+	}
+	if LexerATNSimulatorDebug {
+		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+	}
+	if from.edges == nil {
+		// make room for tokens 1..n and -1 masquerading as index 0
+		from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)
+	}
+	from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect
+
+	return to
+}
+
+// Add a NewDFA state if there isn't one with l set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
+
+	proposed := NewDFAState(-1, configs)
+	var firstConfigWithRuleStopState ATNConfig
+
+	// Find the first (highest-priority) config that reached a rule stop state.
+	for _, cfg := range configs.GetItems() {
+
+		_, ok := cfg.GetState().(*RuleStopState)
+
+		if ok {
+			firstConfigWithRuleStopState = cfg
+			break
+		}
+	}
+	if firstConfigWithRuleStopState != nil {
+		// Mark the state as accepting and record which token to emit plus
+		// which lexer actions to run on accept.
+		proposed.isAcceptState = true
+		proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+		proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+	}
+	hash := proposed.hash()
+	dfa := l.decisionToDFA[l.mode]
+	// Deduplicate: reuse an existing state with the same hash if present.
+	existing, ok := dfa.getState(hash)
+	if ok {
+		return existing
+	}
+	newState := proposed
+	newState.stateNumber = dfa.numStates()
+	configs.SetReadOnly(true)
+	newState.configs = configs
+	dfa.setState(hash, newState)
+	return newState
+}
+
+// getDFA returns the DFA for the given lexer mode.
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+	return l.decisionToDFA[mode]
+}
+
+// Get the text Matched so far for the current token, i.e. the interval
+// [startIndex, input.Index()-1].
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+	// index is first lookahead char, don't include.
+	return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+// Consume advances the input by one character, updating the simulator's
+// line/column tracking (a '\n' starts a new line at column 0).
+func (l *LexerATNSimulator) Consume(input CharStream) {
+	curChar := input.LA(1)
+	if curChar == int('\n') {
+		l.Line++
+		l.CharPositionInLine = 0
+	} else {
+		l.CharPositionInLine++
+	}
+	input.Consume()
+}
+
+// GetCharPositionInLine returns the 0-based column of the current position.
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+	return l.CharPositionInLine
+}
+
+// GetLine returns the 1-based line number of the current position.
+func (l *LexerATNSimulator) GetLine() int {
+	return l.Line
+}
+
+// GetTokenName returns a display name for input symbol tt: "EOF" for -1,
+// otherwise the symbol quoted as a character.
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+	if tt == -1 {
+		return "EOF"
+	}
+
+	// NOTE(review): string(tt) converts the int to the rune with that code
+	// point (flagged by go vet on newer Go); debug-only output here.
+	return "'" + string(tt) + "'"
+}
+
+// resetSimState clears a SimState back to its "no accept recorded" values.
+func resetSimState(sim *SimState) {
+	sim.index = -1
+	sim.line = 0
+	sim.column = -1
+	sim.dfaState = nil
+}
+
+// SimState records the input position (index/line/column) and DFA state of
+// the most recent accept seen while matching a token.
+type SimState struct {
+	index    int
+	line     int
+	column   int
+	dfaState *DFAState
+}
+
+// NewSimState returns a SimState initialized to the cleared values.
+func NewSimState() *SimState {
+	s := new(SimState)
+	resetSimState(s)
+	return s
+}
+
+// reset clears the recorded accept information.
+func (s *SimState) reset() {
+	resetSimState(s)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
new file mode 100644
index 00000000000..f5afd09b393
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
@@ -0,0 +1,215 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+//* Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+///
+const (
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+//*
+// Calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+ look[alt] = NewIntervalSet()
+ lookBusy := NewSet(nil, nil)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+ // Wipe out lookahead for la alternative if we found nothing
+ // or we had a predicate when we !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
+// should be ignored
+//
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+///
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true)
+ return r
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state.
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx The outer context, or {@code nil} if the outer context should
+// not be used.
+// @param look The result lookahead set.
+// @param lookBusy A set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass
+// {@code NewSet} for this argument.
+// @param calledRuleStack A set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass
+// {@code NewBitSet()} for this argument.
+// @param seeThruPreds {@code true} to treat semantic predicates as
+// implicitly {@code true} and "see through them", otherwise {@code false}
+// to treat semantic predicates as opaque and add {@link //HitPred} to the
+// result if one is encountered.
+// @param addEOF Add {@link Token//EOF} to the result if the end of the
+// outermost context is reached. This parameter has no effect if {@code ctx}
+// is {@code nil}.
+
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+
+ removed := calledRuleStack.contains(returnState.GetRuleIndex())
+
+ defer func() {
+ if removed {
+ calledRuleStack.add(returnState.GetRuleIndex())
+ }
+ }()
+
+ calledRuleStack.remove(returnState.GetRuleIndex())
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewBaseATNConfig6(s, 0, ctx)
+
+ if lookBusy.contains(c) {
+ return
+ }
+
+ lookBusy.add(c)
+
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx != BasePredictionContextEMPTY {
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
new file mode 100644
index 00000000000..fb60258e331
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
@@ -0,0 +1,718 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// This is all the parsing support code; essentially most of it is error
+// recovery stuff.//
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ // The {@link ParserRuleContext} object for the currently executing rule.
+ // p.is always non-nil during the parsing process.
+ p.ctx = nil
+ // Specifies whether or not the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
+ // implemented as a parser listener so p.field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+ // The list of {@link ParseTreeListener} listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+ // The number of syntax errors Reported during parsing. p.value is
+ // incremented each time {@link //NotifyErrorListeners} is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized {@link
+// ATN} with
+// bypass alternatives.
+//
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
+//
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state//
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match current input symbol against {@code ttype}. If the symbol type
+// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @param ttype the token type to Match
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// {@code ttype} and the error strategy could not recover from the
+// mismatched symbol
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a Newtoken during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
+
+// Match current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
+// and {@link //consume} are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// a wildcard and the error strategy could not recover from the mismatched
+// symbol
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a Newtoken during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// Registers {@code listener} to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic , i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+// Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+//
+//
+// @param listener the listener to add
+//
+// @panics nilPointerException if {@code} listener is {@code nil}
+//
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
+
+//
+// Remove {@code listener} from the list of parse listeners.
+//
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove
+//
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// Notify any parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+//
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
+//
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// Tell our token source and error strategy about a Newway to create tokens.//
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// The ATN with bypass alternatives is expensive to create so we create it
+// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
+//
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// Set the token stream and reset the parser.//
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// Match needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+//
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.inErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have Newlocalctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// Get the precedence level for the top-most precedence rule.
+//
+// @return The precedence level for the top-most precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+//
+// Like {@link //EnterRule} but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+//
+// return getExpectedTokens().contains(symbol)
+//
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
+// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
+//
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// Return List<String> of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+//
+// This is very useful for error messages.
+
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
+
+// For debugging and other purposes.//
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// For debugging and other purposes.//
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.numStates() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+//
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
new file mode 100644
index 00000000000..128b9a96d4b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
@@ -0,0 +1,1473 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Debug switches for the parser ATN simulator. These are package-level
+// globals and affect every simulator instance in the process.
+var (
+	ParserATNSimulatorDebug            = false
+	ParserATNSimulatorListATNDecisions = false
+	ParserATNSimulatorDFADebug         = false
+	ParserATNSimulatorRetryDebug       = false
+)
+
+// ParserATNSimulator performs SLL/LL prediction over the ATN for a parser,
+// caching results in per-decision DFAs.
+type ParserATNSimulator struct {
+	*BaseATNSimulator
+
+	parser         Parser
+	predictionMode int // SLL, LL, or LL + exact ambiguity detection
+	// input/startIndex/outerContext/dfa hold per-prediction state; they are
+	// set by AdaptivePredict and read down deep in the transition code.
+	input        TokenStream
+	startIndex   int
+	dfa          *DFA
+	mergeCache   *DoubleDict // per-prediction cache for prediction-context merges
+	outerContext ParserRuleContext
+}
+
+// NewParserATNSimulator builds a simulator for the given parser over atn,
+// sharing the decision DFA slice and prediction-context cache with other
+// simulator instances created for the same grammar.
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+	p := new(ParserATNSimulator)
+
+	p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+	p.parser = parser
+	p.decisionToDFA = decisionToDFA
+	// SLL, LL, or LL + exact ambig detection?//
+	p.predictionMode = PredictionModeLL
+	// LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+	p.input = nil
+	p.startIndex = 0
+	p.outerContext = nil
+	p.dfa = nil
+	// Each prediction operation uses a cache for merge of prediction contexts.
+	// Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+	// isn't Synchronized but we're ok since two threads shouldn't reuse same
+	// parser/atnsim object because it can only handle one input at a time.
+	// This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid
+	// the merge if we ever see a and b again. Note that (b,a)&rarrc should
+	// also be examined during cache lookup.
+	//
+	p.mergeCache = nil
+
+	return p
+}
+
+// GetPredictionMode reports the current prediction mode (SLL, LL, or LL
+// with exact ambiguity detection).
+func (p *ParserATNSimulator) GetPredictionMode() int {
+	return p.predictionMode
+}
+
+// SetPredictionMode selects the prediction mode used by AdaptivePredict.
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+	p.predictionMode = v
+}
+
+// reset is a no-op for this simulator; per-prediction state is cleared by
+// AdaptivePredict's deferred cleanup instead.
+func (p *ParserATNSimulator) reset() {
+}
+
+// AdaptivePredict predicts which alternative of the given decision matches
+// the remaining input, consulting the decision's cached DFA first and
+// falling back to ATN simulation (execATN) to extend the DFA as needed.
+// The input stream's position is restored (and its mark released) before
+// returning, via the deferred cleanup below.
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+	if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+		fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
+			" exec LA(1)==" + p.getLookaheadName(input) +
+			" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+			strconv.Itoa(input.LT(1).GetColumn()))
+	}
+
+	// Stash per-prediction state on the simulator; it is read down deep in
+	// the transition code.
+	p.input = input
+	p.startIndex = input.Index()
+	p.outerContext = outerContext
+
+	dfa := p.decisionToDFA[decision]
+	p.dfa = dfa
+	m := input.Mark()
+	index := input.Index()
+
+	defer func() {
+		p.dfa = nil
+		p.mergeCache = nil // wack cache after each prediction
+		input.Seek(index)
+		input.Release(m)
+	}()
+
+	// Now we are certain to have a specific decision's DFA
+	// But, do we still need an initial state?
+	var s0 *DFAState
+	if dfa.precedenceDfa {
+		// the start state for a precedence DFA depends on the current
+		// parser precedence, and is provided by a DFA method.
+		s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+	} else {
+		// the start state for a "regular" DFA is just s0
+		s0 = dfa.s0
+	}
+
+	if s0 == nil {
+		if outerContext == nil {
+			outerContext = RuleContextEmpty
+		}
+		if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+			fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+				" exec LA(1)==" + p.getLookaheadName(input) +
+				", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+		}
+		// If p is not a precedence DFA, we check the ATN start state
+		// to determine if p ATN start state is the decision for the
+		// closure block that determines whether a precedence rule
+		// should continue or complete.
+
+		t2 := dfa.atnStartState
+		t, ok := t2.(*StarLoopEntryState)
+		if !dfa.precedenceDfa && ok {
+			if t.precedenceRuleDecision {
+				// Lazily convert this DFA into a precedence DFA.
+				dfa.setPrecedenceDfa(true)
+			}
+		}
+		fullCtx := false
+		s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
+
+		if dfa.precedenceDfa {
+			// If p is a precedence DFA, we use applyPrecedenceFilter
+			// to convert the computed start state to a precedence start
+			// state. We then use DFA.setPrecedenceStartState to set the
+			// appropriate start state for the precedence level rather
+			// than simply setting DFA.s0.
+			//
+			s0Closure = p.applyPrecedenceFilter(s0Closure)
+			s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+			dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+		} else {
+			s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+			dfa.s0 = s0
+		}
+	}
+	alt := p.execATN(dfa, s0, input, index, outerContext)
+	if ParserATNSimulatorDebug {
+		fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+	}
+	return alt
+
+}
+
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+// if the set is empty, there is no viable alternative for current symbol
+// does the state uniquely predict an alternative?
+// does the state have a conflict that would prevent us from
+// putting it on the work list?
+
+// We also have some key operations to do:
+// add an edge from previous DFA state to potentially NewDFA state, D,
+// upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// collecting predicates and adding semantic context to DFA accept states
+// adding rule context to context-sensitive DFA accept states
+// consuming an input symbol
+// Reporting a conflict
+// Reporting an ambiguity
+// Reporting a context sensitivity
+// Reporting insufficient predicates
+
+// cover these cases:
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
+//
+// execATN performs SLL simulation from DFA state s0, extending the DFA
+// cache with each newly computed state. It returns the predicted
+// alternative, fails over to full-context prediction
+// (execATNWithFullContext) when a conflict requires it, and panics with a
+// NoViableAltException when no alternative is viable.
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+	if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+		fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+			" exec LA(1)==" + p.getLookaheadName(input) +
+			" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+	}
+
+	previousD := s0
+
+	if ParserATNSimulatorDebug {
+		fmt.Println("s0 = " + s0.String())
+	}
+	t := input.LA(1)
+	for { // for more work
+		// Prefer the cached DFA edge; compute (and cache) it on a miss.
+		D := p.getExistingTargetState(previousD, t)
+		if D == nil {
+			D = p.computeTargetState(dfa, previousD, t)
+		}
+		if D == ATNSimulatorError {
+			// if any configs in previous dipped into outer context, that
+			// means that input up to t actually finished entry rule
+			// at least for SLL decision. Full LL doesn't dip into outer
+			// so don't need special case.
+			// We will get an error no matter what so delay until after
+			// decision better error message. Also, no reachable target
+			// ATN states in SLL implies LL will also get nowhere.
+			// If conflict in states that dip out, choose min since we
+			// will get error no matter what.
+			e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+			input.Seek(startIndex)
+			alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+			if alt != ATNInvalidAltNumber {
+				return alt
+			}
+
+			panic(e)
+		}
+		if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+			// IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+			conflictingAlts := D.configs.GetConflictingAlts()
+			if D.predicates != nil {
+				if ParserATNSimulatorDebug {
+					fmt.Println("DFA state has preds in DFA sim LL failover")
+				}
+				conflictIndex := input.Index()
+				if conflictIndex != startIndex {
+					input.Seek(startIndex)
+				}
+				conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+				if conflictingAlts.length() == 1 {
+					if ParserATNSimulatorDebug {
+						fmt.Println("Full LL avoided")
+					}
+					return conflictingAlts.minValue()
+				}
+				if conflictIndex != startIndex {
+					// restore the index so Reporting the fallback to full
+					// context occurs with the index at the correct spot
+					input.Seek(conflictIndex)
+				}
+			}
+			if ParserATNSimulatorDFADebug {
+				fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+			}
+			fullCtx := true
+			s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+			p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+			alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+			return alt
+		}
+		if D.isAcceptState {
+			if D.predicates == nil {
+				return D.prediction
+			}
+			// Accept state with predicates: rewind and evaluate them to
+			// narrow the viable alternatives.
+			stopIndex := input.Index()
+			input.Seek(startIndex)
+			alts := p.evalSemanticContext(D.predicates, outerContext, true)
+			if alts.length() == 0 {
+				panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
+			} else if alts.length() == 1 {
+				return alts.minValue()
+			} else {
+				// Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+				p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+				return alts.minValue()
+			}
+		}
+		previousD = D
+
+		if t != TokenEOF {
+			input.Consume()
+			t = input.LA(1)
+		}
+	}
+
+	// Unreachable: the loop always returns or panics.
+	panic("Should not have reached p state")
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// p method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for p edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+	edges := previousD.edges
+	if edges == nil {
+		return nil
+	}
+
+	// Edges are indexed by t+1 so that token type -1 (EOF) maps to slot 0.
+	// NOTE(review): no upper-bounds check on t+1 here — presumably the edge
+	// array is sized to cover every valid token type; verify in addDFAEdge.
+	return edges[t+1]
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, p method
+// returns {@link //ERROR}.
+
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+	reach := p.computeReachSet(previousD.configs, t, false)
+
+	if reach == nil {
+		// Nothing reachable on t: cache the error edge so we fail fast next time.
+		p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+		return ATNSimulatorError
+	}
+	// create Newtarget state we'll add to DFA after it's complete
+	D := NewDFAState(-1, reach)
+
+	predictedAlt := p.getUniqueAlt(reach)
+
+	if ParserATNSimulatorDebug {
+		altSubSets := PredictionModegetConflictingAltSubsets(reach)
+		fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+			", previous=" + previousD.configs.String() +
+			", configs=" + reach.String() +
+			", predict=" + strconv.Itoa(predictedAlt) +
+			", allSubsetsConflict=" +
+			fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+			", conflictingAlts=" + p.getConflictingAlts(reach).String())
+	}
+	if predictedAlt != ATNInvalidAltNumber {
+		// NO CONFLICT, UNIQUELY PREDICTED ALT
+		D.isAcceptState = true
+		D.configs.SetUniqueAlt(predictedAlt)
+		D.setPrediction(predictedAlt)
+	} else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+		// MORE THAN ONE VIABLE ALTERNATIVE
+		D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
+		D.requiresFullContext = true
+		// in SLL-only mode, we will stop at p state and return the minimum alt
+		D.isAcceptState = true
+		D.setPrediction(D.configs.GetConflictingAlts().minValue())
+	}
+	if D.isAcceptState && D.configs.HasSemanticContext() {
+		// Attach predicate/alt pairs so the accept state is resolved at
+		// parse time by evaluating predicates.
+		p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+		if D.predicates != nil {
+			D.setPrediction(ATNInvalidAltNumber)
+		}
+	}
+	// all adds to dfa are done after we've created full D state
+	// (the returned state may be an existing equivalent, not D itself)
+	D = p.addDFAEdge(dfa, previousD, t, D)
+	return D
+}
+
+// predicateDFAState turns dfaState into an accept state whose prediction
+// is resolved at parse time by evaluating predicates, when its
+// configurations carry semantic context.
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+	// We need to test all predicates, even in DFA states that
+	// uniquely predict alternative.
+	nalts := len(decisionState.GetTransitions())
+	// Update DFA so reach becomes accept state with (predicate,alt)
+	// pairs if preds found for conflicting alts
+	altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+	altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+	if altToPred != nil {
+		dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+		dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+	} else {
+		// There are preds in configs but they might go away
+		// when OR'd together like {p}? || NONE == NONE. If neither
+		// alt has preds, resolve to min alt
+		dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+	}
+}
+
+// execATNWithFullContext performs full-context (LL) prediction after SLL
+// simulation found a conflict; it comes back with reach.uniqueAlt set to a
+// valid alt. It reports context sensitivity or ambiguity as appropriate
+// and panics with a NoViableAltException when no alternative is viable.
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+	if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+		fmt.Println("execATNWithFullContext " + s0.String())
+	}
+
+	fullCtx := true
+	foundExactAmbig := false
+	var reach ATNConfigSet
+	previous := s0
+	input.Seek(startIndex)
+	t := input.LA(1)
+	predictedAlt := -1
+
+	for { // for more work
+		reach = p.computeReachSet(previous, t, fullCtx)
+		if reach == nil {
+			// if any configs in previous dipped into outer context, that
+			// means that input up to t actually finished entry rule
+			// at least for LL decision. Full LL doesn't dip into outer
+			// so don't need special case.
+			// We will get an error no matter what so delay until after
+			// decision better error message. Also, no reachable target
+			// ATN states in SLL implies LL will also get nowhere.
+			// If conflict in states that dip out, choose min since we
+			// will get error no matter what.
+			e := p.noViableAlt(input, outerContext, previous, startIndex)
+			input.Seek(startIndex)
+			alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+			if alt != ATNInvalidAltNumber {
+				return alt
+			}
+
+			panic(e)
+		}
+		altSubSets := PredictionModegetConflictingAltSubsets(reach)
+		if ParserATNSimulatorDebug {
+			fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+				strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+				fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+		}
+		reach.SetUniqueAlt(p.getUniqueAlt(reach))
+		// unique prediction?
+		if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+			predictedAlt = reach.GetUniqueAlt()
+			break
+		} else if p.predictionMode != PredictionModeLLExactAmbigDetection {
+			predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+			if predictedAlt != ATNInvalidAltNumber {
+				break
+			}
+		} else {
+			// In exact ambiguity mode, we never try to terminate early.
+			// Just keeps scarfing until we know what the conflict is
+			if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+				foundExactAmbig = true
+				predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+				break
+			}
+			// else there are multiple non-conflicting subsets or
+			// we're not sure what the ambiguity is yet.
+			// So, keep going.
+		}
+		previous = reach
+		if t != TokenEOF {
+			input.Consume()
+			t = input.LA(1)
+		}
+	}
+	// If the configuration set uniquely predicts an alternative,
+	// without conflict, then we know that it's a full LL decision
+	// not SLL.
+	if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+		p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+		return predictedAlt
+	}
+	// We do not check predicates here because we have checked them
+	// on-the-fly when doing full context prediction.
+
+	//
+	// In non-exact ambiguity detection mode, we might actually be able to
+	// detect an exact ambiguity, but I'm not going to spend the cycles
+	// needed to check. We only emit ambiguity warnings in exact ambiguity
+	// mode.
+	//
+	// For example, we might know that we have conflicting configurations.
+	// But, that does not mean that there is no way forward without a
+	// conflict. It's possible to have nonconflicting alt subsets as in:
+
+	// altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+
+	// from
+	//
+	// [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+	// (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+	//
+	// In p case, (17,1,[5 $]) indicates there is some next sequence that
+	// would resolve p without conflict to alternative 1. Any other viable
+	// next sequence, however, is associated with a conflict. We stop
+	// looking for input because no amount of further lookahead will alter
+	// the fact that we should predict alternative 1. We just can't say for
+	// sure that there is an ambiguity without looking further.
+
+	p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach)
+
+	return predictedAlt
+}
+
+// computeReachSet computes the set of ATN configurations reachable from
+// closure on input symbol t, closing over the intermediate results.
+// Returns nil when nothing is reachable on t.
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
+	if ParserATNSimulatorDebug {
+		fmt.Println("in computeReachSet, starting closure: " + closure.String())
+	}
+	if p.mergeCache == nil {
+		// Lazily created; AdaptivePredict discards it after each prediction.
+		p.mergeCache = NewDoubleDict()
+	}
+	intermediate := NewBaseATNConfigSet(fullCtx)
+
+	// Configurations already in a rule stop state indicate reaching the end
+	// of the decision rule (local context) or end of the start rule (full
+	// context). Once reached, these configurations are never updated by a
+	// closure operation, so they are handled separately for the performance
+	// advantage of having a smaller intermediate set when calling closure.
+	//
+	// For full-context reach operations, separate handling is required to
+	// ensure that the alternative Matching the longest overall sequence is
+	// chosen when multiple such configurations can Match the input.
+
+	var SkippedStopStates []*BaseATNConfig
+
+	// First figure out where we can reach on input t
+	for _, c := range closure.GetItems() {
+		if ParserATNSimulatorDebug {
+			fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+		}
+
+		_, ok := c.GetState().(*RuleStopState)
+
+		if ok {
+			if fullCtx || t == TokenEOF {
+				if SkippedStopStates == nil {
+					SkippedStopStates = make([]*BaseATNConfig, 0)
+				}
+				SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig))
+				if ParserATNSimulatorDebug {
+					fmt.Println("added " + c.String() + " to SkippedStopStates")
+				}
+			}
+			continue
+		}
+
+		for j := 0; j < len(c.GetState().GetTransitions()); j++ {
+			trans := c.GetState().GetTransitions()[j]
+			target := p.getReachableTarget(trans, t)
+			if target != nil {
+				cfg := NewBaseATNConfig4(c, target)
+				intermediate.Add(cfg, p.mergeCache)
+				if ParserATNSimulatorDebug {
+					fmt.Println("added " + cfg.String() + " to intermediate")
+				}
+			}
+		}
+	}
+	// Now figure out where the reach operation can take us...
+	var reach ATNConfigSet
+
+	// This block optimizes the reach operation for intermediate sets which
+	// trivially indicate a termination state for the overall
+	// AdaptivePredict operation.
+	//
+	// The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but p
+	// condition is not true when one or more configurations have been
+	// withheld in SkippedStopStates, or when the current symbol is EOF.
+	//
+	if SkippedStopStates == nil && t != TokenEOF {
+		if len(intermediate.configs) == 1 {
+			// Don't pursue the closure if there is just one state.
+			// It can only have one alternative just add to result
+			// Also don't pursue the closure if there is unique alternative
+			// among the configurations.
+			reach = intermediate
+		} else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+			// Also don't pursue the closure if there is unique alternative
+			// among the configurations.
+			reach = intermediate
+		}
+	}
+	// If the reach set could not be trivially determined, perform a closure
+	// operation on the intermediate set to compute its initial value.
+	//
+	if reach == nil {
+		reach = NewBaseATNConfigSet(fullCtx)
+		closureBusy := NewSet(nil, nil)
+		treatEOFAsEpsilon := t == TokenEOF
+		for k := 0; k < len(intermediate.configs); k++ {
+			p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+		}
+	}
+	if t == TokenEOF {
+		// After consuming EOF no additional input is possible, so we are
+		// only interested in configurations which reached the end of the
+		// decision rule (local context) or end of the start rule (full
+		// context). Update reach to contain only these configurations. This
+		// handles both explicit EOF transitions in the grammar and implicit
+		// EOF transitions following the end of the decision or start rule.
+		//
+		// When reach==intermediate, no closure operation was performed. In
+		// p case, removeAllConfigsNotInRuleStopState needs to check for
+		// reachable rule stop states as well as configurations already in
+		// a rule stop state.
+		//
+		// This is handled before the configurations in SkippedStopStates,
+		// because any configurations potentially added from that list are
+		// already guaranteed to meet p condition whether or not it's
+		// required.
+		//
+		reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
+	}
+	// If SkippedStopStates!=nil, then it contains at least one
+	// configuration. For full-context reach operations, these
+	// configurations reached the end of the start rule, in which case we
+	// only add them back to reach if no configuration during the current
+	// closure operation reached such a state. This ensures AdaptivePredict
+	// chooses an alternative Matching the longest overall sequence when
+	// multiple alternatives are viable.
+	//
+	if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+		for l := 0; l < len(SkippedStopStates); l++ {
+			reach.Add(SkippedStopStates[l], p.mergeCache)
+		}
+	}
+	if len(reach.GetItems()) == 0 {
+		return nil
+	}
+
+	return reach
+}
+
+//
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, p
+// method simply returns {@code configs}.
+//
+// When {@code lookToEndOfRule} is true, p method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, p method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// {@code configs}.
+//
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a Newconfiguration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+//
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
+	if PredictionModeallConfigsInRuleStopStates(configs) {
+		// Fast path: nothing to filter out.
+		return configs
+	}
+	result := NewBaseATNConfigSet(configs.FullContext())
+	for _, config := range configs.GetItems() {
+
+		_, ok := config.GetState().(*RuleStopState)
+
+		if ok {
+			// Already in a rule stop state; keep as-is.
+			result.Add(config, p.mergeCache)
+			continue
+		}
+		if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+			// A rule stop state is reachable via epsilon-only transitions;
+			// substitute the rule's stop state for this configuration.
+			NextTokens := p.atn.NextTokens(config.GetState(), nil)
+			if NextTokens.contains(TokenEpsilon) {
+				endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+				result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
+			}
+		}
+	}
+	return result
+}
+
+// computeStartState seeds a configuration set with one configuration per
+// outgoing transition of state a (alternatives are numbered from 1) and
+// closes over each, using ctx as the initial prediction context.
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
+	// always at least the implicit call to start rule
+	initialContext := predictionContextFromRuleContext(p.atn, ctx)
+	configs := NewBaseATNConfigSet(fullCtx)
+	for i := 0; i < len(a.GetTransitions()); i++ {
+		target := a.GetTransitions()[i].getTarget()
+		c := NewBaseATNConfig6(target, i+1, initialContext)
+		closureBusy := NewSet(nil, nil)
+		p.closure(c, configs, closureBusy, true, fullCtx, false)
+	}
+	return configs
+}
+
+//
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+//
+// Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.
+// Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+//
+// The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+//
+//
+//
+//
+// The prediction context must be considered by p filter to address
+// situations like the following.
+//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement}
+// from being eliminated by the filter.
+//
+//
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+//
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
+
+	// Maps ATN state number -> prediction context for configs predicting
+	// alternative 1; used below to suppress duplicate alt>1 configs.
+	statesFromAlt1 := make(map[int]PredictionContext)
+	configSet := NewBaseATNConfigSet(configs.FullContext())
+
+	for _, config := range configs.GetItems() {
+		// handle alt 1 first
+		if config.GetAlt() != 1 {
+			continue
+		}
+		updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+		if updatedContext == nil {
+			// the configuration was eliminated
+			continue
+		}
+		statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+		if updatedContext != config.GetSemanticContext() {
+			// Precedence evaluation simplified the semantic context;
+			// re-wrap the config with the updated context.
+			configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
+		} else {
+			configSet.Add(config, p.mergeCache)
+		}
+	}
+	for _, config := range configs.GetItems() {
+
+		if config.GetAlt() == 1 {
+			// already handled
+			continue
+		}
+		// In the future, p elimination step could be updated to also
+		// filter the prediction context for alternatives predicting alt>1
+		// (basically a graph subtraction algorithm).
+		if !config.getPrecedenceFilterSuppressed() {
+			context := statesFromAlt1[config.GetState().GetStateNumber()]
+			if context != nil && context.equals(config.GetContext()) {
+				// eliminated
+				continue
+			}
+		}
+		configSet.Add(config, p.mergeCache)
+	}
+	return configSet
+}
+
+// getReachableTarget returns the target state of trans if the transition
+// matches token type ttype, or nil otherwise.
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+	if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+		return trans.getTarget()
+	}
+
+	return nil
+}
+
+// getPredsForAmbigAlts collects, for each ambiguous alternative, the OR of
+// the semantic contexts of its configurations, indexed by alt number
+// (slot 0 unused). Returns nil when no alternative carries a real
+// predicate (i.e. everything reduced to SemanticContextNone).
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
+
+	altToPred := make([]SemanticContext, nalts+1)
+	for _, c := range configs.GetItems() {
+		if ambigAlts.contains(c.GetAlt()) {
+			altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+		}
+	}
+	nPredAlts := 0
+	for i := 1; i < nalts+1; i++ {
+		pred := altToPred[i]
+		if pred == nil {
+			// Unpredicated alt: mark with the NONE sentinel.
+			altToPred[i] = SemanticContextNone
+		} else if pred != SemanticContextNone {
+			nPredAlts++
+		}
+	}
+	// nonambig alts are nil in altToPred
+	if nPredAlts == 0 {
+		altToPred = nil
+	}
+	if ParserATNSimulatorDebug {
+		fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+	}
+	return altToPred
+}
+
+// getPredicatePredictions pairs each ambiguous alternative with its
+// predicate for parse-time evaluation; SemanticContextNone marks an
+// unpredicated alternative. Returns nil when no alternative carries a
+// real predicate.
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+	pairs := make([]*PredPrediction, 0)
+	containsPredicate := false
+	for i := 1; i < len(altToPred); i++ {
+		pred := altToPred[i]
+		// unpredicated is indicated by SemanticContextNONE
+		if ambigAlts != nil && ambigAlts.contains(i) {
+			pairs = append(pairs, NewPredPrediction(pred, i))
+		}
+		if pred != SemanticContextNone {
+			containsPredicate = true
+		}
+	}
+	if !containsPredicate {
+		return nil
+	}
+	return pairs
+}
+
+//
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
+//
+//
+// The default implementation of p method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//
+// If a syntactically valid path or paths reach the end of the decision rule and
+// they are semantically valid if predicated, return the min associated alt.
+// Else, if a semantically invalid but syntactically valid path exist
+// or paths exist, return the minimum associated alt.
+//
+// Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
+//
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, p could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing p alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+//
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The is the \gamma_0 initial parser context from the paper
+// or the parser stack at the instant before prediction commences.
+//
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should Report an error instead.
+//
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
+	// Split configurations by whether their predicates succeed.
+	cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+	semValidConfigs := cfgs[0]
+	semInvalidConfigs := cfgs[1]
+	alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+	if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+		return alt
+	}
+	// Is there a syntactically valid path with a failed pred?
+	if len(semInvalidConfigs.GetItems()) > 0 {
+		alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+		if alt != ATNInvalidAltNumber { // syntactically viable path exists
+			return alt
+		}
+	}
+	return ATNInvalidAltNumber
+}
+
+// GetAltThatFinishedDecisionEntryRule returns the minimum alternative among
+// configs that reached a rule-stop state (or dipped into the outer context),
+// or ATNInvalidAltNumber if no config finished the decision entry rule.
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
+	alts := NewIntervalSet()
+
+	for _, c := range configs.GetItems() {
+		_, ok := c.GetState().(*RuleStopState)
+
+		// A config "finished" if it escaped into the outer context, or if it
+		// sits on a rule stop with an empty path back to the start rule.
+		if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+			alts.addOne(c.GetAlt())
+		}
+	}
+	if alts.length() == 0 {
+		return ATNInvalidAltNumber
+	}
+
+	return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in succeeded set. Returns Pair of sets.
+//
+// Create a NewSet so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point
+// prediction, which is where predicates need to evaluate.
+
+// ATNConfigSetPair pairs two config sets. NOTE(review): this type appears
+// unused within this file — splitAccordingToSemanticValidity returns a
+// slice instead; confirm before removing.
+type ATNConfigSetPair struct {
+	item0, item1 ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+	// Index 0 collects configs whose predicates passed (or had none);
+	// index 1 collects configs whose predicates failed.
+	succeeded := NewBaseATNConfigSet(configs.FullContext())
+	failed := NewBaseATNConfigSet(configs.FullContext())
+
+	for _, c := range configs.GetItems() {
+		if c.GetSemanticContext() != SemanticContextNone {
+			predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+			if predicateEvaluationResult {
+				succeeded.Add(c, nil)
+			} else {
+				failed.Add(c, nil)
+			}
+		} else {
+			// No predicate behaves as an always-true predicate.
+			succeeded.Add(c, nil)
+		}
+	}
+	return []ATNConfigSet{succeeded, failed}
+}
+
+// Look through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+	predictions := NewBitSet()
+	for i := 0; i < len(predPredictions); i++ {
+		pair := predPredictions[i]
+		if pair.pred == SemanticContextNone {
+			// Unpredicated alt: always viable.
+			predictions.add(pair.alt)
+			if !complete {
+				break
+			}
+			continue
+		}
+
+		predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+		if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+			fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+		}
+		if predicateEvaluationResult {
+			if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+				fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+			}
+			predictions.add(pair.alt)
+			if !complete {
+				break
+			}
+		}
+	}
+	return predictions
+}
+
+// closure starts the epsilon-closure computation for config at depth 0,
+// delegating to closureCheckingStopState.
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+	initialDepth := 0
+	p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+		fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+// closureCheckingStopState handles the rule-stop-state portion of the
+// epsilon closure: when config sits on a rule stop, it pops each return
+// state off the prediction context and recurses; otherwise it delegates
+// straight to closureWork.
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+
+	if ParserATNSimulatorDebug {
+		fmt.Println("closure(" + config.String() + ")")
+		fmt.Println("configs(" + configs.String() + ")")
+		// Sanity guard against runaway outer-context recursion (debug only).
+		if config.GetReachesIntoOuterContext() > 50 {
+			panic("problem")
+		}
+	}
+
+	_, ok := config.GetState().(*RuleStopState)
+	if ok {
+		// We hit rule end. If we have context info, use it
+		// run thru all possible stack tops in ctx
+		if !config.GetContext().isEmpty() {
+			for i := 0; i < config.GetContext().length(); i++ {
+				if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+					if fullCtx {
+						configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
+						continue
+					} else {
+						// we have no context info, just chase follow links (if greedy)
+						if ParserATNSimulatorDebug {
+							fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+						}
+						p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+					}
+					continue
+				}
+				returnState := p.atn.states[config.GetContext().getReturnState(i)]
+				newContext := config.GetContext().GetParent(i) // "pop" return state
+
+				c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+				// While we have context to pop back from, we may have
+				// gotten that context AFTER having falling off a rule.
+				// Make sure we track that we are now out of context.
+				c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+				// depth-1: popping a rule moves us back toward the entry rule.
+				p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+			}
+			return
+		} else if fullCtx {
+			// reached end of start rule
+			configs.Add(config, p.mergeCache)
+			return
+		} else {
+			// else if we have no context info, just chase follow links (if greedy)
+			if ParserATNSimulatorDebug {
+				fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+			}
+		}
+	}
+	p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges//
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+	state := config.GetState()
+	// optimization
+	if !state.GetEpsilonOnlyTransitions() {
+		configs.Add(config, p.mergeCache)
+		// make sure to not return here, because EOF transitions can act as
+		// both epsilon transitions and non-epsilon transitions.
+	}
+	for i := 0; i < len(state.GetTransitions()); i++ {
+		t := state.GetTransitions()[i]
+		// Stop collecting predicates once we cross an action transition.
+		_, ok := t.(*ActionTransition)
+		continueCollecting := collectPredicates && !ok
+		c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+		if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
+			// closureBusy.add returns the previously stored value when c was
+			// already seen; dedup here prevents EOF* / EOF+ loops.
+			if !t.getIsEpsilon() && closureBusy.add(c) != c {
+				// avoid infinite recursion for EOF* and EOF+
+				continue
+			}
+			newDepth := depth
+
+			if _, ok := config.GetState().(*RuleStopState); ok {
+
+				// target fell off end of rule mark resulting c as having dipped into outer context
+				// We can't get here if incoming config was rule stop and we had context
+				// track how far we dip into outer context. Might
+				// come in handy and we avoid evaluating context dependent
+				// preds if p is > 0.
+
+				if closureBusy.add(c) != c {
+					// avoid infinite recursion for right-recursive rules
+					continue
+				}
+
+				if p.dfa != nil && p.dfa.precedenceDfa {
+					if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+						c.setPrecedenceFilterSuppressed(true)
+					}
+				}
+
+				c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+				configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method
+				newDepth--
+				if ParserATNSimulatorDebug {
+					fmt.Println("dips into outer ctx: " + c.String())
+				}
+			} else if _, ok := t.(*RuleTransition); ok {
+				// latch when newDepth goes negative - once we step out of the entry context we can't return
+				if newDepth >= 0 {
+					newDepth++
+				}
+			}
+			p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+		}
+	}
+}
+
+// getRuleName resolves a rule index to its name via the attached parser,
+// returning the empty string when no parser is set or the index is negative.
+func (p *ParserATNSimulator) getRuleName(index int) string {
+	if index < 0 || p.parser == nil {
+		return ""
+	}
+	return p.parser.GetRuleNames()[index]
+}
+
+// getEpsilonTarget computes the configuration reached by taking transition t
+// from config, or nil when t cannot be traversed as an epsilon transition.
+// Predicate/precedence transitions may attach semantic context; ATOM, RANGE
+// and SET transitions are only traversable when treatEOFAsEpsilon is set and
+// the transition matches EOF.
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
+
+	switch t.getSerializationType() {
+	case TransitionRULE:
+		return p.ruleTransition(config, t.(*RuleTransition))
+	case TransitionPRECEDENCE:
+		return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionPREDICATE:
+		return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionACTION:
+		return p.actionTransition(config, t.(*ActionTransition))
+	case TransitionEPSILON:
+		return NewBaseATNConfig4(config, t.getTarget())
+	case TransitionATOM, TransitionRANGE, TransitionSET:
+		// The three cases were byte-identical; merged into one multi-value
+		// case. EOF transitions act like epsilon transitions after the first
+		// EOF transition is traversed.
+		if treatEOFAsEpsilon && t.Matches(TokenEOF, 0, 1) {
+			return NewBaseATNConfig4(config, t.getTarget())
+		}
+		return nil
+	default:
+		return nil
+	}
+}
+
+// actionTransition traverses an action transition as plain epsilon; the
+// action itself is not executed during prediction.
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+	if ParserATNSimulatorDebug {
+		fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+	}
+	return NewBaseATNConfig4(config, t.getTarget())
+}
+
+// precedenceTransition traverses a precedence-predicate transition. In full
+// context mode the predicate is evaluated immediately (dropping the config
+// when it fails); otherwise the predicate is folded into the config's
+// semantic context for later evaluation. NOTE(review): this mirrors
+// predTransition below almost line-for-line.
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+	pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+	if ParserATNSimulatorDebug {
+		fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+			strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+		if p.parser != nil {
+			fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+		}
+	}
+	var c *BaseATNConfig
+	if collectPredicates && inContext {
+		if fullCtx {
+			// In full context mode, we can evaluate predicates on-the-fly
+			// during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+			// later during conflict resolution.
+			currentPosition := p.input.Index()
+			// Rewind to the decision start so the predicate sees the input
+			// position it was written against, then restore.
+			p.input.Seek(p.startIndex)
+			predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+			p.input.Seek(currentPosition)
+			if predSucceeds {
+				c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+			}
+		} else {
+			newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+			c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+		}
+	} else {
+		c = NewBaseATNConfig4(config, pt.getTarget())
+	}
+	if ParserATNSimulatorDebug {
+		fmt.Println("config from pred transition=" + c.String())
+	}
+	return c
+}
+
+// predTransition traverses a semantic-predicate transition. Context-dependent
+// predicates are only collected when we are in the original decision context;
+// in full context mode predicates are evaluated on the spot.
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+	if ParserATNSimulatorDebug {
+		fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+			":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+		if p.parser != nil {
+			fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+		}
+	}
+	var c *BaseATNConfig
+	if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) {
+		if fullCtx {
+			// In full context mode, we can evaluate predicates on-the-fly
+			// during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+			// later during conflict resolution.
+			currentPosition := p.input.Index()
+			// Rewind to the decision start for evaluation, then restore.
+			p.input.Seek(p.startIndex)
+			predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+			p.input.Seek(currentPosition)
+			if predSucceeds {
+				c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+			}
+		} else {
+			newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+			c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+		}
+	} else {
+		c = NewBaseATNConfig4(config, pt.getTarget())
+	}
+	if ParserATNSimulatorDebug {
+		fmt.Println("config from pred transition=" + c.String())
+	}
+	return c
+}
+
+// ruleTransition traverses a rule-invocation edge: it pushes the follow state
+// onto the prediction context and moves the config to the called rule's start.
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+	if ParserATNSimulatorDebug {
+		fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+	}
+	returnState := t.followState
+	newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+	return NewBaseATNConfig1(config, t.getTarget(), newContext)
+}
+
+// getConflictingAlts collects the union of alternatives appearing in any
+// conflicting subset of the configuration set.
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
+	altsets := PredictionModegetConflictingAltSubsets(configs)
+	return PredictionModeGetAlts(altsets)
+}
+
+// Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ';' ;
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via [6|2|[]].
+// The key is that we have a single state that has config's only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state→config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not
+// stop working on p state. In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.GetUniqueAlt())
+ } else {
+ conflictingAlts = configs.GetConflictingAlts()
+ }
+ return conflictingAlts
+}
+
+// GetTokenName renders a token type for debug output: "EOF" for TokenEOF,
+// the parser's literal name plus numeric type when available, otherwise the
+// bare numeric type.
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+	if t == TokenEOF {
+		return "EOF"
+	}
+
+	if p.parser != nil && p.parser.GetLiteralNames() != nil {
+		if t >= len(p.parser.GetLiteralNames()) {
+			// Out-of-range types are reported but still rendered numerically.
+			fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
+			// fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
+		} else {
+			return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+		}
+	}
+
+	return strconv.Itoa(t)
+}
+
+// getLookaheadName renders the current 1-token lookahead for debug output.
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+	return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in AdaptivePredict around execATN but I cut
+// it out for clarity now that alg. works well. We can leave p
+// "dead" code for a bit.
+//
+// dumpDeadEndConfigs is an unported debugging aid: calling it panics.
+// The commented-out body below is the partially-translated original and is
+// not valid Go.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
+
+	panic("Not implemented")
+
+	// fmt.Println("dead end configs: ")
+	// var decs = nvae.deadEndConfigs
+	//
+	// for i:=0; i0) {
+	//			var t = c.state.GetTransitions()[0]
+	//			if t2, ok := t.(*AtomTransition); ok {
+	//				trans = "Atom "+ p.GetTokenName(t2.label)
+	//			} else if t3, ok := t.(SetTransition); ok {
+	//				_, ok := t.(*NotSetTransition)
+	//
+	//				var s string
+	//				if (ok){
+	//					s = "~"
+	//				}
+	//
+	//				trans = s + "Set " + t3.set
+	//			}
+	//	}
+	//	fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+	// }
+}
+
+// noViableAlt builds a NoViableAltException spanning from the token at
+// startIndex to the current lookahead token.
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
+	return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+// getUniqueAlt returns the single alternative shared by every configuration
+// in the set, or ATNInvalidAltNumber if the set is empty or mixes alts.
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
+	alt := ATNInvalidAltNumber
+	for _, c := range configs.GetItems() {
+		switch {
+		case alt == ATNInvalidAltNumber:
+			alt = c.GetAlt() // remember the first alt seen
+		case c.GetAlt() != alt:
+			return ATNInvalidAltNumber // two distinct alts: not unique
+		}
+	}
+	return alt
+}
+
+//
+// addDFAEdge adds an edge to the DFA, if possible. It calls addDFAState to
+// ensure the {@code to} state is present in the DFA. If {@code from} is nil,
+// or if {@code t} is outside the range of edges that can be represented in
+// the DFA tables, no edge is recorded.
+//
+// If {@code to} is nil this method returns nil. Otherwise it returns the
+// DFAState returned by addDFAState for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+	if to == nil {
+		return nil
+	}
+	to = p.addDFAState(dfa, to) // used existing if possible not incoming
+	if ParserATNSimulatorDebug && from != nil {
+		// Bug fix: the debug print used to run before the nil checks and
+		// dereferenced from/to, panicking when either was nil under debug.
+		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+	}
+	if from == nil || t < -1 || t > p.atn.maxTokenType {
+		return to
+	}
+	if from.edges == nil {
+		// Lazily allocate; slot 0 is reserved for t == -1 (EOF), hence +1+1.
+		from.edges = make([]*DFAState, p.atn.maxTokenType+1+1)
+	}
+	from.edges[t+1] = to // connect
+
+	if ParserATNSimulatorDebug {
+		var names []string
+		if p.parser != nil {
+			names = p.parser.GetLiteralNames()
+		}
+
+		fmt.Println("DFA=\n" + dfa.String(names, nil))
+	}
+	return to
+}
+
+//
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise p
+// method returns {@code D} after adding it to the DFA.
+//
+// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
+// does not change the DFA.
+//
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
+//
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+	// The shared error sentinel is never stored in a DFA.
+	if d == ATNSimulatorError {
+		return d
+	}
+	hash := d.hash()
+	existing, ok := dfa.getState(hash)
+	if ok {
+		return existing
+	}
+	d.stateNumber = dfa.numStates()
+	// Freeze the config set before publishing the state into the DFA.
+	if !d.configs.ReadOnly() {
+		d.configs.OptimizeConfigs(p.BaseATNSimulator)
+		d.configs.SetReadOnly(true)
+	}
+	dfa.setState(hash, d)
+	if ParserATNSimulatorDebug {
+		fmt.Println("adding NewDFA state: " + d.String())
+	}
+	return d
+}
+
+// ReportAttemptingFullContext notifies the parser's error listeners that SLL
+// prediction found a conflict and full-context (LL) prediction is starting.
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+	}
+}
+
+// ReportContextSensitivity notifies the parser's error listeners that
+// full-context prediction resolved to a single alternative.
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+	}
+}
+
+// If context sensitive parsing, we know it's ambiguity not conflict//
+// ReportAmbiguity notifies the parser's error listeners of an ambiguity over
+// the given token interval; exact indicates a true (exact) ambiguity.
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+	exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+	if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
new file mode 100644
index 00000000000..49cd10c5ffc
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
@@ -0,0 +1,362 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+// ParserRuleContext extends RuleContext with the parse-tree-building
+// operations a parser performs while matching a rule: recording start/stop
+// tokens, attaching token/error/rule children, and listener dispatch.
+type ParserRuleContext interface {
+	RuleContext
+
+	// SetException records the exception that forced this rule to return.
+	SetException(RecognitionException)
+
+	// AddTokenNode / AddErrorNode wrap a token and attach it as a child.
+	AddTokenNode(token Token) *TerminalNodeImpl
+	AddErrorNode(badToken Token) *ErrorNodeImpl
+
+	// EnterRule / ExitRule dispatch to a parse-tree listener.
+	EnterRule(listener ParseTreeListener)
+	ExitRule(listener ParseTreeListener)
+
+	// Start/stop token accessors for the matched region.
+	SetStart(Token)
+	GetStart() Token
+
+	SetStop(Token)
+	GetStop() Token
+
+	AddChild(child RuleContext) RuleContext
+	RemoveLastChild()
+}
+
+// BaseParserRuleContext is the default ParserRuleContext implementation,
+// embedding BaseRuleContext and adding tree children plus the start/stop
+// token span and any exception that aborted the rule.
+type BaseParserRuleContext struct {
+	*BaseRuleContext
+
+	start, stop Token          // first and last tokens matched by this rule
+	exception   RecognitionException // non-nil if the rule aborted
+	children    []Tree         // terminal, error, and rule-context children
+}
+
+// NewBaseParserRuleContext returns a context with the given parent and
+// invoking state, no children, and an unset token span.
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+	prc := new(BaseParserRuleContext)
+
+	prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+
+	prc.RuleIndex = -1
+	// If we are debugging or building a parse tree for a Visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty for parsing w/o tree
+	// construction because we don't need to track the details about
+	// how we parse this rule.
+	prc.children = nil
+	prc.start = nil
+	prc.stop = nil
+	// The exception that forced this rule to return. If the rule
+	// successfully completed, this is nil.
+	prc.exception = nil
+
+	return prc
+}
+
+// SetException records the exception that forced this rule to return.
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+	prc.exception = e
+}
+
+// GetChildren returns the raw child slice (may be nil).
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+	return prc.children
+}
+
+// CopyFrom copies parent, invoking state, and the start/stop span from ctx.
+// Children are deliberately dropped, and the exception is not copied.
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+	// from RuleContext
+	prc.parentCtx = ctx.parentCtx
+	prc.invokingState = ctx.invokingState
+	prc.children = nil
+	prc.start = ctx.start
+	prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// Double dispatch methods for listeners.
+// Both are no-ops here; generated context subclasses presumably override
+// them to call their rule-specific listener methods — confirm against
+// generated parser code.
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+}
+
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+}
+
+// addTerminalNodeChild appends a terminal child. It does not set the parent
+// link; the Add* methods that call it do that. Panics on a nil child.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+	if prc.children == nil {
+		prc.children = make([]Tree, 0)
+	}
+	if child == nil {
+		panic("Child may not be null")
+	}
+	prc.children = append(prc.children, child)
+	return child
+}
+
+// AddChild appends a rule-context child and returns it. Panics on a nil
+// child. As with addTerminalNodeChild, the parent link is not set here.
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+	// Validate before mutating: the previous version allocated the child
+	// slice before the nil check, leaving a side effect behind the panic.
+	if child == nil {
+		panic("Child may not be null")
+	}
+	// append handles a nil slice, so no explicit initialization is needed.
+	prc.children = append(prc.children, child)
+	return child
+}
+
+// RemoveLastChild is used by EnterOuterAlt to toss out a RuleContext that
+// was previously added as we entered a rule: if a label is present, the
+// generic ruleContext object must be removed. A no-op when there are no
+// children.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+	if n := len(prc.children); n > 0 {
+		prc.children = prc.children[:n-1]
+	}
+}
+
+// AddTokenNode wraps token in a TerminalNodeImpl, attaches it as a child,
+// sets its parent link, and returns the node.
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+	node := NewTerminalNodeImpl(token)
+	prc.addTerminalNodeChild(node)
+	node.parentCtx = prc
+	return node
+
+}
+
+// AddErrorNode wraps badToken in an ErrorNodeImpl, attaches it as a child,
+// sets its parent link, and returns the node.
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+	node := NewErrorNodeImpl(badToken)
+	prc.addTerminalNodeChild(node)
+	node.parentCtx = prc
+	return node
+}
+
+// GetChild returns the i-th child (0-based), or nil when i is out of range.
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+	// Bug fix: the previous bounds check (len(children) >= i) admitted
+	// i == len(children) and negative i, both of which panic on indexing.
+	if i >= 0 && i < len(prc.children) {
+		return prc.children[i]
+	}
+
+	return nil
+}
+
+// GetChildOfType returns the i-th child whose concrete type equals
+// childType. A nil childType falls back to a plain positional lookup.
+// NOTE(review): with childType == nil and i out of range, GetChild returns
+// nil and the type assertion panics — confirm callers never do this.
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+	if childType == nil {
+		return prc.GetChild(i).(RuleContext)
+	}
+
+	for j := 0; j < len(prc.children); j++ {
+		child := prc.children[j]
+		if reflect.TypeOf(child) == childType {
+			if i == 0 {
+				return child.(RuleContext)
+			}
+
+			// Count down until the i-th match of the requested type.
+			i--
+		}
+	}
+
+	return nil
+}
+
+// ToStringTree renders this subtree in LISP-style via TreesStringTree.
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+	return TreesStringTree(prc, ruleNames, recog)
+}
+
+// GetRuleContext returns the receiver itself.
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+	return prc
+}
+
+// Accept implements the visitor protocol by visiting this node's children.
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+	return visitor.VisitChildren(prc)
+}
+
+// SetStart records the context's start token.
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+	prc.start = t
+}
+
+// GetStart returns the recorded start token (nil if unset).
+func (prc *BaseParserRuleContext) GetStart() Token {
+	return prc.start
+}
+
+// SetStop records the context's stop token.
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+	prc.stop = t
+}
+
+// GetStop returns the recorded stop token (nil if unset).
+func (prc *BaseParserRuleContext) GetStop() Token {
+	return prc.stop
+}
+
+// GetToken returns the i-th terminal child whose token type is ttype, or nil
+// when there are fewer than i+1 such children.
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+	for j := 0; j < len(prc.children); j++ {
+		child := prc.children[j]
+		if c2, ok := child.(TerminalNode); ok {
+			if c2.GetSymbol().GetTokenType() == ttype {
+				if i == 0 {
+					return c2
+				}
+
+				// Count down until the i-th terminal of this type.
+				i--
+			}
+		}
+	}
+	return nil
+}
+
+// GetTokens returns every terminal child whose token type is ttype, in
+// child order. The result is always non-nil.
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+	result := make([]TerminalNode, 0)
+
+	for _, child := range prc.children {
+		if tn, ok := child.(TerminalNode); ok && tn.GetSymbol().GetTokenType() == ttype {
+			result = append(result, tn)
+		}
+	}
+
+	return result
+}
+
+// GetPayload returns the receiver itself as this node's payload.
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+	return prc
+}
+
+// getChild returns the i-th child whose type implements ctxType (an
+// interface type), or nil when i is out of range or no such child exists.
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+	if prc.children == nil || i < 0 || i >= len(prc.children) {
+		return nil
+	}
+
+	j := -1 // what element have we found with ctxType?
+	for _, o := range prc.children {
+
+		childType := reflect.TypeOf(o)
+
+		if childType.Implements(ctxType) {
+			j++
+			if j == i {
+				return o.(RuleContext)
+			}
+		}
+	}
+	return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+// GetTypedRuleContext returns the i-th child matching ctxType (see getChild).
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+	return prc.getChild(ctxType, i)
+}
+
+// GetTypedRuleContexts returns all children convertible to ctxType, in
+// child order; the result is always non-nil.
+// NOTE(review): this uses ConvertibleTo while getChild uses Implements —
+// confirm the asymmetry is intentional.
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+	if prc.children == nil {
+		return make([]RuleContext, 0)
+	}
+
+	contexts := make([]RuleContext, 0)
+
+	for _, child := range prc.children {
+		childType := reflect.TypeOf(child)
+
+		if childType.ConvertibleTo(ctxType) {
+			contexts = append(contexts, child.(RuleContext))
+		}
+	}
+	return contexts
+}
+
+// GetChildCount reports the number of children of this context.
+func (prc *BaseParserRuleContext) GetChildCount() int {
+	// len of a nil slice is 0, so no explicit nil check is needed.
+	return len(prc.children)
+}
+
+// GetSourceInterval returns the token-index interval [start, stop] covered
+// by this context, or TreeInvalidInterval when either boundary is unset.
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+	if prc.start == nil || prc.stop == nil {
+		return TreeInvalidInterval
+	}
+
+	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// String renders the chain of rule invocations from this context up to (but
+// not including) stop as "[ruleA ruleB ...]". With nil ruleNames, invoking
+// state numbers are printed instead of rule names.
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+	var p ParserRuleContext = prc
+	s := "["
+	for p != nil && p != stop {
+		if ruleNames == nil {
+			if !p.IsEmpty() {
+				s += strconv.Itoa(p.GetInvokingState())
+			}
+		} else {
+			ri := p.GetRuleIndex()
+			var ruleName string
+			// Fall back to the numeric index for out-of-range rule indexes.
+			if ri >= 0 && ri < len(ruleNames) {
+				ruleName = ruleNames[ri]
+			} else {
+				ruleName = strconv.Itoa(ri)
+			}
+			s += ruleName
+		}
+		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+			s += " "
+		}
+		pi := p.GetParent()
+		if pi != nil {
+			p = pi.(ParserRuleContext)
+		} else {
+			p = nil
+		}
+	}
+	s += "]"
+	return s
+}
+
+// RuleContextEmpty is the shared sentinel for "no parent context".
+var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+// InterpreterRuleContext is the context type used by rule interpreters; it
+// adds nothing beyond ParserRuleContext.
+type InterpreterRuleContext interface {
+	ParserRuleContext
+}
+
+// BaseInterpreterRuleContext is the default InterpreterRuleContext
+// implementation.
+type BaseInterpreterRuleContext struct {
+	*BaseParserRuleContext
+}
+
+// NewBaseInterpreterRuleContext builds an interpreter context with the given
+// parent, invoking state, and rule index.
+// NOTE(review): parent is taken by value (not *BaseInterpreterRuleContext);
+// it satisfies ParserRuleContext via the embedded pointer — confirm this is
+// intentional.
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+	prc := new(BaseInterpreterRuleContext)
+
+	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+	prc.RuleIndex = ruleIndex
+
+	return prc
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
new file mode 100644
index 00000000000..99acb333fa2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
@@ -0,0 +1,756 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// BasePredictionContextEmptyReturnState represents {@code $} in local
+// context prediction, where it means wildcard: {@code // + x = //}.
+const (
+	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// In an array context in full-context mode, {@code $} (i.e.
+// EmptyReturnState) does not mean wildcard: {@code $ + x = [$,x]}.
+
+var (
+	BasePredictionContextglobalNodeCount = 1
+	BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+// PredictionContext is an immutable node in the graph that records the
+// rule-invocation stack used during ATN simulation.
+type PredictionContext interface {
+	hash() int
+	GetParent(int) PredictionContext
+	getReturnState(int) int
+	equals(PredictionContext) bool
+	length() int
+	isEmpty() bool
+	hasEmptyPath() bool
+	String() string
+}
+
+// BasePredictionContext carries the hash value that concrete contexts
+// compute once at construction time.
+type BasePredictionContext struct {
+	cachedHash int
+}
+
+// NewBasePredictionContext wraps a precomputed hash value.
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+	pc := new(BasePredictionContext)
+	pc.cachedHash = cachedHash
+
+	return pc
+}
+
+// isEmpty is the default answer; EmptyPredictionContext overrides it.
+func (b *BasePredictionContext) isEmpty() bool {
+	return false
+}
+
+// calculateHash mixes a parent context's hash with a return state using
+// MurmurHash (two words folded in). Calling it with a nil parent panics.
+func calculateHash(parent PredictionContext, returnState int) int {
+	h := murmurInit(1)
+	h = murmurUpdate(h, parent.hash())
+	h = murmurUpdate(h, returnState)
+	return murmurFinish(h, 2)
+}
+
+// calculateEmptyHash is the hash of a context with no parent (zero words).
+func calculateEmptyHash() int {
+	h := murmurInit(1)
+	return murmurFinish(h, 0)
+}
+
+// PredictionContextCache caches BasePredictionContext objects. It is used
+// for the shared context cache associated with contexts in DFA states, and
+// can serve both lexers and parsers.
+//
+// NOTE(review): access to the underlying map is not synchronized here, so
+// callers presumably serialize use — confirm before sharing one cache
+// across goroutines.
+type PredictionContextCache struct {
+	cache map[PredictionContext]PredictionContext
+}
+
+// NewPredictionContextCache returns an empty cache.
+func NewPredictionContextCache() *PredictionContextCache {
+	t := new(PredictionContextCache)
+	t.cache = make(map[PredictionContext]PredictionContext)
+	return t
+}
+
+// add inserts ctx into the cache and returns it. If an entry for ctx
+// already exists, the existing entry is returned and nothing is added.
+// The shared EMPTY context is never cached.
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+	if ctx == BasePredictionContextEMPTY {
+		return BasePredictionContextEMPTY
+	}
+	existing := p.cache[ctx]
+	if existing != nil {
+		return existing
+	}
+	p.cache[ctx] = ctx
+	return ctx
+}
+
+// Get returns the cached entry for ctx, or nil when absent.
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+	return p.cache[ctx]
+}
+
+// length reports the number of cached contexts.
+func (p *PredictionContextCache) length() int {
+	return len(p.cache)
+}
+
+// SingletonPredictionContext marks contexts that hold exactly one
+// (parent, returnState) pair.
+type SingletonPredictionContext interface {
+	PredictionContext
+}
+
+// BaseSingletonPredictionContext is a context node with a single parent
+// and a single return state.
+type BaseSingletonPredictionContext struct {
+	*BasePredictionContext
+
+	parentCtx PredictionContext
+	returnState int
+}
+
+// NewBaseSingletonPredictionContext builds a singleton node; the cached
+// hash folds in the parent when present, otherwise the empty-context hash.
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+
+	s := new(BaseSingletonPredictionContext)
+	s.BasePredictionContext = NewBasePredictionContext(37)
+
+	if parent != nil {
+		s.cachedHash = calculateHash(parent, returnState)
+	} else {
+		s.cachedHash = calculateEmptyHash()
+	}
+
+	s.parentCtx = parent
+	s.returnState = returnState
+
+	return s
+}
+
+// SingletonBasePredictionContextCreate canonicalizes construction: the
+// ($ return state, nil parent) combination maps to the shared EMPTY node.
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+	if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+		// someone can pass in the bits of an array ctx that mean $
+		return BasePredictionContextEMPTY
+	}
+
+	return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+// length is always 1: a singleton holds one (parent, returnState) pair.
+func (b *BaseSingletonPredictionContext) length() int {
+	return 1
+}
+
+// GetParent returns the sole parent; index is ignored.
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+	return b.parentCtx
+}
+
+// getReturnState returns the sole return state; index is ignored.
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+	return b.returnState
+}
+
+// hasEmptyPath reports whether this node's return state is the $ marker.
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+	return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+// equals reports structural equality with another singleton context: same
+// return state and recursively equal parents. Note that the type assertion
+// below fails for *EmptyPredictionContext (it embeds this type but is a
+// distinct concrete type), so EMPTY compares equal only to itself.
+func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
+	if b == other {
+		return true
+	} else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+		return false
+	} else if b.hash() != other.hash() {
+		return false // can't be same if hash is different
+	}
+
+	otherP := other.(*BaseSingletonPredictionContext)
+
+	if b.returnState != other.getReturnState(0) {
+		return false
+	} else if b.parentCtx == nil {
+		return otherP.parentCtx == nil
+	}
+
+	return b.parentCtx.equals(otherP.parentCtx)
+}
+
+// hash recomputes the MurmurHash of (parent, returnState) on every call.
+// NOTE(review): cachedHash already holds this value from the constructor —
+// presumably this could return b.cachedHash directly; confirm before
+// changing.
+func (b *BaseSingletonPredictionContext) hash() int {
+	h := murmurInit(1)
+
+	if b.parentCtx == nil {
+		return murmurFinish(h, 0)
+	}
+
+	h = murmurUpdate(h, b.parentCtx.hash())
+	h = murmurUpdate(h, b.returnState)
+	return murmurFinish(h, 2)
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
+
+// BasePredictionContextEMPTY is the shared $ context: no parent and the
+// empty return state.
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+// EmptyPredictionContext is the concrete type behind
+// BasePredictionContextEMPTY.
+type EmptyPredictionContext struct {
+	*BaseSingletonPredictionContext
+}
+
+// NewEmptyPredictionContext builds the $ context node.
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+	p := new(EmptyPredictionContext)
+
+	p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+
+	return p
+}
+
+// isEmpty overrides the base: the $ context is empty by definition.
+func (e *EmptyPredictionContext) isEmpty() bool {
+	return true
+}
+
+// GetParent always returns nil: $ has no parent.
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+	return nil
+}
+
+// getReturnState returns the $ return-state marker.
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+	return e.returnState
+}
+
+// equals is identity comparison: only the shared EMPTY instance matches.
+func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
+	return e == other
+}
+
+// String renders the empty context as "$".
+func (e *EmptyPredictionContext) String() string {
+	return "$"
+}
+
+// ArrayPredictionContext represents a context with several alternative
+// (parent, returnState) pairs, kept sorted by return state.
+type ArrayPredictionContext struct {
+	*BasePredictionContext
+
+	parents []PredictionContext
+	returnStates []int
+}
+
+// NewArrayPredictionContext pairs each parents[i] with returnStates[i].
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+	// Parent can be nil only if full ctx mode and we make an array
+	// from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+	// nil parent and
+	// returnState == {@link //EmptyReturnState}.
+
+	c := new(ArrayPredictionContext)
+	c.BasePredictionContext = NewBasePredictionContext(37)
+
+	// NOTE(review): cachedHash is accumulated additively here, which does
+	// not match the MurmurHash scheme used by this type's hash() method —
+	// confirm which value callers rely on. Also, calculateHash invokes
+	// parents[i].hash(), so a nil parent entry would panic here.
+	for i := range parents {
+		c.cachedHash += calculateHash(parents[i], returnStates[i])
+	}
+
+	c.parents = parents
+	c.returnStates = returnStates
+
+	return c
+}
+
+// GetReturnStates exposes the sorted return-state slice.
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+	return a.returnStates
+}
+
+// hasEmptyPath checks the last slot, where the $ marker always sorts
+// (it is the maximum possible return-state value).
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+	return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+// isEmpty reports whether this array is just [$] (size 1 holding the
+// empty return state).
+func (a *ArrayPredictionContext) isEmpty() bool {
+	// since EmptyReturnState can only appear in the last position, we
+	// don't need to verify that size==1
+	return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+// length returns the number of (parent, returnState) pairs.
+func (a *ArrayPredictionContext) length() int {
+	return len(a.returnStates)
+}
+
+// GetParent returns the parent paired with returnStates[index].
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+	return a.parents[index]
+}
+
+// getReturnState returns the return state at index.
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+	return a.returnStates[index]
+}
+
+// equals reports structural equality with another ArrayPredictionContext:
+// identical return states and pairwise-equal parents.
+//
+// Fixes two defects in the previous implementation:
+//  1. It compared &a.returnStates with &otherP.returnStates — the
+//     addresses of the slice-header fields of two distinct structs — which
+//     is true only when a and otherP are the very same object, so
+//     structurally equal contexts were never detected and context
+//     deduplication degraded to identity comparison.
+//  2. It short-circuited on a.cachedHash != other.hash(), but cachedHash
+//     is filled additively by NewArrayPredictionContext while hash() uses
+//     MurmurHash, so the two values rarely agree and genuinely equal
+//     contexts were rejected. The unreliable shortcut is removed; we
+//     compare contents directly, mirroring the reference Java runtime.
+func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
+	if a == other {
+		return true
+	}
+	otherP, ok := other.(*ArrayPredictionContext)
+	if !ok {
+		return false
+	}
+	if len(a.returnStates) != len(otherP.returnStates) {
+		return false
+	}
+	for i, rs := range a.returnStates {
+		if rs != otherP.returnStates[i] {
+			return false
+		}
+	}
+	for i, parent := range a.parents {
+		otherParent := otherP.parents[i]
+		// Parents may be nil for a merged $ entry; both must then be nil.
+		if parent == nil || otherParent == nil {
+			if parent != otherParent {
+				return false
+			}
+			continue
+		}
+		if !parent.equals(otherParent) {
+			return false
+		}
+	}
+	return true
+}
+
+// hash folds every parent hash and then every return state into a single
+// MurmurHash value.
+// NOTE(review): this differs from cachedHash, which the constructor fills
+// additively — confirm which value callers should rely on. A nil parent
+// entry would panic on p.hash().
+func (a *ArrayPredictionContext) hash() int {
+	h := murmurInit(1)
+
+	for _, p := range a.parents {
+		h = murmurUpdate(h, p.hash())
+	}
+
+	for _, r := range a.returnStates {
+		h = murmurUpdate(h, r)
+	}
+
+	return murmurFinish(h, 2 * len(a.parents))
+}
+
+// String renders the context as "[s0 parent0, s1 parent1, ...]", writing
+// "$" for the empty-return-state marker and "nil" for an absent parent.
+func (a *ArrayPredictionContext) String() string {
+	if a.isEmpty() {
+		return "[]"
+	}
+
+	result := "["
+	for idx, state := range a.returnStates {
+		if idx > 0 {
+			result += ", "
+		}
+		if state == BasePredictionContextEmptyReturnState {
+			result += "$"
+			continue
+		}
+		result += strconv.Itoa(state)
+		if parent := a.parents[idx]; parent != nil {
+			result += " " + parent.String()
+		} else {
+			result += "nil"
+		}
+	}
+
+	return result + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+	if outerContext == nil {
+		outerContext = RuleContextEmpty
+	}
+	// if we are in RuleContext of start rule, s, then BasePredictionContext
+	// is EMPTY. Nobody called us. (if we are empty, return empty)
+	if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
+		return BasePredictionContextEMPTY
+	}
+	// If we have a parent, convert it to a BasePredictionContext graph
+	parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+	state := a.states[outerContext.GetInvokingState()]
+	transition := state.GetTransitions()[0]
+
+	// The rule transition's followState is where the caller resumes after
+	// the invoked rule returns.
+	return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+// merge combines two prediction contexts, dispatching to mergeSingletons
+// or mergeArrays. rootIsWildcard selects local-context semantics (EMPTY
+// absorbs the other side). mergeCache, when non-nil, memoizes results.
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+	// share same graph if both same
+	if a == b {
+		return a
+	}
+
+	ac, ok1 := a.(*BaseSingletonPredictionContext)
+	bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+	if ok1 && ok2 {
+		return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+	}
+	// At least one of a or b is array
+	// If one is $ and rootIsWildcard, return $ as// wildcard
+	if rootIsWildcard {
+		if _, ok := a.(*EmptyPredictionContext); ok {
+			return a
+		}
+		if _, ok := b.(*EmptyPredictionContext); ok {
+			return b
+		}
+	}
+	// convert singleton so both are arrays to normalize
+	if _, ok := a.(*BaseSingletonPredictionContext); ok {
+		a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+	}
+	if _, ok := b.(*BaseSingletonPredictionContext); ok {
+		b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+	}
+	// NOTE(review): with rootIsWildcard false an *EmptyPredictionContext
+	// reaches this point untouched (it is not a *BaseSingletonPredictionContext),
+	// and the type assertions below would panic — confirm callers never mix
+	// EMPTY with an array context in full-context mode.
+	return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
+}
+
+//
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+// Stack tops equal, parents merge is same return left graph.
+//
+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A Newroot node is created to point to the
+// merged parents.
+//
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+// mergeSingletons merges two singleton contexts following the cases
+// documented above, consulting and filling mergeCache (in both argument
+// orders) when one is provided.
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+	if mergeCache != nil {
+		previous := mergeCache.Get(a.hash(), b.hash())
+		if previous != nil {
+			return previous.(PredictionContext)
+		}
+		previous = mergeCache.Get(b.hash(), a.hash())
+		if previous != nil {
+			return previous.(PredictionContext)
+		}
+	}
+
+	// Delegate the cases where one side is EMPTY.
+	rootMerge := mergeRoot(a, b, rootIsWildcard)
+	if rootMerge != nil {
+		if mergeCache != nil {
+			mergeCache.set(a.hash(), b.hash(), rootMerge)
+		}
+		return rootMerge
+	}
+	if a.returnState == b.returnState {
+		parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+		// if parent is same as existing a or b parent or reduced to a parent,
+		// return it
+		if parent == a.parentCtx {
+			return a // ax + bx = ax, if a=b
+		}
+		if parent == b.parentCtx {
+			return b // ax + bx = bx, if a=b
+		}
+		// else: ax + ay = a'[x,y]
+		// merge parents x and y, giving array node with x,y then remainders
+		// of those graphs. dup a, a' points at merged array
+		// Newjoined parent so create Newsingleton pointing to it, a'
+		spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+		if mergeCache != nil {
+			mergeCache.set(a.hash(), b.hash(), spc)
+		}
+		return spc
+	}
+	// a != b payloads differ
+	// see if we can collapse parents due to $+x parents if local ctx
+	var singleParent PredictionContext
+	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+		// bx =
+		// [a,b]x
+		singleParent = a.parentCtx
+	}
+	if singleParent != nil { // parents are same
+		// sort payloads and use same parent
+		payloads := []int{a.returnState, b.returnState}
+		if a.returnState > b.returnState {
+			payloads[0] = b.returnState
+			payloads[1] = a.returnState
+		}
+		parents := []PredictionContext{singleParent, singleParent}
+		apc := NewArrayPredictionContext(parents, payloads)
+		if mergeCache != nil {
+			mergeCache.set(a.hash(), b.hash(), apc)
+		}
+		return apc
+	}
+	// parents differ and can't merge them. Just pack together
+	// into array can't merge.
+	// ax + by = [ax,by]
+	payloads := []int{a.returnState, b.returnState}
+	parents := []PredictionContext{a.parentCtx, b.parentCtx}
+	if a.returnState > b.returnState { // sort by payload
+		payloads[0] = b.returnState
+		payloads[1] = a.returnState
+		parents = []PredictionContext{b.parentCtx, a.parentCtx}
+	}
+	apc := NewArrayPredictionContext(parents, payloads)
+	if mergeCache != nil {
+		mergeCache.set(a.hash(), b.hash(), apc)
+	}
+	return apc
+}
+
+//
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+//
+// Special case of last merge if local context.
+//
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+//
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+//
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+// mergeRoot handles the merge cases where at least one side is EMPTY,
+// returning nil when neither side is EMPTY so the caller falls through to
+// a regular merge.
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+	if rootIsWildcard {
+		// Local-context mode: EMPTY is a wildcard absorbing the other side.
+		if a == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // // + b =//
+		}
+		if b == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // a +// =//
+		}
+	} else {
+		if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // $ + $ = $
+		} else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+			// Index -1 relies on the singleton accessors ignoring it.
+			payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+			parents := []PredictionContext{b.GetParent(-1), nil}
+			return NewArrayPredictionContext(parents, payloads)
+		} else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
+			payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+			parents := []PredictionContext{a.GetParent(-1), nil}
+			return NewArrayPredictionContext(parents, payloads)
+		}
+	}
+	return nil
+}
+
+//
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+// Different tops, different parents.
+//
+//
+// Shared top, same parents.
+//
+//
+// Shared top, different parents.
+//
+//
+// Shared top, all shared parents.
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+// mergeArrays merges two array contexts by walking both sorted
+// return-state lists simultaneously (a sorted-merge), combining entries
+// with equal payloads and trimming the result; a single surviving entry
+// collapses back to a singleton.
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+	if mergeCache != nil {
+		previous := mergeCache.Get(a.hash(), b.hash())
+		if previous != nil {
+			return previous.(PredictionContext)
+		}
+		previous = mergeCache.Get(b.hash(), a.hash())
+		if previous != nil {
+			return previous.(PredictionContext)
+		}
+	}
+	// merge sorted payloads a + b => M
+	i := 0 // walks a
+	j := 0 // walks b
+	k := 0 // walks target M array
+
+	mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+	mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+	// walk and merge to yield mergedParents, mergedReturnStates
+	for i < len(a.returnStates) && j < len(b.returnStates) {
+		aParent := a.parents[i]
+		bParent := b.parents[j]
+		if a.returnStates[i] == b.returnStates[j] {
+			// same payload (stack tops are equal), must yield merged singleton
+			payload := a.returnStates[i]
+			// $+$ = $
+			bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+			axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+			// ->
+			// ax
+			if bothDollars || axAX {
+				mergedParents[k] = aParent // choose left
+				mergedReturnStates[k] = payload
+			} else { // ax+ay -> a'[x,y]
+				mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+				mergedParents[k] = mergedParent
+				mergedReturnStates[k] = payload
+			}
+			i++ // hop over left one as usual
+			j++ // but also Skip one in right side since we merge
+		} else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+			mergedParents[k] = aParent
+			mergedReturnStates[k] = a.returnStates[i]
+			i++
+		} else { // b > a, copy b[j] to M
+			mergedParents[k] = bParent
+			mergedReturnStates[k] = b.returnStates[j]
+			j++
+		}
+		k++
+	}
+	// copy over any payloads remaining in either array
+	if i < len(a.returnStates) {
+		for p := i; p < len(a.returnStates); p++ {
+			mergedParents[k] = a.parents[p]
+			mergedReturnStates[k] = a.returnStates[p]
+			k++
+		}
+	} else {
+		for p := j; p < len(b.returnStates); p++ {
+			mergedParents[k] = b.parents[p]
+			mergedReturnStates[k] = b.returnStates[p]
+			k++
+		}
+	}
+	// trim merged if we combined a few that had same stack tops
+	if k < len(mergedParents) { // write index < last position trim
+		if k == 1 { // for just one merged element, return singleton top
+			pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+			if mergeCache != nil {
+				mergeCache.set(a.hash(), b.hash(), pc)
+			}
+			return pc
+		}
+		mergedParents = mergedParents[0:k]
+		mergedReturnStates = mergedReturnStates[0:k]
+	}
+
+	M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+	// if we created same array as a or b, return that instead
+	// TODO: track whether this is possible above during merge sort for speed
+	// NOTE(review): M is freshly allocated above, so these identity
+	// comparisons can never succeed; the TODO suggests content tracking is
+	// what is actually intended here.
+	if M == a {
+		if mergeCache != nil {
+			mergeCache.set(a.hash(), b.hash(), a)
+		}
+		return a
+	}
+	if M == b {
+		if mergeCache != nil {
+			mergeCache.set(a.hash(), b.hash(), b)
+		}
+		return b
+	}
+	combineCommonParents(mergedParents)
+
+	if mergeCache != nil {
+		mergeCache.set(a.hash(), b.hash(), M)
+	}
+	return M
+}
+
+//
+// Make pass over all M {@code parents} merge any {@code equals()}
+// ones.
+// /
+// combineCommonParents canonicalizes the parents slice in place: the first
+// occurrence of each distinct parent value becomes the representative, and
+// every later duplicate entry is rewritten to reference it.
+func combineCommonParents(parents []PredictionContext) {
+	canonical := make(map[PredictionContext]PredictionContext)
+
+	for _, parent := range parents {
+		if _, seen := canonical[parent]; !seen {
+			canonical[parent] = parent
+		}
+	}
+	for i, parent := range parents {
+		parents[i] = canonical[parent]
+	}
+}
+
+// getCachedBasePredictionContext returns a canonical version of context,
+// reusing nodes from contextCache where possible. visited memoizes the
+// result for every input node so shared subgraphs are rewritten only once.
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+	if context.isEmpty() {
+		return context
+	}
+	existing := visited[context]
+	if existing != nil {
+		return existing
+	}
+	existing = contextCache.Get(context)
+	if existing != nil {
+		visited[context] = existing
+		return existing
+	}
+	changed := false
+	parents := make([]PredictionContext, context.length())
+	for i := 0; i < len(parents); i++ {
+		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+		// Copy-on-write: materialize a fresh parents slice only once the
+		// first rewritten parent is discovered.
+		if changed || parent != context.GetParent(i) {
+			if !changed {
+				parents = make([]PredictionContext, context.length())
+				for j := 0; j < context.length(); j++ {
+					parents[j] = context.GetParent(j)
+				}
+				changed = true
+			}
+			parents[i] = parent
+		}
+	}
+	if !changed {
+		// No parent changed: cache and return the original node itself.
+		contextCache.add(context)
+		visited[context] = context
+		return context
+	}
+	var updated PredictionContext
+	if len(parents) == 0 {
+		updated = BasePredictionContextEMPTY
+	} else if len(parents) == 1 {
+		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+	} else {
+		updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+	}
+	contextCache.add(updated)
+	visited[updated] = updated
+	visited[context] = updated
+
+	return updated
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
new file mode 100644
index 00000000000..15718f912bc
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
@@ -0,0 +1,553 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+	// PredictionModeSLL is the SLL(*) prediction mode. It ignores the
+	// current parser context, making it the fastest mode; it is stronger
+	// than ANTLR 3's prediction but may raise syntax errors for
+	// grammar/input combinations that are not SLL. The parser either
+	// returns the same parse tree LL would, or reports a syntax error —
+	// which may indicate truly invalid input, or that the combination
+	// needs the more powerful LL mode. No guarantees are made for
+	// syntactically incorrect inputs.
+	PredictionModeSLL = 0
+	// PredictionModeLL is the LL(*) prediction mode. It consults the
+	// current parser context to resolve SLL conflicts, and is the fastest
+	// mode guaranteeing correct parse results for every grammar with
+	// syntactically correct input. For truly ambiguous grammars it might
+	// not report precisely which alternatives are ambiguous. No guarantees
+	// are made for syntactically incorrect inputs.
+	PredictionModeLL = 1
+	// PredictionModeLLExactAmbigDetection is the LL(*) mode with exact
+	// ambiguity detection: in addition to LL's correctness guarantees, it
+	// computes the complete, exact set of ambiguous alternatives at every
+	// ambiguous decision. Useful for diagnosing ambiguities during grammar
+	// development, but the extra computation should be avoided when exact
+	// results are not needed. No guarantees are made for syntactically
+	// incorrect inputs.
+	PredictionModeLLExactAmbigDetection = 2
+)
+
+//
+// Computes the SLL prediction termination condition.
+//
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+// The usual SLL+LL fallback upon SLL conflict
+// Pure SLL without LL fallback
+//
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
+//
+// When the ATN simulation reaches the state before {@code ''}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative to has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also let's us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+//
+// PredictionModehasSLLConflictTerminatingPrediction implements the SLL
+// termination rule documented above for the given prediction mode.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+	// Configs in rule stop states indicate reaching the end of the decision
+	// rule (local context) or end of start rule (full context). If all
+	// configs meet this condition, then none of the configurations is able
+	// to Match additional input so we terminate prediction.
+	//
+	if PredictionModeallConfigsInRuleStopStates(configs) {
+		return true
+	}
+	// pure SLL mode parsing
+	if mode == PredictionModeSLL {
+		// Don't bother with combining configs from different semantic
+		// contexts if we can fail over to full LL costs more time
+		// since we'll often fail over anyway.
+		if configs.HasSemanticContext() {
+			// dup configs, tossing out semantic predicates
+			dup := NewBaseATNConfigSet(false)
+			for _, c := range configs.GetItems() {
+
+				// NewBaseATNConfig({semanticContext:}, c)
+				c = NewBaseATNConfig2(c, SemanticContextNone)
+				dup.Add(c, nil)
+			}
+			configs = dup
+		}
+		// now we have combined contexts for configs with dissimilar preds
+	}
+	// pure SLL or combined SLL+LL mode parsing
+	altsets := PredictionModegetConflictingAltSubsets(configs)
+	return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+// PredictionModehasConfigInRuleStopState reports whether at least one
+// configuration in configs sits in a RuleStopState, i.e. has reached the
+// end of its decision rule (local context) or start rule (full context).
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+	for _, cfg := range configs.GetItems() {
+		if _, isStop := cfg.GetState().(*RuleStopState); isStop {
+			return true
+		}
+	}
+	return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+// PredictionModeallConfigsInRuleStopStates reports whether every
+// configuration in configs sits in a RuleStopState; an empty configuration
+// set vacuously satisfies the condition.
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+	for _, cfg := range configs.GetItems() {
+		if _, isStop := cfg.GetState().(*RuleStopState); !isStop {
+			return false
+		}
+	}
+	return true
+}
+
+//
+// Full LL prediction termination.
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing a equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us to strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//
+//
+// no conflicts and more than 1 alternative in set => continue
+//
+// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1,3}} => continue
+//
+//
+// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1}} => stop and predict 1
+//
+// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {1}} = {@code {1}} => stop and predict 1, can announce
+// ambiguity {@code {1,2}}
+//
+// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {2}} = {@code {1,2}} => continue
+//
+// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {3}} = {@code {1,3}} => continue
+//
+//
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+// |A_i |>1 and
+// A_i = A_j for all i , j .
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+//
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+//
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+//
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+//
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+//
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+//
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+//
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+//
+// This func gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := make(map[int]*BitSet)
+
+ for _, c := range configs.GetItems() {
+ key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
+
+ alts, ok := configToAlts[key]
+ if !ok {
+ alts = NewBitSet()
+ configToAlts[key] = alts
+ }
+ alts.add(c.GetAlt())
+ }
+
+ values := make([]*BitSet, 0, 10)
+ for _, v := range configToAlts {
+ values = append(values, v)
+ }
+ return values
+}
+
+//
+// Get a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
new file mode 100644
index 00000000000..9ea9f6f5943
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
@@ -0,0 +1,217 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strings"
+
+ "strconv"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+var tokenTypeMapCache = make(map[string]int)
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.7.2"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// Get a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// What is the error header, normally line/character position information?//
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
+//
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
+//
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
+ return true
+}
+
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
new file mode 100644
index 00000000000..600cf8c0625
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+ // What context invoked b rule?
+ rn.parentCtx = parent
+
+ // What state invoked the rule associated with b context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, b should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state meaning nobody call
+// current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of b
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
new file mode 100644
index 00000000000..49205a16240
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
@@ -0,0 +1,455 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+//
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
+//
+
+type SemanticContext interface {
+ comparable
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ hash() int
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+//The default {@link SemanticContext}, which is semantically equivalent to
+//a predicate of the form {@code {true}?}.
+
+var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) hash() int {
+ return p.ruleIndex*43 + p.predIndex*47
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*PrecedencePredicate); !ok {
+ return false
+ } else {
+ return p.precedence == other.(*PrecedencePredicate).precedence
+ }
+}
+
+func (p *PrecedencePredicate) hash() int {
+ return p.precedence * 51
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ for _, v := range set.values() {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ }
+
+ return result
+}
+
+// A semantic context which is true whenever none of the contained contexts
+// is false.`
+
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewSet(nil, nil)
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.add(o)
+ }
+ } else {
+ operands.add(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.add(o)
+ }
+ } else {
+ operands.add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.add(reduced)
+ }
+
+ vs := operands.values()
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) equals(other interface{}) bool {
+ if a == other {
+ return true
+ } else if _, ok := other.(*AND); !ok {
+ return false
+ } else {
+ for i, v := range other.(*AND).opnds {
+ if !a.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// {@inheritDoc}
+//
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+//
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *OR) hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
+
+//
+// A semantic context which is true whenever at least one of the contained
+// contexts is true.
+//
+
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewSet(nil, nil)
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.add(o)
+ }
+ } else {
+ operands.add(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.add(o)
+ }
+ } else {
+ operands.add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.add(reduced)
+ }
+
+ vs := operands.values()
+
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) equals(other interface{}) bool {
+ if o == other {
+ return true
+ } else if _, ok := other.(*OR); !ok {
+ return false
+ } else {
+ for i, v := range other.(*OR).opnds {
+ if !o.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// The evaluation of predicates by o context is short-circuiting, but
+// unordered.
+//
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
new file mode 100644
index 00000000000..2d8e99095d3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // All tokens go to the parser (unless Skip() is called in that rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+
+ TokenDefaultChannel = 0
+
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ *BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty {@link Pair} which is used as the default value of
+// {@link //source} for tokens that do not have a source.
+
+//CommonToken.EMPTY_SOURCE = [ nil, nil ]
+
+// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
+//
+//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+//
+// @param oldToken The token to copy.
+//
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
new file mode 100644
index 00000000000..e023978fef4
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
new file mode 100644
index 00000000000..df92c814789
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(*Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
new file mode 100644
index 00000000000..96a03f02aa6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
@@ -0,0 +1,649 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+package antlr
+
+import (
+"bytes"
+"fmt"
+)
+
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+
+
+const(
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instruction_index int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ op_name string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation)GetInstructionIndex() int{
+ return op.instruction_index
+}
+
+func (op *BaseRewriteOperation)GetIndex() int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation)GetText() string{
+ return op.text
+}
+
+func (op *BaseRewriteOperation)GetOpName() string{
+ return op.op_name
+}
+
+func (op *BaseRewriteOperation)GetTokens() TokenStream{
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+ op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation)SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation)SetText(val string){
+ op.text = val
+}
+
+func (op *BaseRewriteOperation)SetOpName(val string){
+ op.op_name = val
+}
+
+func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.op_name,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
+ return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index,
+ text:text,
+ op_name:"InsertBeforeOp",
+ tokens:stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
+ return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index+1,
+ text:text,
+ tokens:stream,
+ }}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct{
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation:BaseRewriteOperation{
+ index:from,
+ text:text,
+ op_name:"ReplaceOp",
+ tokens:stream,
+ },
+ LastIndex:to,
+ }
+}
+
+func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
+ if op.text != ""{
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex +1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ },
+ last_rewrite_token_indexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
+ is, ok := tsr.programs[program_name]
+ if ok{
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+ tsr.Rollback(Default_Program_Name, instruction_index)
+}
+//Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+ tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+ tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+ tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+ tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
+ tsr.Replace(program_name, from, to, "" )
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+ tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
+ tsr.DeleteDefault(index,index)
+}
+
+func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+ tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
+ if !ok{
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+ tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+ is := make([]RewriteOperation, 0, Program_Init_Size)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok{
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetTextDefault() string{
+ return tsr.GetText(
+ Default_Program_Name,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start,0)
+ if rewrites == nil || len(rewrites) == 0{
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i:=start; i<=stop && i<tsr.tokens.Size();{
+ op := indexToOp[i]
+ delete(indexToOp, i)// remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil{
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
+ i++ // move to next token
+ }else {
+ i = op.Execute(&buf)// execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1{
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp{
+ if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ }
+ }
+ return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+//
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+ // WALK REPLACES
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil{continue}
+ rop, ok := op.(*ReplaceOp)
+ if !ok{continue}
+ // Wipe prior inserts within range
+ for j:=0; j<i && j < len(rewrites); j++{
+ if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
+ if iop.index == rop.index{
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instruction_index] = nil
+ if rop.text != ""{
+ rop.text = iop.text + rop.text
+ }else{
+ rop.text = iop.text
+ }
+ }else if iop.index > rop.index && iop.index <=rop.LastIndex{
+ // delete insert as it's a no-op.
+ rewrites[iop.instruction_index] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j:=0; j<i && j < len(rewrites); j++{
+ if prevop, ok := rewrites[j].(*ReplaceOp);ok{
+ if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
+ // delete replace as it's a no-op.
+ rewrites[prevop.instruction_index] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint{
+ rewrites[prevop.instruction_index] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+ }else if !disjoint{
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i:=0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil{continue}
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok{continue}
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j:=0; j<i && j < len(rewrites); j++{
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
+ if nextIop.index == iop.GetIndex(){
+ iop.SetText(nextIop.text + " " + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
+ if prevIop.index == iop.GetIndex(){
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instruction_index] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j:=0; j<i && j < len(rewrites); j++{
+ if rop, ok := rewrites[j].(*ReplaceOp); ok{
+ if iop.GetIndex() == rop.index{
+ rop.SetText(iop.GetText() + rop.GetText())
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
+ panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil {continue}
+ if _, ok := m[op.GetIndex()]; ok{
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
+
+/*
+ Quick fixing Go lack of overloads
+ */
+
+func max(a,b int)int{
+ if a>b{
+ return a
+ }else {
+ return b
+ }
+}
+func min(a,b int)int{
+ if a<b{
+ return a
+ }else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
new file mode 100644
index 00000000000..53056bd40a4
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
@@ -0,0 +1,428 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ *BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value or, signifies special intervalSet.
+ t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ *BaseTransition
+
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ *BaseTransition
+
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
+}
+
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ *BaseTransition
+
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ return "'" + string(t.start) + "'..'" + string(t.stop) + "'"
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ *BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ *BaseTransition
+
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ *BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ *SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ *BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
new file mode 100644
index 00000000000..ad0eabf005d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
@@ -0,0 +1,251 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+///
+
// TreeInvalidInterval is the sentinel source interval used for nodes that
// have no backing token (see TerminalNodeImpl.GetSourceInterval).
var TreeInvalidInterval = NewInterval(-1, -2)

// Tree is the most abstract tree interface: a parent, a payload, and an
// ordered list of children.
type Tree interface {
	GetParent() Tree
	SetParent(Tree)
	GetPayload() interface{}
	GetChild(i int) Tree
	GetChildCount() int
	GetChildren() []Tree
}

// SyntaxTree adds the span of the input that a subtree was derived from.
type SyntaxTree interface {
	Tree

	GetSourceInterval() *Interval
}

// ParseTree is a syntax tree produced by a parser. It supports the visitor
// pattern, text extraction, and LISP-style printing.
type ParseTree interface {
	SyntaxTree

	Accept(Visitor ParseTreeVisitor) interface{}
	GetText() string

	ToStringTree([]string, Recognizer) string
}

// RuleNode is an interior parse-tree node created by a grammar rule.
type RuleNode interface {
	ParseTree

	GetRuleContext() RuleContext
	GetBaseRuleContext() *BaseRuleContext
}

// TerminalNode is a leaf node wrapping a single token.
type TerminalNode interface {
	ParseTree

	GetSymbol() Token
}

// ErrorNode is a terminal node created during error recovery. The
// unexported marker method distinguishes it from ordinary terminals at
// compile time.
type ErrorNode interface {
	TerminalNode

	errorNode()
}

// ParseTreeVisitor is the generic visitor interface over parse trees.
type ParseTreeVisitor interface {
	Visit(tree ParseTree) interface{}
	VisitChildren(node RuleNode) interface{}
	VisitTerminal(node TerminalNode) interface{}
	VisitErrorNode(node ErrorNode) interface{}
}
+
// BaseParseTreeVisitor is a no-op ParseTreeVisitor; embed it so concrete
// visitors only need to override the hooks they care about.
type BaseParseTreeVisitor struct{}

var _ ParseTreeVisitor = &BaseParseTreeVisitor{}

func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{}            { return nil }
func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{}     { return nil }
func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{}   { return nil }
+
+// TODO
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
// ParseTreeListener receives callbacks while a ParseTreeWalker traverses a
// parse tree.
type ParseTreeListener interface {
	VisitTerminal(node TerminalNode)
	VisitErrorNode(node ErrorNode)
	EnterEveryRule(ctx ParserRuleContext)
	ExitEveryRule(ctx ParserRuleContext)
}

// BaseParseTreeListener is a no-op listener; embed it to implement only the
// callbacks you need.
type BaseParseTreeListener struct{}

var _ ParseTreeListener = &BaseParseTreeListener{}

func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode)      {}
func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode)        {}
func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext)  {}
+
// TerminalNodeImpl is the concrete leaf node: a token plus a link to its
// enclosing rule context.
type TerminalNodeImpl struct {
	parentCtx RuleContext

	symbol Token
}

var _ TerminalNode = &TerminalNodeImpl{}

// NewTerminalNodeImpl wraps symbol in a leaf node with no parent.
func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
	tn := new(TerminalNodeImpl)

	tn.parentCtx = nil
	tn.symbol = symbol

	return tn
}

// GetChild always returns nil: terminals have no children.
func (t *TerminalNodeImpl) GetChild(i int) Tree {
	return nil
}

// GetChildren always returns nil: terminals have no children.
func (t *TerminalNodeImpl) GetChildren() []Tree {
	return nil
}

// SetChildren panics: children cannot be attached to a terminal.
func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
	panic("Cannot set children on terminal node")
}

// GetSymbol returns the wrapped token.
func (t *TerminalNodeImpl) GetSymbol() Token {
	return t.symbol
}

// GetParent returns the enclosing rule context (nil for a detached node).
func (t *TerminalNodeImpl) GetParent() Tree {
	return t.parentCtx
}

// SetParent records the parent. tree must be a RuleContext; any other Tree
// implementation makes the type assertion panic.
func (t *TerminalNodeImpl) SetParent(tree Tree) {
	t.parentCtx = tree.(RuleContext)
}

// GetPayload returns the token itself.
func (t *TerminalNodeImpl) GetPayload() interface{} {
	return t.symbol
}

// GetSourceInterval returns the single-token interval covered by this node,
// or TreeInvalidInterval when there is no token.
func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
	if t.symbol == nil {
		return TreeInvalidInterval
	}
	tokenIndex := t.symbol.GetTokenIndex()
	return NewInterval(tokenIndex, tokenIndex)
}

// GetChildCount always returns 0 for a terminal.
func (t *TerminalNodeImpl) GetChildCount() int {
	return 0
}

// Accept dispatches to the visitor's VisitTerminal hook.
func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitTerminal(t)
}

// GetText returns the token's text.
func (t *TerminalNodeImpl) GetText() string {
	return t.symbol.GetText()
}

// String returns the token text, or the empty string for an EOF token.
func (t *TerminalNodeImpl) String() string {
	if t.symbol.GetTokenType() == TokenEOF {
		return ""
	}

	return t.symbol.GetText()
}

// ToStringTree for a terminal ignores the rule names and recognizer; it is
// just String.
func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
	return t.String()
}
+
// ErrorNodeImpl represents a token that was consumed during
// reSynchronization rather than during a valid Match operation. For example,
// we will create this kind of a node during single token insertion and
// deletion as well as during "consume until error recovery set" upon no
// viable alternative exceptions.

type ErrorNodeImpl struct {
	*TerminalNodeImpl
}

var _ ErrorNode = &ErrorNodeImpl{}

// NewErrorNodeImpl wraps token in an error leaf node.
func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
	en := new(ErrorNodeImpl)
	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
	return en
}

// errorNode is the marker method that satisfies the ErrorNode interface.
func (e *ErrorNodeImpl) errorNode() {}

// Accept dispatches to VisitErrorNode rather than VisitTerminal.
func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
	return v.VisitErrorNode(e)
}
+
// ParseTreeWalker drives a ParseTreeListener over a tree depth-first,
// firing enter events on the way down and exit events on the way up.
type ParseTreeWalker struct {
}

// NewParseTreeWalker returns a new, stateless walker.
func NewParseTreeWalker() *ParseTreeWalker {
	return new(ParseTreeWalker)
}

// Walk performs a depth-first traversal of t, notifying listener at each
// node. The ErrorNode case must come before TerminalNode: every ErrorNode
// is also a TerminalNode (the interface embeds it) and the first matching
// case of the type switch wins.
func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
	switch tt := t.(type) {
	case ErrorNode:
		listener.VisitErrorNode(tt)
	case TerminalNode:
		listener.VisitTerminal(tt)
	default:
		// Anything that is neither kind of leaf is treated as a RuleNode;
		// the assertion panics for other Tree implementations.
		p.EnterRule(listener, t.(RuleNode))
		for i := 0; i < t.GetChildCount(); i++ {
			child := t.GetChild(i)
			p.Walk(listener, child)
		}
		p.ExitRule(listener, t.(RuleNode))
	}
}

// EnterRule fires two events for a rule node: the generic EnterEveryRule
// first, then the rule-specific enter event. ExitRule fires them in the
// reverse order when the node is finished.
func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	listener.EnterEveryRule(ctx)
	ctx.EnterRule(listener)
}

// ExitRule fires the rule-specific exit event before the generic
// ExitEveryRule, mirroring EnterRule in reverse.
func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
	ctx := r.GetRuleContext().(ParserRuleContext)
	ctx.ExitRule(listener)
	listener.ExitEveryRule(ctx)
}

// ParseTreeWalkerDefault is a shared walker instance; it is safe to share
// because ParseTreeWalker carries no state.
var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
new file mode 100644
index 00000000000..80144ecadea
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
// TreesStringTree prints a whole tree in LISP form: leaves as bare text,
// interior nodes as "(name child child ...)". TreesGetNodeText supplies the
// text for each node, with whitespace escaped.
//
// When recog is non-nil its rule names are used; the resolved ruleNames
// slice is then threaded through the recursion, so recog itself is only
// consulted at the top level (recursive calls pass nil).
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {

	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	s := TreesGetNodeText(tree, ruleNames, nil)

	s = EscapeWhitespace(s, false)
	c := tree.GetChildCount()
	// Leaves print without parentheses.
	if c == 0 {
		return s
	}
	res := "(" + s + " "
	if c > 0 {
		s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
		res += s
	}
	for i := 1; i < c; i++ {
		s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
		res += (" " + s)
	}
	res += ")"
	return res
}
+
// TreesGetNodeText returns the display text for one node: for rule nodes
// the rule name (with ":altNumber" appended for labeled alternatives), for
// error nodes their formatted value, for terminals the token text, and
// otherwise fmt.Sprint of the payload. A non-nil recog overrides ruleNames.
func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
	if recog != nil {
		ruleNames = recog.GetRuleNames()
	}

	if ruleNames != nil {
		switch t2 := t.(type) {
		case RuleNode:
			t3 := t2.GetRuleContext()
			altNumber := t3.GetAltNumber()

			if altNumber != ATNInvalidAltNumber {
				return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
			}
			return ruleNames[t3.GetRuleIndex()]
		case ErrorNode:
			return fmt.Sprint(t2)
		case TerminalNode:
			if t2.GetSymbol() != nil {
				return t2.GetSymbol().GetText()
			}
		}
	}

	// no recog for rule names: fall back to the raw payload.
	payload := t.GetPayload()
	if p2, ok := payload.(Token); ok {
		return p2.GetText()
	}

	return fmt.Sprint(t.GetPayload())
}
+
// TreesGetChildren returns an ordered list of all children of this node.
func TreesGetChildren(t Tree) []Tree {
	list := make([]Tree, 0)
	for i := 0; i < t.GetChildCount(); i++ {
		list = append(list, t.GetChild(i))
	}
	return list
}

// TreesgetAncestors returns a list of all ancestors of this node. The first
// node of the list is the root and the last is the parent of this node.
// (The lowercase "get" in the name matches the upstream ANTLR runtime.)
func TreesgetAncestors(t Tree) []Tree {
	ancestors := make([]Tree, 0)
	t = t.GetParent()
	for t != nil {
		// Prepend so the root ends up first.
		f := []Tree{t}
		ancestors = append(f, ancestors...)
		t = t.GetParent()
	}
	return ancestors
}
+
// TreesFindAllTokenNodes collects every terminal node in t whose token type
// equals ttype.
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
	return TreesfindAllNodes(t, ttype, true)
}

// TreesfindAllRuleNodes collects every rule context in t with the given
// rule index.
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
	return TreesfindAllNodes(t, ruleIndex, false)
}

// TreesfindAllNodes walks t and returns the matching nodes: token types
// when findTokens is true, rule indexes otherwise.
func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
	nodes := make([]ParseTree, 0)
	treesFindAllNodes(t, index, findTokens, &nodes)
	return nodes
}

// treesFindAllNodes is the recursive worker for TreesfindAllNodes; matches
// are appended to *nodes in depth-first preorder.
func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
	// check this node (the root) first

	t2, ok := t.(TerminalNode)
	t3, ok2 := t.(ParserRuleContext)

	if findTokens && ok {
		if t2.GetSymbol().GetTokenType() == index {
			*nodes = append(*nodes, t2)
		}
	} else if !findTokens && ok2 {
		if t3.GetRuleIndex() == index {
			*nodes = append(*nodes, t3)
		}
	}
	// check children
	for i := 0; i < t.GetChildCount(); i++ {
		treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
	}
}

// TreesDescendants returns t followed by every node beneath it, in
// depth-first preorder.
func TreesDescendants(t ParseTree) []ParseTree {
	nodes := []ParseTree{t}
	for i := 0; i < t.GetChildCount(); i++ {
		nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
	}
	return nodes
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
new file mode 100644
index 00000000000..bba2ffae793
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
@@ -0,0 +1,417 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
// intMin returns the smaller of a and b.
func intMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
+
// intMax returns the larger of a and b.
func intMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
+
// IntStack is a simple LIFO stack of ints backed by a slice; the zero value
// is an empty, ready-to-use stack.
type IntStack []int

// ErrEmptyStack is returned by Pop when the stack holds no elements.
var ErrEmptyStack = errors.New("Stack is empty")

// Pop removes and returns the most recently pushed value. It returns
// ErrEmptyStack when the stack is empty.
func (s *IntStack) Pop() (int, error) {
	n := len(*s)
	if n == 0 {
		return 0, ErrEmptyStack
	}
	top := (*s)[n-1]
	*s = (*s)[:n-1]
	return top, nil
}

// Push places e on top of the stack.
func (s *IntStack) Push(e int) {
	*s = append(*s, e)
}
+
// Set is a hash set with pluggable hash and equality functions. Collisions
// are resolved by chaining: data maps a hash code to the bucket of values
// that share it.
type Set struct {
	data map[int][]interface{}
	hashcodeFunction func(interface{}) int
	equalsFunction func(interface{}, interface{}) bool
}

// NewSet builds a Set. A nil hashcodeFunction or equalsFunction falls back
// to the standard implementation, which requires elements to implement the
// package's hasher / comparable interfaces respectively.
func NewSet(
	hashcodeFunction func(interface{}) int,
	equalsFunction func(interface{}, interface{}) bool) *Set {

	s := new(Set)

	s.data = make(map[int][]interface{})

	if hashcodeFunction != nil {
		s.hashcodeFunction = hashcodeFunction
	} else {
		s.hashcodeFunction = standardHashFunction
	}

	if equalsFunction == nil {
		s.equalsFunction = standardEqualsFunction
	} else {
		s.equalsFunction = equalsFunction
	}

	return s
}

// standardEqualsFunction compares two elements via their equals method; it
// panics when either value does not implement comparable.
func standardEqualsFunction(a interface{}, b interface{}) bool {

	ac, oka := a.(comparable)
	bc, okb := b.(comparable)

	if !oka || !okb {
		panic("Not Comparable")
	}

	return ac.equals(bc)
}

// standardHashFunction hashes an element via its hash method; it panics
// when the value does not implement hasher.
func standardHashFunction(a interface{}) int {
	if h, ok := a.(hasher); ok {
		return h.hash()
	}

	panic("Not Hasher")
}

// hasher is implemented by values that can produce their own hash code.
type hasher interface {
	hash() int
}

// length returns the number of distinct hash codes stored — NOT the number
// of elements; a bucket may hold several values with equal hashes.
func (s *Set) length() int {
	return len(s.data)
}

// add inserts value and returns it; if an equal element is already present,
// the existing element is returned instead (interning behavior).
func (s *Set) add(value interface{}) interface{} {

	key := s.hashcodeFunction(value)

	values := s.data[key]

	if s.data[key] != nil {
		for i := 0; i < len(values); i++ {
			if s.equalsFunction(value, values[i]) {
				return values[i]
			}
		}

		s.data[key] = append(s.data[key], value)
		return value
	}

	// First value for this hash code: start a bucket with spare capacity.
	v := make([]interface{}, 1, 10)
	v[0] = value
	s.data[key] = v

	return value
}

// contains reports whether an element equal to value is present.
func (s *Set) contains(value interface{}) bool {

	key := s.hashcodeFunction(value)

	values := s.data[key]

	if s.data[key] != nil {
		for i := 0; i < len(values); i++ {
			if s.equalsFunction(value, values[i]) {
				return true
			}
		}
	}
	return false
}

// values returns all stored elements in unspecified (map-iteration) order.
func (s *Set) values() []interface{} {
	var l []interface{}

	for _, v := range s.data {
		l = append(l, v...)
	}

	return l
}

// String concatenates the formatted elements with no separators, in
// unspecified order; intended for debugging only.
func (s *Set) String() string {
	r := ""

	for _, av := range s.data {
		for _, v := range av {
			r += fmt.Sprint(v)
		}
	}

	return r
}
+
// BitSet is a set of ints implemented as a map from value to presence; the
// name mirrors Java's BitSet, which the ANTLR runtime ports.
type BitSet struct {
	data map[int]bool
}

// NewBitSet returns an empty set.
func NewBitSet() *BitSet {
	return &BitSet{data: make(map[int]bool)}
}

// add inserts value into the set.
func (b *BitSet) add(value int) {
	b.data[value] = true
}

// clear removes index from the set.
func (b *BitSet) clear(index int) {
	delete(b.data, index)
}

// or folds every member of set into b (set union, in place).
func (b *BitSet) or(set *BitSet) {
	for member := range set.data {
		b.add(member)
	}
}

// remove deletes value from the set.
func (b *BitSet) remove(value int) {
	delete(b.data, value)
}

// contains reports whether value is a member.
func (b *BitSet) contains(value int) bool {
	return b.data[value]
}

// values returns the members in ascending order.
func (b *BitSet) values() []int {
	keys := make([]int, 0, len(b.data))
	for k := range b.data {
		keys = append(keys, k)
	}
	sort.Ints(keys)
	return keys
}

// minValue returns the smallest member, or 2147483647 (math.MaxInt32) when
// the set is empty.
func (b *BitSet) minValue() int {
	min := 2147483647
	for k := range b.data {
		if k < min {
			min = k
		}
	}
	return min
}

// equals reports whether other is a *BitSet with exactly the same members.
func (b *BitSet) equals(other interface{}) bool {
	o, ok := other.(*BitSet)
	if !ok {
		return false
	}
	if len(b.data) != len(o.data) {
		return false
	}
	for k, v := range b.data {
		if o.data[k] != v {
			return false
		}
	}
	return true
}

// length returns the number of members.
func (b *BitSet) length() int {
	return len(b.data)
}

// String renders the set as "{v1, v2, ...}" with members in ascending order.
func (b *BitSet) String() string {
	members := b.values()
	parts := make([]string, len(members))
	for i, m := range members {
		parts[i] = strconv.Itoa(m)
	}
	return "{" + strings.Join(parts, ", ") + "}"
}
+
// AltDict is a string-keyed map; keys are stored with a "k-" prefix (a JS
// runtime heritage that callers never see).
type AltDict struct {
	data map[string]interface{}
}

// NewAltDict returns an empty dictionary.
func NewAltDict() *AltDict {
	return &AltDict{data: make(map[string]interface{})}
}

// Get returns the value stored under key, or nil when absent.
func (a *AltDict) Get(key string) interface{} {
	return a.data["k-"+key]
}

// put stores value under key, replacing any previous entry.
func (a *AltDict) put(key string, value interface{}) {
	a.data["k-"+key] = value
}

// values returns all stored values in unspecified order.
func (a *AltDict) values() []interface{} {
	vs := make([]interface{}, 0, len(a.data))
	for _, v := range a.data {
		vs = append(vs, v)
	}
	return vs
}
+
// DoubleDict is a two-level map keyed by a pair of ints; inner maps are
// created lazily on first write.
type DoubleDict struct {
	data map[int]map[int]interface{}
}

// NewDoubleDict returns an empty dictionary.
func NewDoubleDict() *DoubleDict {
	return &DoubleDict{data: make(map[int]map[int]interface{})}
}

// Get returns the value stored under (a, b), or nil when absent.
func (d *DoubleDict) Get(a, b int) interface{} {
	inner, ok := d.data[a]
	if !ok {
		return nil
	}
	return inner[b]
}

// set stores o under (a, b), allocating the inner map when needed.
func (d *DoubleDict) set(a, b int, o interface{}) {
	inner, ok := d.data[a]
	if !ok {
		inner = make(map[int]interface{})
		d.data[a] = inner
	}
	inner[b] = o
}
+
// EscapeWhitespace returns s with tab, newline and carriage return replaced
// by their two-character escapes; when escapeSpaces is true, spaces become
// the middle-dot character (U+00B7).
func EscapeWhitespace(s string, escapeSpaces bool) string {
	replacements := [][2]string{
		{"\t", "\\t"},
		{"\n", "\\n"},
		{"\r", "\\r"},
	}
	for _, rep := range replacements {
		s = strings.Replace(s, rep[0], rep[1], -1)
	}
	if escapeSpaces {
		s = strings.Replace(s, " ", "\u00B7", -1)
	}
	return s
}
+
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
// PrintArrayJavaStyle renders sa the way Java's Arrays.toString does:
// values separated by ", " and wrapped in square brackets, e.g. "[a, b]".
// strings.Join replaces the original hand-rolled bytes.Buffer loop; the
// output is byte-identical, including "[]" for an empty slice.
func PrintArrayJavaStyle(sa []string) string {
	return "[" + strings.Join(sa, ", ") + "]"
}
+
// The following routines were lifted from bits.rotate* available in Go 1.9.
// (Vendored locally so the runtime does not require Go 1.9's math/bits.)

// uintSize is the width of uint on this platform: 32 or 64 bits.
const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64

// rotateLeft returns the value of x rotated left by (k mod UintSize) bits.
// To rotate x right by k bits, call RotateLeft(x, -k). It dispatches on the
// platform word size so the rotation width matches uint.
func rotateLeft(x uint, k int) uint {
	if uintSize == 32 {
		return uint(rotateLeft32(uint32(x), k))
	}
	return uint(rotateLeft64(uint64(x), k))
}
+
// rotateLeft32 returns the value of x rotated left by (k mod 32) bits; a
// negative k rotates right. This mirrors math/bits.RotateLeft32.
func rotateLeft32(x uint32, k int) uint32 {
	const n = 32
	s := uint(k) & (n - 1)
	return x<<s | x>>(n-s)
}
+
// rotateLeft64 returns the value of x rotated left by (k mod 64) bits; a
// negative k rotates right. This mirrors math/bits.RotateLeft64.
func rotateLeft64(x uint64, k int) uint64 {
	const n = 64
	s := uint(k) & (n - 1)
	return x<<s | x>>(n-s)
}
+
+
// murmur hash
//
// 32-bit MurmurHash3-style mixing constants, widened to the platform uint.
// NOTE(review): n1_32 duplicates the 0xe6546b64 literal used inline in
// murmurUpdate below; the constant is otherwise unreferenced here.
const (
	c1_32 uint = 0xCC9E2D51
	c2_32 uint = 0x1B873593
	n1_32 uint = 0xE6546B64
)

// murmurInit seeds the running hash value.
func murmurInit(seed int) int {
	return seed
}

// murmurUpdate folds one word k1 into the running hash h1.
//
// NOTE(review): after xor-ing k1u into h1u this rotates k1u, not h1u —
// reference MurmurHash3 rotates the accumulator. This matches the upstream
// ANTLR Go runtime and is preserved as-is: the hash is only used internally
// and must stay consistent across the runtime.
func murmurUpdate(h1 int, k1 int) int {
	var k1u uint
	k1u = uint(k1) * c1_32
	k1u = rotateLeft(k1u, 15)
	k1u *= c2_32

	var h1u = uint(h1) ^ k1u
	k1u = rotateLeft(k1u, 13)
	h1u = h1u*5 + 0xe6546b64
	return int(h1u)
}
+
// murmurFinish applies the MurmurHash3 finalization mix to h1 after first
// xor-ing in the input length in bytes (numberOfWords * 4).
func murmurFinish(h1 int, numberOfWords int) int {
	h := uint(h1) ^ uint(numberOfWords*4)
	h ^= h >> 16
	h *= 0x85ebca6b
	h ^= h >> 13
	h *= 0xc2b2ae35
	h ^= h >> 16
	return int(h)
}
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
index 1b1b1921efa..0f646931a46 100644
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
new file mode 100644
index 00000000000..ac7e51bfb19
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
// extractFile extracts a FileDescriptorProto from a gzip'd buffer, as
// produced by the Descriptor() method of generated message types.
func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
	r, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
	}
	defer r.Close()

	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
	}

	fd := new(protobuf.FileDescriptorProto)
	if err := proto.Unmarshal(b, fd); err != nil {
		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
	}

	return fd, nil
}
+
// Message is a proto.Message with a method to return its descriptor.
//
// Message types generated by the protocol compiler always satisfy
// the Message interface.
type Message interface {
	proto.Message
	Descriptor() ([]byte, []int)
}

// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
// describing the given message.
//
// It panics rather than returning an error when the registered descriptor
// bytes are malformed, since that indicates broken generated code.
func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
	// Descriptor() yields the gzip'd file descriptor plus the index path of
	// this message within that file.
	gz, path := msg.Descriptor()
	fd, err := extractFile(gz)
	if err != nil {
		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
	}

	// path[0] selects the top-level message; any remaining indices descend
	// into nested message types.
	md = fd.MessageType[path[0]]
	for _, i := range path[1:] {
		md = md.NestedType[i]
	}
	return fd, md
}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index d9aa3c42d66..63b0f08bef2 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
- // x -= 0x80 << 63 // Always zero.
return 0, errOverflow
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 00000000000..35b882c09aa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
// The identifiers below are retained only for API compatibility: each one
// is a stub that does nothing or returns a "not implemented" error.

// Deprecated: do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }

// Deprecated: do not use.
func GetStats() Stats { return Stats{} }

// Deprecated: do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
	return nil, errors.New("proto: not implemented")
}

// Deprecated: do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
	return errors.New("proto: not implemented")
}

// Deprecated: do not use.
func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index c27d35f866b..3abfed2cff0 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -37,27 +37,9 @@ package proto
import (
"errors"
- "fmt"
"reflect"
)
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
- field string
-}
-
-func (e *RequiredNotSetError) Error() string {
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
index d4db5a1c145..f9b6e41b3c1 100644
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
- m1, m2 := e1.value, e2.value
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
if m1 == nil && m2 == nil {
// Both have only encoded form.
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index 816a3b9d6c0..fa88add30a4 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -185,9 +185,25 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
- desc *ExtensionDesc
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+ // The reason for the divergence is so that the storage type more naturally
+ // matches what is expected of when retrieving the values through the
+ // protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
value interface{}
- enc []byte
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
}
// SetRawExtension is for testing only.
@@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
if extension.ExtensionType == nil {
@@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
- e.value = v
+ e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
// defaultExtensionValue returns the default value for extension.
@@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
@@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
+
// extensionAsLegacyType converts a value in the storage type to the API
// type. See Extension.value for the storage/API type distinction: scalars
// are stored as T but exposed as *T, and repeated fields are stored as *[]T
// but exposed as []T.
func extensionAsLegacyType(v interface{}) interface{} {
	switch rv := reflect.ValueOf(v); rv.Kind() {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
		// Represent primitive types as a pointer to the value.
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		v = rv2.Interface()
	case reflect.Ptr:
		// Represent slice types as the value itself; a nil *[]T becomes a
		// typed zero slice.
		switch rv.Type().Elem().Kind() {
		case reflect.Slice:
			if rv.IsNil() {
				v = reflect.Zero(rv.Type().Elem()).Interface()
			} else {
				v = rv.Elem().Interface()
			}
		}
	}
	return v
}
+
// extensionAsStorageType converts a value in the API type to the storage
// type. See Extension.value: *T scalars are unwrapped to T (nil pointers
// become the typed zero value), and []T slices are wrapped as *[]T — except
// []byte, which stays a plain slice in both representations.
func extensionAsStorageType(v interface{}) interface{} {
	switch rv := reflect.ValueOf(v); rv.Kind() {
	case reflect.Ptr:
		// Represent slice types as the value itself.
		switch rv.Type().Elem().Kind() {
		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
			if rv.IsNil() {
				v = reflect.Zero(rv.Type().Elem()).Interface()
			} else {
				v = rv.Elem().Interface()
			}
		}
	case reflect.Slice:
		// Represent slice types as a pointer to the value; []byte
		// (element kind Uint8) is excluded and stored as-is.
		if rv.Type().Elem().Kind() != reflect.Uint8 {
			rv2 := reflect.New(rv.Type())
			rv2.Elem().Set(rv)
			v = rv2.Interface()
		}
	}
	return v
}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 0e2191b8ada..fdd328bb7f5 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -265,7 +265,6 @@ package proto
import (
"encoding/json"
- "errors"
"fmt"
"log"
"reflect"
@@ -274,34 +273,73 @@ import (
"sync"
)
-var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
}
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or a InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
}
-// Set to true to enable stats collection.
-const collectStats = false
+type nonFatal struct{ E error }
-var stats Stats
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
@@ -902,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
+
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index 3b6ca41d5e5..f48a756761e 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@ package proto
*/
import (
- "bytes"
- "encoding/json"
"errors"
- "fmt"
- "reflect"
- "sort"
- "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var u marshalInfo
- siz := u.sizeMessageSet(exts)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, exts, deterministic)
-
- case map[int32]Extension:
- // This is an old-style extension map.
- // Wrap it in a new-style XXX_InternalExtensions.
- ie := XXX_InternalExtensions{
- p: &struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }{
- extensionMap: exts,
- },
- }
-
- var u marshalInfo
- siz := u.sizeMessageSet(&ie)
- b := make([]byte, 0, siz)
- return u.appendMessageSet(b, &ie, deterministic)
-
- default:
- return nil, errors.New("proto: not an extension map")
- }
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- var mu sync.Locker
- m, mu = exts.extensionsRead()
- if m != nil {
- // Keep the extensions map locked until we're done marshaling to prevent
- // races between marshaling and unmarshaling the lazily-{en,de}coded
- // values.
- mu.Lock()
- defer mu.Unlock()
- }
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
-
- if i > 0 && b.Len() > 1 {
- b.WriteByte(',')
- }
-
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index b6cad90834b..94fa9194a88 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
+ }
return pointer{v: u}
}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index d55a335d945..dbfffe071b8 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
- return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
+ }
+ return p
}
// valToPointer converts v to a pointer. v must be of pointer type.
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index f710adab092..79668ff5c5c 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -139,7 +139,7 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
@@ -148,9 +148,9 @@ type Properties struct {
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
- mtype reflect.Type // set for map types only
- mkeyprop *Properties // set for map types only
- mvalprop *Properties // set for map types only
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
@@ -275,16 +275,16 @@ func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, loc
case reflect.Map:
p.mtype = t1
- p.mkeyprop = &Properties{}
- p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.mvalprop = &Properties{}
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
- p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {
@@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
- if collectStats {
- stats.Chit++
- }
return sprop
}
@@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
return prop
}
- if collectStats {
- stats.Cmiss++
- }
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
@@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
// Re-order prop.order.
sort.Sort(prop)
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
}
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- _, _, _, oots = om.XXX_OneofFuncs()
-
+ if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
index 0f212b3029d..5cb11fa955e 100644
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -87,6 +87,7 @@ type marshalElemInfo struct {
sizer sizer
marshaler marshaler
isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
}
var (
@@ -231,7 +232,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
return b, err
}
- var err, errreq error
+ var err, errLater error
// The old marshaler encodes extensions at beginning.
if u.extensions.IsValid() {
e := ptr.offset(u.extensions).toExtensions()
@@ -252,11 +253,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
}
}
for _, f := range u.fields {
- if f.required && errreq == nil {
+ if f.required {
if ptr.offset(f.field).getPointer().isNil() {
// Required field is not set.
// We record the error but keep going, to give a complete marshaling.
- errreq = &RequiredNotSetError{f.name}
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
continue
}
}
@@ -269,14 +272,21 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
if err1, ok := err.(*RequiredNotSetError); ok {
// Required field in submessage is not set.
// We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = &RequiredNotSetError{f.name + "." + err1.field}
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
}
continue
}
if err == errRepeatedHasNil {
err = errors.New("proto: repeated field " + f.name + " has nil element")
}
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
return b, err
}
}
@@ -284,7 +294,7 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte
s := *ptr.offset(u.unrecognized).toBytes()
b = append(b, s...)
}
- return b, errreq
+ return b, errLater
}
// computeMarshalInfo initializes the marshal info.
@@ -311,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
// get oneof implementers
var oneofImplementers []interface{}
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
}
n := t.NumField()
@@ -398,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
panic("tag is not an integer")
}
wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
e = &marshalElemInfo{
wiretag: uint64(tag)<<3 | wt,
tagsize: SizeVarint(uint64(tag) << 3),
sizer: sizer,
marshaler: marshaler,
isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
}
// update cache
@@ -439,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
fi.field = toField(f)
- fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+ fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
fi.isPointer = true
fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -467,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
}
}
-type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
// wiretype returns the wire encoding of the type.
func wiretype(encoding string) uint64 {
switch encoding {
@@ -530,6 +548,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
packed := false
proto3 := false
+ validateUTF8 := true
for i := 2; i < len(tags); i++ {
if tags[i] == "packed" {
packed = true
@@ -538,6 +557,7 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
proto3 = true
}
}
+ validateUTF8 = validateUTF8 && proto3
switch t.Kind() {
case reflect.Bool:
@@ -735,6 +755,18 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma
}
return sizeFloat64Value, appendFloat64Value
case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
if pointer {
return sizeStringPtr, appendStringPtr
}
@@ -1983,52 +2015,105 @@ func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byt
return b, nil
}
func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
v := *ptr.toString()
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
v := *ptr.toString()
if v == "" {
return b, nil
}
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
p := *ptr.toStringPtr()
if p == nil {
return b, nil
}
v := *p
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
s := *ptr.toStringSlice()
for _, v := range s {
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ invalidUTF8 = true
}
b = appendVarint(b, wiretag)
b = appendVarint(b, uint64(len(v)))
b = append(b, v...)
}
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
return b, nil
}
func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
@@ -2107,7 +2192,8 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
- var err, errreq error
+ var err error
+ var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@@ -2115,22 +2201,14 @@ func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, wiretag) // start group
b, err = u.marshal(b, v, deterministic)
b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
+ if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
- return b, errreq
+ return b, nerr.E
}
}
@@ -2174,7 +2252,8 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
},
func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
s := ptr.getPointerSlice()
- var err, errreq error
+ var err error
+ var nerr nonFatal
for _, v := range s {
if v.isNil() {
return b, errRepeatedHasNil
@@ -2184,22 +2263,14 @@ func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
b = appendVarint(b, uint64(siz))
b, err = u.marshal(b, v, deterministic)
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
+ if !nerr.Merge(err) {
if err == ErrNil {
err = errRepeatedHasNil
}
return b, err
}
}
- return b, errreq
+ return b, nerr.E
}
}
@@ -2223,14 +2294,33 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
// value.
// Key cannot be pointer-typed.
valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
return func(ptr pointer, tagsize int) int {
m := ptr.asPointerTo(t).Elem() // the map
n := 0
for _, k := range m.MapKeys() {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
n += siz + SizeVarint(uint64(siz)) + tagsize
}
@@ -2243,24 +2333,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
if len(keys) > 1 && deterministic {
sort.Sort(mapKeys(keys))
}
+
+ var nerr nonFatal
for _, k := range keys {
ki := k.Interface()
vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
b = appendVarint(b, uint64(siz))
b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != nil && err != ErrNil { // allow nil value in map
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
}
@@ -2316,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
// the last time this function was called.
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
mu.Unlock()
@@ -2333,6 +2425,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
+ var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@@ -2350,13 +2443,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@@ -2381,13 +2474,13 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// message set format is:
@@ -2426,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, 1) // message, tag = 3 (size=1)
}
mu.Unlock()
@@ -2444,6 +2537,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
defer mu.Unlock()
var err error
+ var nerr nonFatal
// Fast-path for common cases: zero or one extensions.
// Don't bother sorting the keys.
@@ -2468,14 +2562,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
b = append(b, 1<<3|WireEndGroup)
}
- return b, nil
+ return b, nerr.E
}
// Sort the keys to provide a deterministic encoding.
@@ -2506,14 +2600,14 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
b = append(b, 1<<3|WireEndGroup)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
@@ -2536,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
n += ei.sizer(p, ei.tagsize)
}
return n
@@ -2556,6 +2650,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
sort.Ints(keys)
var err error
+ var nerr nonFatal
for _, k := range keys {
e := m[int32(k)]
if e.value == nil || e.desc == nil {
@@ -2570,13 +2665,13 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
ei := u.getExtElemInfo(e.desc)
v := e.value
- p := toAddrPointer(&v, ei.isptr)
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if err != nil {
+ if !nerr.Merge(err) {
return b, err
}
}
- return b, nil
+ return b, nerr.E
}
// newMarshaler is the interface representing objects that can marshal themselves.
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
index 55f0340a3fd..acee2fc5296 100644
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -97,6 +97,8 @@ type unmarshalFieldInfo struct {
// if a required field, contains a single set bit at this field's index in the required field list.
reqMask uint64
+
+ name string // name of the field, for error reporting
}
var (
@@ -134,10 +136,10 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
u.computeUnmarshalInfo()
}
if u.isMessageSet {
- return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
}
- var reqMask uint64 // bitmask of required fields we've seen.
- var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var errLater error
for len(b) > 0 {
// Read tag and wire type.
// Special case 1 and 2 byte varints.
@@ -176,11 +178,20 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
if r, ok := err.(*RequiredNotSetError); ok {
// Remember this error, but keep parsing. We need to produce
// a full parse even if a required field is missing.
- rnse = r
+ if errLater == nil {
+ errLater = r
+ }
reqMask |= f.reqMask
continue
}
if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
return err
}
// Fragments with bad wire type are treated as unknown fields.
@@ -239,20 +250,16 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
emap[int32(tag)] = e
}
}
- if rnse != nil {
- // A required field of a submessage/group is missing. Return that error.
- return rnse
- }
- if reqMask != u.reqMask {
+ if reqMask != u.reqMask && errLater == nil {
// A required field of this message is missing.
for _, n := range u.reqFields {
if reqMask&1 == 0 {
- return &RequiredNotSetError{n}
+ errLater = &RequiredNotSetError{n}
}
reqMask >>= 1
}
}
- return nil
+ return errLater
}
// computeUnmarshalInfo fills in u with information for use
@@ -351,43 +358,52 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
}
// Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask)
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
}
// Find any types associated with oneof fields.
- // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
- if fn.IsValid() {
- res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
- for i := res.Len() - 1; i >= 0; i-- {
- v := res.Index(i) // interface{}
- tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
- tag, err := strconv.Atoi(tagstr)
- if err != nil {
- panic("protobuf tag field not an integer: " + tagstr)
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(tag, of.field, unmarshal, 0)
- }
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
}
}
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
}
// Get extension ranges, if any.
- fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
if fn.IsValid() {
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
panic("a message with extensions, but no extensions field in " + t.Name())
@@ -401,7 +417,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0)
+ }, 0, "")
// Set mask for required field check.
u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
for len(u.dense) <= tag {
@@ -442,11 +459,17 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
tagArray := strings.Split(tags, ",")
encoding := tagArray[0]
name := "unknown"
+ proto3 := false
+ validateUTF8 := true
for _, tag := range tagArray[3:] {
if strings.HasPrefix(tag, "name=") {
name = tag[5:]
}
+ if tag == "proto3" {
+ proto3 = true
+ }
}
+ validateUTF8 = validateUTF8 && proto3
// Figure out packaging (pointer, slice, or both)
slice := false
@@ -594,6 +617,15 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
}
return unmarshalBytesValue
case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
if pointer {
return unmarshalStringPtr
}
@@ -1448,9 +1480,6 @@ func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
- if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
- }
*f.toString() = v
return b[x:], nil
}
@@ -1468,9 +1497,6 @@ func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
- if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
- }
*f.toStringPtr() = &v
return b[x:], nil
}
@@ -1488,11 +1514,69 @@ func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
if !utf8.ValidString(v) {
- return nil, errInvalidUTF8
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
}
+ v := string(b[:x])
s := f.toStringSlice()
*s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
return b[x:], nil
}
@@ -1674,6 +1758,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Maps will be somewhat slow. Oh well.
// Read key and value from data.
+ var nerr nonFatal
k := reflect.New(kt)
v := reflect.New(vt)
for len(b) > 0 {
@@ -1694,7 +1779,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
err = errInternalBadWireType // skip unknown tag
}
- if err == nil {
+ if nerr.Merge(err) {
continue
}
if err != errInternalBadWireType {
@@ -1717,7 +1802,7 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// Insert into map.
m.SetMapIndex(k.Elem(), v.Elem())
- return r, nil
+ return r, nerr.E
}
}
@@ -1743,15 +1828,16 @@ func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshal
// Unmarshal data into holder.
// We unmarshal into the first field of the holder object.
var err error
+ var nerr nonFatal
b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if err != nil {
+ if !nerr.Merge(err) {
return nil, err
}
// Write pointer to holder into target field.
f.asPointerTo(ityp).Elem().Set(v)
- return b, nil
+ return b, nerr.E
}
}
@@ -1864,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
// If there is an error, it returns 0,0.
func decodeVarint(b []byte) (uint64, int) {
var x, y uint64
- if len(b) <= 0 {
+ if len(b) == 0 {
goto bad
}
x = uint64(b[0])
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
index 2205fdaadf8..1aaee725b45 100644
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -353,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -370,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 0685bae36d5..bb55a3af276 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -630,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
- if err := p.readAny(key, props.mkeyprop); err != nil {
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
- if err := p.readAny(val, props.mvalprop); err != nil {
+ if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index e855b1f5c4a..1ded05bbe71 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/descriptor.proto
-package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor"
+package descriptor
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type FieldDescriptorProto_Type int32
@@ -72,6 +74,7 @@ var FieldDescriptorProto_Type_name = map[int32]string{
17: "TYPE_SINT32",
18: "TYPE_SINT64",
}
+
var FieldDescriptorProto_Type_value = map[string]int32{
"TYPE_DOUBLE": 1,
"TYPE_FLOAT": 2,
@@ -98,9 +101,11 @@ func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
*p = x
return p
}
+
func (x FieldDescriptorProto_Type) String() string {
return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
}
+
func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
if err != nil {
@@ -109,8 +114,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Type(value)
return nil
}
+
func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0}
+ return fileDescriptor_e5baabe45344a177, []int{4, 0}
}
type FieldDescriptorProto_Label int32
@@ -127,6 +133,7 @@ var FieldDescriptorProto_Label_name = map[int32]string{
2: "LABEL_REQUIRED",
3: "LABEL_REPEATED",
}
+
var FieldDescriptorProto_Label_value = map[string]int32{
"LABEL_OPTIONAL": 1,
"LABEL_REQUIRED": 2,
@@ -138,9 +145,11 @@ func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
*p = x
return p
}
+
func (x FieldDescriptorProto_Label) String() string {
return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
}
+
func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
if err != nil {
@@ -149,8 +158,9 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Label(value)
return nil
}
+
func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1}
+ return fileDescriptor_e5baabe45344a177, []int{4, 1}
}
// Generated classes can be optimized for speed or code size.
@@ -168,6 +178,7 @@ var FileOptions_OptimizeMode_name = map[int32]string{
2: "CODE_SIZE",
3: "LITE_RUNTIME",
}
+
var FileOptions_OptimizeMode_value = map[string]int32{
"SPEED": 1,
"CODE_SIZE": 2,
@@ -179,9 +190,11 @@ func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
*p = x
return p
}
+
func (x FileOptions_OptimizeMode) String() string {
return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
}
+
func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
if err != nil {
@@ -190,8 +203,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
*x = FileOptions_OptimizeMode(value)
return nil
}
+
func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0}
+ return fileDescriptor_e5baabe45344a177, []int{10, 0}
}
type FieldOptions_CType int32
@@ -208,6 +222,7 @@ var FieldOptions_CType_name = map[int32]string{
1: "CORD",
2: "STRING_PIECE",
}
+
var FieldOptions_CType_value = map[string]int32{
"STRING": 0,
"CORD": 1,
@@ -219,9 +234,11 @@ func (x FieldOptions_CType) Enum() *FieldOptions_CType {
*p = x
return p
}
+
func (x FieldOptions_CType) String() string {
return proto.EnumName(FieldOptions_CType_name, int32(x))
}
+
func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
if err != nil {
@@ -230,8 +247,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_CType(value)
return nil
}
+
func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0}
+ return fileDescriptor_e5baabe45344a177, []int{12, 0}
}
type FieldOptions_JSType int32
@@ -250,6 +268,7 @@ var FieldOptions_JSType_name = map[int32]string{
1: "JS_STRING",
2: "JS_NUMBER",
}
+
var FieldOptions_JSType_value = map[string]int32{
"JS_NORMAL": 0,
"JS_STRING": 1,
@@ -261,9 +280,11 @@ func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
*p = x
return p
}
+
func (x FieldOptions_JSType) String() string {
return proto.EnumName(FieldOptions_JSType_name, int32(x))
}
+
func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
if err != nil {
@@ -272,8 +293,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_JSType(value)
return nil
}
+
func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1}
+ return fileDescriptor_e5baabe45344a177, []int{12, 1}
}
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
@@ -292,6 +314,7 @@ var MethodOptions_IdempotencyLevel_name = map[int32]string{
1: "NO_SIDE_EFFECTS",
2: "IDEMPOTENT",
}
+
var MethodOptions_IdempotencyLevel_value = map[string]int32{
"IDEMPOTENCY_UNKNOWN": 0,
"NO_SIDE_EFFECTS": 1,
@@ -303,9 +326,11 @@ func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
*p = x
return p
}
+
func (x MethodOptions_IdempotencyLevel) String() string {
return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
}
+
func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
if err != nil {
@@ -314,8 +339,9 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
*x = MethodOptions_IdempotencyLevel(value)
return nil
}
+
func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0}
+ return fileDescriptor_e5baabe45344a177, []int{17, 0}
}
// The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -331,16 +357,17 @@ func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
func (*FileDescriptorSet) ProtoMessage() {}
func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0}
+ return fileDescriptor_e5baabe45344a177, []int{0}
}
+
func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
}
func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
}
-func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorSet.Merge(dst, src)
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FileDescriptorSet.Merge(m, src)
}
func (m *FileDescriptorSet) XXX_Size() int {
return xxx_messageInfo_FileDescriptorSet.Size(m)
@@ -392,16 +419,17 @@ func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FileDescriptorProto) ProtoMessage() {}
func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1}
+ return fileDescriptor_e5baabe45344a177, []int{1}
}
+
func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
}
func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorProto.Merge(dst, src)
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FileDescriptorProto.Merge(m, src)
}
func (m *FileDescriptorProto) XXX_Size() int {
return xxx_messageInfo_FileDescriptorProto.Size(m)
@@ -519,16 +547,17 @@ func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto) ProtoMessage() {}
func (*DescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2}
+ return fileDescriptor_e5baabe45344a177, []int{2}
}
+
func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
}
func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *DescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto.Merge(dst, src)
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DescriptorProto.Merge(m, src)
}
func (m *DescriptorProto) XXX_Size() int {
return xxx_messageInfo_DescriptorProto.Size(m)
@@ -622,16 +651,17 @@ func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_
func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0}
+ return fileDescriptor_e5baabe45344a177, []int{2, 0}
}
+
func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
}
func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
}
-func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src)
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
}
func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
@@ -678,16 +708,17 @@ func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_R
func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1}
+ return fileDescriptor_e5baabe45344a177, []int{2, 1}
}
+
func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
}
func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
}
-func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src)
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
}
func (m *DescriptorProto_ReservedRange) XXX_Size() int {
return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
@@ -725,7 +756,7 @@ func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
func (*ExtensionRangeOptions) ProtoMessage() {}
func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3}
+ return fileDescriptor_e5baabe45344a177, []int{3}
}
var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
@@ -735,14 +766,15 @@ var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_ExtensionRangeOptions
}
+
func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
}
func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
}
-func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src)
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
}
func (m *ExtensionRangeOptions) XXX_Size() int {
return xxx_messageInfo_ExtensionRangeOptions.Size(m)
@@ -801,16 +833,17 @@ func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FieldDescriptorProto) ProtoMessage() {}
func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4}
+ return fileDescriptor_e5baabe45344a177, []int{4}
}
+
func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
}
func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldDescriptorProto.Merge(dst, src)
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
}
func (m *FieldDescriptorProto) XXX_Size() int {
return xxx_messageInfo_FieldDescriptorProto.Size(m)
@@ -904,16 +937,17 @@ func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*OneofDescriptorProto) ProtoMessage() {}
func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5}
+ return fileDescriptor_e5baabe45344a177, []int{5}
}
+
func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
}
func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofDescriptorProto.Merge(dst, src)
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
}
func (m *OneofDescriptorProto) XXX_Size() int {
return xxx_messageInfo_OneofDescriptorProto.Size(m)
@@ -959,16 +993,17 @@ func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto) ProtoMessage() {}
func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6}
+ return fileDescriptor_e5baabe45344a177, []int{6}
}
+
func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
}
func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto.Merge(dst, src)
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
}
func (m *EnumDescriptorProto) XXX_Size() int {
return xxx_messageInfo_EnumDescriptorProto.Size(m)
@@ -1032,16 +1067,17 @@ func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescr
func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0}
+ return fileDescriptor_e5baabe45344a177, []int{6, 0}
}
+
func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
}
func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
}
-func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src)
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
}
func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
@@ -1080,16 +1116,17 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro
func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumValueDescriptorProto) ProtoMessage() {}
func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7}
+ return fileDescriptor_e5baabe45344a177, []int{7}
}
+
func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
}
func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src)
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
}
func (m *EnumValueDescriptorProto) XXX_Size() int {
return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
@@ -1135,16 +1172,17 @@ func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{}
func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*ServiceDescriptorProto) ProtoMessage() {}
func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8}
+ return fileDescriptor_e5baabe45344a177, []int{8}
}
+
func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
}
func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src)
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
}
func (m *ServiceDescriptorProto) XXX_Size() int {
return xxx_messageInfo_ServiceDescriptorProto.Size(m)
@@ -1197,16 +1235,17 @@ func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*MethodDescriptorProto) ProtoMessage() {}
func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9}
+ return fileDescriptor_e5baabe45344a177, []int{9}
}
+
func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
}
func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
}
-func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodDescriptorProto.Merge(dst, src)
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
}
func (m *MethodDescriptorProto) XXX_Size() int {
return xxx_messageInfo_MethodDescriptorProto.Size(m)
@@ -1336,6 +1375,14 @@ type FileOptions struct {
// is empty. When this option is empty, the package name will be used for
// determining the namespace.
PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+ // Use this option to change the namespace of php generated metadata classes.
+ // Default is empty. When this option is empty, the proto file name will be used
+ // for determining the namespace.
+ PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+ // Use this option to change the package of ruby generated classes. Default
+ // is empty. When this option is not set, the package name will be used for
+ // determining the ruby package.
+ RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -1349,7 +1396,7 @@ func (m *FileOptions) Reset() { *m = FileOptions{} }
func (m *FileOptions) String() string { return proto.CompactTextString(m) }
func (*FileOptions) ProtoMessage() {}
func (*FileOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10}
+ return fileDescriptor_e5baabe45344a177, []int{10}
}
var extRange_FileOptions = []proto.ExtensionRange{
@@ -1359,14 +1406,15 @@ var extRange_FileOptions = []proto.ExtensionRange{
func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_FileOptions
}
+
func (m *FileOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FileOptions.Unmarshal(m, b)
}
func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
}
-func (dst *FileOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileOptions.Merge(dst, src)
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FileOptions.Merge(m, src)
}
func (m *FileOptions) XXX_Size() int {
return xxx_messageInfo_FileOptions.Size(m)
@@ -1514,6 +1562,20 @@ func (m *FileOptions) GetPhpNamespace() string {
return ""
}
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+ if m != nil && m.PhpMetadataNamespace != nil {
+ return *m.PhpMetadataNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+ if m != nil && m.RubyPackage != nil {
+ return *m.RubyPackage
+ }
+ return ""
+}
+
func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
if m != nil {
return m.UninterpretedOption
@@ -1584,7 +1646,7 @@ func (m *MessageOptions) Reset() { *m = MessageOptions{} }
func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
func (*MessageOptions) ProtoMessage() {}
func (*MessageOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11}
+ return fileDescriptor_e5baabe45344a177, []int{11}
}
var extRange_MessageOptions = []proto.ExtensionRange{
@@ -1594,14 +1656,15 @@ var extRange_MessageOptions = []proto.ExtensionRange{
func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_MessageOptions
}
+
func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
}
func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
}
-func (dst *MessageOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MessageOptions.Merge(dst, src)
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MessageOptions.Merge(m, src)
}
func (m *MessageOptions) XXX_Size() int {
return xxx_messageInfo_MessageOptions.Size(m)
@@ -1723,7 +1786,7 @@ func (m *FieldOptions) Reset() { *m = FieldOptions{} }
func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
func (*FieldOptions) ProtoMessage() {}
func (*FieldOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12}
+ return fileDescriptor_e5baabe45344a177, []int{12}
}
var extRange_FieldOptions = []proto.ExtensionRange{
@@ -1733,14 +1796,15 @@ var extRange_FieldOptions = []proto.ExtensionRange{
func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_FieldOptions
}
+
func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
}
func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
}
-func (dst *FieldOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldOptions.Merge(dst, src)
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldOptions.Merge(m, src)
}
func (m *FieldOptions) XXX_Size() int {
return xxx_messageInfo_FieldOptions.Size(m)
@@ -1819,7 +1883,7 @@ func (m *OneofOptions) Reset() { *m = OneofOptions{} }
func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
func (*OneofOptions) ProtoMessage() {}
func (*OneofOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13}
+ return fileDescriptor_e5baabe45344a177, []int{13}
}
var extRange_OneofOptions = []proto.ExtensionRange{
@@ -1829,14 +1893,15 @@ var extRange_OneofOptions = []proto.ExtensionRange{
func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_OneofOptions
}
+
func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
}
func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
}
-func (dst *OneofOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OneofOptions.Merge(dst, src)
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OneofOptions.Merge(m, src)
}
func (m *OneofOptions) XXX_Size() int {
return xxx_messageInfo_OneofOptions.Size(m)
@@ -1875,7 +1940,7 @@ func (m *EnumOptions) Reset() { *m = EnumOptions{} }
func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
func (*EnumOptions) ProtoMessage() {}
func (*EnumOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14}
+ return fileDescriptor_e5baabe45344a177, []int{14}
}
var extRange_EnumOptions = []proto.ExtensionRange{
@@ -1885,14 +1950,15 @@ var extRange_EnumOptions = []proto.ExtensionRange{
func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_EnumOptions
}
+
func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
}
func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
}
-func (dst *EnumOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumOptions.Merge(dst, src)
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumOptions.Merge(m, src)
}
func (m *EnumOptions) XXX_Size() int {
return xxx_messageInfo_EnumOptions.Size(m)
@@ -1944,7 +2010,7 @@ func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
func (*EnumValueOptions) ProtoMessage() {}
func (*EnumValueOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15}
+ return fileDescriptor_e5baabe45344a177, []int{15}
}
var extRange_EnumValueOptions = []proto.ExtensionRange{
@@ -1954,14 +2020,15 @@ var extRange_EnumValueOptions = []proto.ExtensionRange{
func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_EnumValueOptions
}
+
func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
}
func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
}
-func (dst *EnumValueOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnumValueOptions.Merge(dst, src)
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumValueOptions.Merge(m, src)
}
func (m *EnumValueOptions) XXX_Size() int {
return xxx_messageInfo_EnumValueOptions.Size(m)
@@ -2006,7 +2073,7 @@ func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
func (*ServiceOptions) ProtoMessage() {}
func (*ServiceOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16}
+ return fileDescriptor_e5baabe45344a177, []int{16}
}
var extRange_ServiceOptions = []proto.ExtensionRange{
@@ -2016,14 +2083,15 @@ var extRange_ServiceOptions = []proto.ExtensionRange{
func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_ServiceOptions
}
+
func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
}
func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
}
-func (dst *ServiceOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceOptions.Merge(dst, src)
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceOptions.Merge(m, src)
}
func (m *ServiceOptions) XXX_Size() int {
return xxx_messageInfo_ServiceOptions.Size(m)
@@ -2069,7 +2137,7 @@ func (m *MethodOptions) Reset() { *m = MethodOptions{} }
func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
func (*MethodOptions) ProtoMessage() {}
func (*MethodOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17}
+ return fileDescriptor_e5baabe45344a177, []int{17}
}
var extRange_MethodOptions = []proto.ExtensionRange{
@@ -2079,14 +2147,15 @@ var extRange_MethodOptions = []proto.ExtensionRange{
func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
return extRange_MethodOptions
}
+
func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
}
func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
}
-func (dst *MethodOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MethodOptions.Merge(dst, src)
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MethodOptions.Merge(m, src)
}
func (m *MethodOptions) XXX_Size() int {
return xxx_messageInfo_MethodOptions.Size(m)
@@ -2146,16 +2215,17 @@ func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption) ProtoMessage() {}
func (*UninterpretedOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18}
+ return fileDescriptor_e5baabe45344a177, []int{18}
}
+
func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
}
func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
}
-func (dst *UninterpretedOption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption.Merge(dst, src)
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UninterpretedOption.Merge(m, src)
}
func (m *UninterpretedOption) XXX_Size() int {
return xxx_messageInfo_UninterpretedOption.Size(m)
@@ -2232,16 +2302,17 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio
func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0}
+ return fileDescriptor_e5baabe45344a177, []int{18, 0}
}
+
func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
}
func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
}
-func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src)
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
}
func (m *UninterpretedOption_NamePart) XXX_Size() int {
return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
@@ -2322,16 +2393,17 @@ func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo) ProtoMessage() {}
func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19}
+ return fileDescriptor_e5baabe45344a177, []int{19}
}
+
func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
}
func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
}
-func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo.Merge(dst, src)
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceCodeInfo.Merge(m, src)
}
func (m *SourceCodeInfo) XXX_Size() int {
return xxx_messageInfo_SourceCodeInfo.Size(m)
@@ -2439,16 +2511,17 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location
func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0}
+ return fileDescriptor_e5baabe45344a177, []int{19, 0}
}
+
func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
}
func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
}
-func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src)
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
}
func (m *SourceCodeInfo_Location) XXX_Size() int {
return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
@@ -2510,16 +2583,17 @@ func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo) ProtoMessage() {}
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20}
+ return fileDescriptor_e5baabe45344a177, []int{20}
}
+
func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
}
func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
}
-func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src)
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
}
func (m *GeneratedCodeInfo) XXX_Size() int {
return xxx_messageInfo_GeneratedCodeInfo.Size(m)
@@ -2559,16 +2633,17 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_
func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0}
+ return fileDescriptor_e5baabe45344a177, []int{20, 0}
}
+
func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
}
func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
}
-func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src)
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
}
func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
@@ -2608,6 +2683,12 @@ func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
}
func init() {
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+ proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+ proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
@@ -2635,178 +2716,172 @@ func init() {
proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
- proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
- proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
- proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
- proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
}
-func init() {
- proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6)
-}
-
-var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{
- // 2555 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7,
- 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f,
- 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12,
- 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb,
- 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8,
- 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62,
- 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73,
- 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13,
- 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b,
- 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0,
- 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42,
- 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86,
- 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47,
- 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb,
- 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65,
- 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b,
- 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5,
- 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99,
- 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39,
- 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36,
- 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b,
- 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91,
- 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5,
- 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20,
- 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c,
- 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f,
- 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60,
- 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2,
- 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05,
- 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a,
- 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab,
- 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 0xc9, 0xc5, 0x73, 0xbc,
- 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89,
- 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66,
- 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f,
- 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09,
- 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18,
- 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27,
- 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2,
- 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5,
- 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1,
- 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb,
- 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39,
- 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d,
- 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47,
- 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05,
- 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe,
- 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63,
- 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8,
- 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f,
- 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96,
- 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb,
- 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb,
- 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c,
- 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36,
- 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8,
- 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87,
- 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5,
- 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2,
- 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68,
- 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a,
- 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9,
- 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6,
- 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b,
- 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15,
- 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec,
- 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22,
- 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85,
- 0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf,
- 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4,
- 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b,
- 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b,
- 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48,
- 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02,
- 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07,
- 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2,
- 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e,
- 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe,
- 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe,
- 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22,
- 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a,
- 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c,
- 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4,
- 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33,
- 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18,
- 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69,
- 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3,
- 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5,
- 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14,
- 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01,
- 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9,
- 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c,
- 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a,
- 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76,
- 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9,
- 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1,
- 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7,
- 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75,
- 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e,
- 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10,
- 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70,
- 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64,
- 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11,
- 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7,
- 0xa8, 0x3d, 0x50, 0x34, 0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73,
- 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4,
- 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb,
- 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed,
- 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77,
- 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e,
- 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2,
- 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a,
- 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7,
- 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88,
- 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c,
- 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc,
- 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2,
- 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d,
- 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b,
- 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15,
- 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4,
- 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87,
- 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21,
- 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d,
- 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53,
- 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a,
- 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2,
- 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6,
- 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07,
- 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed,
- 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80,
- 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e,
- 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd,
- 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91,
- 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33,
- 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde,
- 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7,
- 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26,
- 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a,
- 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8,
- 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c,
- 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d,
- 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe,
- 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9,
- 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c,
- 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79,
- 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d,
- 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc,
- 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9,
- 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24,
- 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62,
- 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb,
- 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e,
- 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc,
- 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0,
- 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7,
- 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8,
- 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc,
- 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00,
- 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00,
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+
+var fileDescriptor_e5baabe45344a177 = []byte{
+ // 2589 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+ 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
+ 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
+ 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
+ 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
+ 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
+ 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
+ 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
+ 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
+ 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
+ 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
+ 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
+ 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
+ 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
+ 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
+ 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
+ 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
+ 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
+ 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
+ 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
+ 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
+ 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
+ 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
+ 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
+ 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
+ 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
+ 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
+ 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
+ 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
+ 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
+ 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
+ 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
+ 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
+ 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
+ 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
+ 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
+ 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
+ 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
+ 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
+ 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
+ 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
+ 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
+ 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
+ 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
+ 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
+ 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
+ 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
+ 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
+ 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
+ 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
+ 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
+ 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
+ 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
+ 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
+ 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
+ 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
+ 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
+ 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
+ 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
+ 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
+ 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
+ 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
+ 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
+ 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
+ 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
+ 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
+ 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
+ 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
+ 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
+ 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
+ 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
+ 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
+ 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
+ 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
+ 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
+ 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
+ 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
+ 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
+ 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
+ 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
+ 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
+ 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
+ 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
+ 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
+ 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
+ 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
+ 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
+ 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
+ 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
+ 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
+ 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
+ 0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
+ 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
+ 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
+ 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
+ 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
+ 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
+ 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
+ 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
+ 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
+ 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
+ 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
+ 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
+ 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
+ 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
+ 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
+ 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
+ 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
+ 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
+ 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
+ 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
+ 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
+ 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
+ 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
+ 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
+ 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
+ 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
+ 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
+ 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
+ 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
+ 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
+ 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
+ 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
+ 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
+ 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
+ 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
+ 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
+ 0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
+ 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
+ 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
+ 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
+ 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
+ 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
+ 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
+ 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
+ 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
+ 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
+ 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
+ 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
+ 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
+ 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
+ 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
+ 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
+ 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
+ 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
+ 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
+ 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
+ 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
+ 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
+ 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
+ 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
+ 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
+ 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
+ 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
+ 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
+ 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
+ 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
+ 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
+ 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
+ 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
+ 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index b2af97f4a98..70276e8f5c9 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error {
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
- aname, err := AnyMessageName(any)
- if err != nil {
+ // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+ // but it avoids scanning TypeUrl for the slash.
+ if any == nil {
return false
}
-
- return aname == proto.MessageName(pb)
+ name := proto.MessageName(pb)
+ prefix := len(any.TypeUrl) - len(name)
+ return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index f67edc7dc2b..78ee5233495 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
-package any // import "github.com/golang/protobuf/ptypes/any"
+package any
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@@ -99,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// }
//
type Any struct {
- // A URL/resource name whose content describes the type of the
- // serialized protocol buffer message.
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
//
- // For URLs which use the scheme `http`, `https`, or no scheme, the
- // following restrictions and interpretations apply:
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
- // * The last segment of the URL's path must represent the fully
- // qualified name of the type (as in `path/google.protobuf.Duration`).
- // The name should be in a canonical form (e.g., leading "." is
- // not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@@ -118,10 +121,14 @@ type Any struct {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -133,17 +140,19 @@ func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
- return fileDescriptor_any_744b9ca530f228db, []int{0}
+ return fileDescriptor_b53526c13ae22eb4, []int{0}
}
+
func (*Any) XXX_WellKnownType() string { return "Any" }
+
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
-func (dst *Any) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Any.Merge(dst, src)
+func (m *Any) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Any.Merge(m, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
@@ -172,9 +181,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-var fileDescriptor_any_744b9ca530f228db = []byte{
+var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
index 65cb0f8eb5f..26d1ca2fb58 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -82,7 +82,7 @@ func Duration(p *durpb.Duration) (time.Duration, error) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
- d += time.Duration(p.Nanos)
+ d += time.Duration(p.Nanos) * time.Nanosecond
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 4d75473b8b2..0d681ee21a0 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
-package duration // import "github.com/golang/protobuf/ptypes/duration"
+package duration
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
@@ -82,14 +84,14 @@ type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
- Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -99,17 +101,19 @@ func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_duration_e7d612259e3f0613, []int{0}
+ return fileDescriptor_23597b2ebd7ac6c5, []int{0}
}
+
func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
-func (dst *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(dst, src)
+func (m *Duration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Duration.Merge(m, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
@@ -138,11 +142,9 @@ func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
-func init() {
- proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
-}
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
-var fileDescriptor_duration_e7d612259e3f0613 = []byte{
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
index a69b403ce15..b4eb03eccf7 100644
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/empty.proto
-package empty // import "github.com/golang/protobuf/ptypes/empty"
+package empty
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
@@ -37,17 +39,19 @@ func (m *Empty) Reset() { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage() {}
func (*Empty) Descriptor() ([]byte, []int) {
- return fileDescriptor_empty_39e6d6db0632e5b2, []int{0}
+ return fileDescriptor_900544acb223d5b8, []int{0}
}
+
func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
func (m *Empty) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Empty.Unmarshal(m, b)
}
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
}
-func (dst *Empty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Empty.Merge(dst, src)
+func (m *Empty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Empty.Merge(m, src)
}
func (m *Empty) XXX_Size() int {
return xxx_messageInfo_Empty.Size(m)
@@ -62,9 +66,9 @@ func init() {
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
}
-func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) }
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
-var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{
+var fileDescriptor_900544acb223d5b8 = []byte{
// 148 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 00000000000..33daa73dd28
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+ // The kind of value.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{1}
+}
+
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+ return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+ if x, ok := m.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+ if x, ok := m.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{2}
+}
+
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+ return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+ proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+ proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+ proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+ 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+ 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+ 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+ 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+ 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+ 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+ 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+ 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+ 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+ 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+ 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+ 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+ 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+ 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+ 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+ 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+ 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+ 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+ 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+ 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+ 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+ 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+ 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 47f10dbc2cc..8da0df01acd 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -111,11 +111,9 @@ func TimestampNow() *tspb.Timestamp {
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
- seconds := t.Unix()
- nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
- Seconds: seconds,
- Nanos: nanos,
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index e9c22228216..31cd846de99 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
-package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
+package timestamp
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -16,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
@@ -81,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required, though only UTC (as indicated by "Z") is presently supported.
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@@ -92,20 +96,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
-// to obtain a formatter capable of generating timestamps in this format.
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -115,17 +119,19 @@ func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
+ return fileDescriptor_292007bbfe81227e, []int{0}
}
+
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
-func (dst *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(dst, src)
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(m, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
@@ -154,11 +160,9 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
-func init() {
- proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
-}
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
+var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 00000000000..add19a1adb0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+ // The double value.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DoubleValue) Reset() { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage() {}
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{0}
+}
+
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
+}
+func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
+}
+func (m *DoubleValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DoubleValue.Merge(m, src)
+}
+func (m *DoubleValue) XXX_Size() int {
+ return xxx_messageInfo_DoubleValue.Size(m)
+}
+func (m *DoubleValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_DoubleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
+
+func (m *DoubleValue) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+ // The float value.
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FloatValue) Reset() { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage() {}
+func (*FloatValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{1}
+}
+
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FloatValue.Unmarshal(m, b)
+}
+func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
+}
+func (m *FloatValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FloatValue.Merge(m, src)
+}
+func (m *FloatValue) XXX_Size() int {
+ return xxx_messageInfo_FloatValue.Size(m)
+}
+func (m *FloatValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_FloatValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FloatValue proto.InternalMessageInfo
+
+func (m *FloatValue) GetValue() float32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+ // The int64 value.
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int64Value) Reset() { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage() {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{2}
+}
+
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int64Value.Unmarshal(m, b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+}
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int64Value.Merge(m, src)
+}
+func (m *Int64Value) XXX_Size() int {
+ return xxx_messageInfo_Int64Value.Size(m)
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+ // The uint64 value.
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UInt64Value) Reset() { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage() {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{3}
+}
+
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+}
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UInt64Value.Merge(m, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+ return xxx_messageInfo_UInt64Value.Size(m)
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+ // The int32 value.
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int32Value) Reset() { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage() {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{4}
+}
+
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int32Value.Unmarshal(m, b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+}
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int32Value.Merge(m, src)
+}
+func (m *Int32Value) XXX_Size() int {
+ return xxx_messageInfo_Int32Value.Size(m)
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+ // The uint32 value.
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UInt32Value) Reset() { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage() {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{5}
+}
+
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+}
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UInt32Value.Merge(m, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+ return xxx_messageInfo_UInt32Value.Size(m)
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+ // The bool value.
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BoolValue) Reset() { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage() {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{6}
+}
+
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BoolValue.Unmarshal(m, b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+}
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BoolValue.Merge(m, src)
+}
+func (m *BoolValue) XXX_Size() int {
+ return xxx_messageInfo_BoolValue.Size(m)
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+ if m != nil {
+ return m.Value
+ }
+ return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+ // The string value.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StringValue) Reset() { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage() {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{7}
+}
+
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StringValue.Unmarshal(m, b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+}
+func (m *StringValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StringValue.Merge(m, src)
+}
+func (m *StringValue) XXX_Size() int {
+ return xxx_messageInfo_StringValue.Size(m)
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+ // The bytes value.
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BytesValue) Reset() { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage() {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5377b62bda767935, []int{8}
+}
+
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BytesValue.Unmarshal(m, b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+}
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BytesValue.Merge(m, src)
+}
+func (m *BytesValue) XXX_Size() int {
+ return xxx_messageInfo_BytesValue.Size(m)
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+ proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+ proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+ proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+ proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+ proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+ proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+ proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+ proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+ // 259 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+ 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+ 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+ 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+ 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+ 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+ 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+ 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+ 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+ 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+ 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+ 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+ 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+ 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+ 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+ 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/google/cel-go/LICENSE b/vendor/github.com/google/cel-go/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/google/cel-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/cel-go/cel/cel.go b/vendor/github.com/google/cel-go/cel/cel.go
new file mode 100644
index 00000000000..eb5a9f4cc52
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/cel.go
@@ -0,0 +1,19 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cel defines the top-level interface for the Common Expression Language (CEL).
+//
+// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
+// expressions against user-defined environments.
+package cel
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
new file mode 100644
index 00000000000..84823e34d06
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -0,0 +1,270 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/cel-go/checker/decls"
+
+ "github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/packages"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface representing a user-provided expression.
+type Source interface {
+ common.Source
+}
+
+// Ast interface representing the checked or unchecked expression, its source, and related metadata
+// such as source position information.
+type Ast interface {
+ // Expr returns the proto serializable instance of the parsed/checked expression.
+ Expr() *exprpb.Expr
+
+ // IsChecked returns whether the Ast value has been successfully type-checked.
+ IsChecked() bool
+
+ // ResultType returns the output type of the expression if the Ast has been type-checked,
+ // else returns decls.Dyn as the parse step cannot infer the type.
+ ResultType() *exprpb.Type
+
+ // Source returns a view of the input used to create the Ast. This source may be complete or
+ // constructed from the SourceInfo.
+ Source() Source
+
+ // SourceInfo returns character offset and newline position information about expression
+ // elements.
+ SourceInfo() *exprpb.SourceInfo
+}
+
+// Env defines functions for parsing and type-checking expressions against a set of user-defined
+// constants, variables, and functions. The Env interface also defines a method for generating
+// evaluable programs from parsed and checked Asts.
+type Env interface {
+ // Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
+ //
+ // Checking has failed if the returned Issues value and its Issues.Err() value is non-nil.
+ // Issues should be inspected if they are non-nil, but may not represent a fatal error.
+ //
+ // It is possible to have both non-nil Ast and Issues values returned from this call: however,
+ // the mere presence of an Ast does not imply that it is valid for use.
+ Check(ast Ast) (Ast, Issues)
+
+ // Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
+ //
+ // Parsing has failed if the returned Issues value and its Issues.Err() value is non-nil.
+ // Issues should be inspected if they are non-nil, but may not represent a fatal error.
+ //
+ // It is possible to have both non-nil Ast and Issues values returned from this call; however,
+ // the mere presence of an Ast does not imply that it is valid for use.
+ Parse(txt string) (Ast, Issues)
+
+ // Program generates an evaluable instance of the Ast within the environment (Env).
+ Program(ast Ast, opts ...ProgramOption) (Program, error)
+
+ // TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
+ TypeAdapter() ref.TypeAdapter
+
+ // TypeProvider returns the `ref.TypeProvider` configured for the environment.
+ TypeProvider() ref.TypeProvider
+}
+
+// Issues defines methods for inspecting the error details of parse and check calls.
+//
+// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues interface.
+type Issues interface {
+ fmt.Stringer
+
+ // Err returns an error value if the issues list contains one or more errors.
+ Err() error
+
+ // Errors returns the collection of errors encountered in more granular detail.
+ Errors() []common.Error
+}
+
+// NewEnv creates an Env instance suitable for parsing and checking expressions against a set of
+// user-defined constants, variables, and functions. Macros and the standard built-ins are enabled
+// by default.
+//
+// See the EnvOptions for the options that can be used to configure the environment.
+func NewEnv(opts ...EnvOption) (Env, error) {
+ registry := types.NewRegistry()
+ e := &env{
+ declarations: checker.StandardDeclarations(),
+ macros: parser.AllMacros,
+ pkg: packages.DefaultPackage,
+ provider: registry,
+ adapter: registry,
+ enableBuiltins: true,
+ enableDynamicAggregateLiterals: true,
+ }
+ // Customize the environment using the provided EnvOption values. If an error is
+ // generated at any step, this will be returned as a nil Env with a non-nil error.
+ var err error
+ for _, opt := range opts {
+ e, err = opt(e)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Construct the internal checker env, erroring if there is an issue adding the declarations.
+ ce := checker.NewEnv(e.pkg, e.provider)
+ ce.EnableDynamicAggregateLiterals(e.enableDynamicAggregateLiterals)
+ err = ce.Add(e.declarations...)
+ if err != nil {
+ return nil, err
+ }
+ e.chk = ce
+ return e, nil
+}
+
+// astValue is the internal implementation of the ast interface.
+type astValue struct {
+ expr *exprpb.Expr
+ info *exprpb.SourceInfo
+ source Source
+ refMap map[int64]*exprpb.Reference
+ typeMap map[int64]*exprpb.Type
+}
+
+// Expr implements the Ast interface method.
+func (ast *astValue) Expr() *exprpb.Expr {
+ return ast.expr
+}
+
+// IsChecked implements the Ast interface method.
+func (ast *astValue) IsChecked() bool {
+ return ast.refMap != nil && ast.typeMap != nil
+}
+
+// SourceInfo implements the Ast interface method.
+func (ast *astValue) SourceInfo() *exprpb.SourceInfo {
+ return ast.info
+}
+
+// ResultType implements the Ast interface method.
+func (ast *astValue) ResultType() *exprpb.Type {
+ if !ast.IsChecked() {
+ return decls.Dyn
+ }
+ return ast.typeMap[ast.expr.Id]
+}
+
+// Source implements the Ast interface method.
+func (ast *astValue) Source() Source {
+ return ast.source
+}
+
+// env is the internal implementation of the Env interface.
+type env struct {
+ declarations []*exprpb.Decl
+ macros []parser.Macro
+ pkg packages.Packager
+ provider ref.TypeProvider
+ adapter ref.TypeAdapter
+ chk *checker.Env
+ // environment options, true by default.
+ enableBuiltins bool
+ enableDynamicAggregateLiterals bool
+}
+
+// Check implements the Env interface method.
+func (e *env) Check(ast Ast) (Ast, Issues) {
+ pe, err := AstToParsedExpr(ast)
+ if err != nil {
+ errs := common.NewErrors(ast.Source())
+ errs.ReportError(common.NoLocation, err.Error())
+ return nil, &issues{errs: errs}
+ }
+ res, errs := checker.Check(pe, ast.Source(), e.chk)
+ if len(errs.GetErrors()) > 0 {
+ return nil, &issues{errs: errs}
+ }
+ // Manually create the Ast to ensure that the Ast source information (which may be more
+ // detailed than the information provided by Check) is returned to the caller.
+ return &astValue{
+ source: ast.Source(),
+ expr: res.GetExpr(),
+ info: res.GetSourceInfo(),
+ refMap: res.GetReferenceMap(),
+ typeMap: res.GetTypeMap()}, nil
+}
+
+// Parse implements the Env interface method.
+func (e *env) Parse(txt string) (Ast, Issues) {
+ src := common.NewTextSource(txt)
+ res, errs := parser.ParseWithMacros(src, e.macros)
+ if len(errs.GetErrors()) > 0 {
+ return nil, &issues{errs: errs}
+ }
+ // Manually create the Ast to ensure that the text source information is propagated on
+ // subsequent calls to Check.
+ return &astValue{
+ source: Source(src),
+ expr: res.GetExpr(),
+ info: res.GetSourceInfo()}, nil
+}
+
+// Program implements the Env interface method.
+func (e *env) Program(ast Ast, opts ...ProgramOption) (Program, error) {
+ if e.enableBuiltins {
+ opts = append(
+ []ProgramOption{Functions(functions.StandardOverloads()...)},
+ opts...)
+ }
+ return newProgram(e, ast, opts...)
+}
+
+// TypeAdapter implements the Env interface method.
+func (e *env) TypeAdapter() ref.TypeAdapter {
+ return e.adapter
+}
+
+// TypeProvider implements the Env interface method.
+func (e *env) TypeProvider() ref.TypeProvider {
+ return e.provider
+}
+
+// issues is the internal implementation of the Issues interface.
+type issues struct {
+ errs *common.Errors
+}
+
+// Err implements the Issues interface method.
+func (i *issues) Err() error {
+ if len(i.errs.GetErrors()) > 0 {
+ return errors.New(i.errs.ToDisplayString())
+ }
+ return nil
+}
+
+// Errors implements the Issues interface method.
+func (i *issues) Errors() []common.Error {
+ return i.errs.GetErrors()
+}
+
+// String converts the issues to a suitable display string.
+func (i *issues) String() string {
+ return i.errs.ToDisplayString()
+}
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
new file mode 100644
index 00000000000..68538a02fa8
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -0,0 +1,66 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// CheckedExprToAst converts a checked expression proto message to an Ast.
+func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) Ast {
+ return &astValue{
+ expr: checkedExpr.GetExpr(),
+ info: checkedExpr.GetSourceInfo(),
+ source: common.NewInfoSource(checkedExpr.GetSourceInfo()),
+ refMap: checkedExpr.GetReferenceMap(),
+ typeMap: checkedExpr.GetTypeMap(),
+ }
+}
+
+ // AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
+//
+// If the Ast.IsChecked() returns false, this conversion method will return an error.
+func AstToCheckedExpr(a Ast) (*exprpb.CheckedExpr, error) {
+ if !a.IsChecked() {
+ return nil, fmt.Errorf("cannot convert unchecked ast")
+ }
+ return &exprpb.CheckedExpr{
+ Expr: a.Expr(),
+ SourceInfo: a.SourceInfo(),
+ ReferenceMap: a.(*astValue).refMap,
+ TypeMap: a.(*astValue).typeMap,
+ }, nil
+}
+
+// ParsedExprToAst converts a parsed expression proto message to an Ast.
+func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) Ast {
+ return &astValue{
+ expr: parsedExpr.GetExpr(),
+ info: parsedExpr.GetSourceInfo(),
+ source: common.NewInfoSource(parsedExpr.GetSourceInfo()),
+ }
+}
+
+ // AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
+func AstToParsedExpr(a Ast) (*exprpb.ParsedExpr, error) {
+ return &exprpb.ParsedExpr{
+ Expr: a.Expr(),
+ SourceInfo: a.SourceInfo(),
+ }, nil
+}
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
new file mode 100644
index 00000000000..010f20f51d2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -0,0 +1,249 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/google/cel-go/common/packages"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter"
+ "github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
+
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// EnvOption is a functional interface for configuring the environment.
+type EnvOption func(e *env) (*env, error)
+
+// ClearBuiltIns option removes all standard types, operators, and macros from the environment.
+//
+// Note: This option must be specified before Declarations and/or Macros if used together.
+func ClearBuiltIns() EnvOption {
+ return func(e *env) (*env, error) {
+ e.declarations = []*exprpb.Decl{}
+ e.macros = parser.NoMacros
+ e.enableBuiltins = false
+ return e, nil
+ }
+}
+
+// ClearMacros options clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+//
+// Note: This option is a no-op when used with ClearBuiltIns, and must be used before Macros
+// if used together.
+func ClearMacros() EnvOption {
+ return func(e *env) (*env, error) {
+ e.macros = parser.NoMacros
+ return e, nil
+ }
+}
+
+// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption {
+ return func(e *env) (*env, error) {
+ e.adapter = adapter
+ return e, nil
+ }
+}
+
+// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider ref.TypeProvider) EnvOption {
+ return func(e *env) (*env, error) {
+ e.provider = provider
+ return e, nil
+ }
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: This option must be specified after ClearBuiltIns if both are used together.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+ // TODO: provide an alternative means of specifying declarations that doesn't refer
+ // to the underlying proto implementations.
+ return func(e *env) (*env, error) {
+ e.declarations = append(e.declarations, decls...)
+ return e, nil
+ }
+}
+
+// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree
+// during type-checking.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+ return func(e *env) (*env, error) {
+ e.enableDynamicAggregateLiterals = false
+ return e, nil
+ }
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearBuiltIns and/or ClearMacros if used together.
+func Macros(macros ...parser.Macro) EnvOption {
+ return func(e *env) (*env, error) {
+ e.macros = append(e.macros, macros...)
+ return e, nil
+ }
+}
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(pkg string) EnvOption {
+ return func(e *env) (*env, error) {
+ e.pkg = packages.NewPackage(pkg)
+ return e, nil
+ }
+}
+
+// Types adds one or more type declarations to the environment, allowing for construction of
+// type-literals whose definitions are included in the common expression built-in set.
+//
+// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type
+// provided to this option will result in an error.
+//
+// Well-known protobuf types within the `google.protobuf.*` package are included in the standard
+// environment by default.
+//
+// Note: This option must be specified after the CustomTypeProvider option when used together.
+func Types(addTypes ...interface{}) EnvOption {
+ return func(e *env) (*env, error) {
+ reg, isReg := e.provider.(ref.TypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ for _, t := range addTypes {
+ switch t.(type) {
+ case proto.Message:
+ err := reg.RegisterMessage(t.(proto.Message))
+ if err != nil {
+ return nil, err
+ }
+ case ref.Type:
+ err := reg.RegisterType(t.(ref.Type))
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported type: %T", t)
+ }
+ }
+ return e, nil
+ }
+}
+
+// TypeDescs adds type declarations for one or more protocol buffer
+// FileDescriptorProtos or FileDescriptorSets. Note that types added
+// via descriptor will not be able to instantiate messages, and so are
+// only useful for Check() operations.
+func TypeDescs(descs ...interface{}) EnvOption {
+ return func(e *env) (*env, error) {
+ reg, isReg := e.provider.(ref.TypeRegistry)
+ if !isReg {
+ return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider)
+ }
+ for _, d := range descs {
+ switch p := d.(type) {
+ case *descpb.FileDescriptorSet:
+ for _, fd := range p.File {
+ err := reg.RegisterDescriptor(fd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ case *descpb.FileDescriptorProto:
+ err := reg.RegisterDescriptor(p)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unsupported type descriptor: %T", d)
+ }
+ }
+ return e, nil
+ }
+}
+
+// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
+type ProgramOption func(p *prog) (*prog, error)
+
+// Functions adds function overloads that extend or override the set of CEL built-ins.
+func Functions(funcs ...*functions.Overload) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ if err := p.dispatcher.Add(funcs...); err != nil {
+ return nil, err
+ }
+ return p, nil
+ }
+}
+
+// Globals sets the global variable values for a given program. These values may be shadowed by
+// variables with the same name provided to the Eval() call.
+//
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
+func Globals(vars interface{}) ProgramOption {
+ return func(p *prog) (*prog, error) {
+ defaultVars, err :=
+ interpreter.NewAdaptingActivation(p.adapter, vars)
+ if err != nil {
+ return nil, err
+ }
+ p.defaultVars = defaultVars
+ return p, nil
+ }
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+ // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+ OptTrackState EvalOption = 1 << iota
+
+ // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+ OptExhaustiveEval EvalOption = 1< 0 {
+ // Instantiate overload's type with fresh type variables.
+ substitutions := newMapping()
+ for _, typePar := range overload.TypeParams {
+ substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar())
+ }
+
+ overloadType = substitute(substitutions, overloadType, false)
+ }
+
+ candidateArgTypes := overloadType.GetFunction().ArgTypes
+ if c.isAssignableList(argTypes, candidateArgTypes) {
+ if checkedRef == nil {
+ checkedRef = newFunctionReference(overload.OverloadId)
+ } else {
+ checkedRef.OverloadId = append(checkedRef.OverloadId, overload.OverloadId)
+ }
+
+ if resultType == nil {
+ // First matching overload, determines result type.
+ resultType = substitute(c.mappings,
+ overloadType.GetFunction().ResultType,
+ false)
+ } else {
+ // More than one matching overload, narrow result type to DYN.
+ resultType = decls.Dyn
+ }
+
+ }
+ }
+
+ if resultType == nil {
+ c.errors.noMatchingOverload(loc, fn.Name, argTypes, target != nil)
+ resultType = decls.Error
+ return nil
+ }
+
+ return newResolution(checkedRef, resultType)
+}
+
+func (c *checker) checkCreateList(e *exprpb.Expr) {
+ create := e.GetListExpr()
+ var elemType *exprpb.Type
+ for _, e := range create.Elements {
+ c.check(e)
+ elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
+ }
+ if elemType == nil {
+ // If the list is empty, assign free type var to elem type.
+ elemType = c.newTypeVar()
+ }
+ c.setType(e, decls.NewListType(elemType))
+}
+
+func (c *checker) checkCreateStruct(e *exprpb.Expr) {
+ str := e.GetStructExpr()
+ if str.MessageName != "" {
+ c.checkCreateMessage(e)
+ } else {
+ c.checkCreateMap(e)
+ }
+}
+
+func (c *checker) checkCreateMap(e *exprpb.Expr) {
+ mapVal := e.GetStructExpr()
+ var keyType *exprpb.Type
+ var valueType *exprpb.Type
+ for _, ent := range mapVal.GetEntries() {
+ key := ent.GetMapKey()
+ c.check(key)
+ keyType = c.joinTypes(c.location(key), keyType, c.getType(key))
+
+ c.check(ent.Value)
+ valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
+ }
+ if keyType == nil {
+ // If the map is empty, assign free type variables to typeKey and value type.
+ keyType = c.newTypeVar()
+ valueType = c.newTypeVar()
+ }
+ c.setType(e, decls.NewMapType(keyType, valueType))
+}
+
+func (c *checker) checkCreateMessage(e *exprpb.Expr) {
+ msgVal := e.GetStructExpr()
+ // Determine the type of the message.
+ messageType := decls.Error
+ decl := c.env.LookupIdent(msgVal.MessageName)
+ if decl == nil {
+ c.errors.undeclaredReference(
+ c.location(e), c.env.packager.Package(), msgVal.MessageName)
+ return
+ }
+
+ c.setReference(e, newIdentReference(decl.Name, nil))
+ ident := decl.GetIdent()
+ identKind := kindOf(ident.Type)
+ if identKind != kindError {
+ if identKind != kindType {
+ c.errors.notAType(c.location(e), ident.Type)
+ } else {
+ messageType = ident.Type.GetType()
+ if kindOf(messageType) != kindObject {
+ c.errors.notAMessageType(c.location(e), messageType)
+ messageType = decls.Error
+ }
+ }
+ }
+ c.setType(e, messageType)
+
+ // Check the field initializers.
+ for _, ent := range msgVal.GetEntries() {
+ field := ent.GetFieldKey()
+ value := ent.Value
+ c.check(value)
+
+ fieldType := decls.Error
+ if t, found := c.lookupFieldType(
+ c.locationByID(ent.Id),
+ messageType.GetMessageType(),
+ field); found {
+ fieldType = t.Type
+ }
+ if !c.isAssignable(fieldType, c.getType(value)) {
+ c.errors.fieldTypeMismatch(
+ c.locationByID(ent.Id), field, fieldType, c.getType(value))
+ }
+ }
+}
+
+func (c *checker) checkComprehension(e *exprpb.Expr) {
+ comp := e.GetComprehensionExpr()
+ c.check(comp.IterRange)
+ c.check(comp.AccuInit)
+ accuType := c.getType(comp.AccuInit)
+ rangeType := c.getType(comp.IterRange)
+ var varType *exprpb.Type
+
+ switch kindOf(rangeType) {
+ case kindList:
+ varType = rangeType.GetListType().ElemType
+ case kindMap:
+ // Ranges over the keys.
+ varType = rangeType.GetMapType().KeyType
+ case kindDyn, kindError:
+ varType = decls.Dyn
+ default:
+ c.errors.notAComprehensionRange(c.location(comp.IterRange), rangeType)
+ }
+
+ // Create a scope for the comprehension since it has a local accumulation variable.
+ // This scope will contain the accumulation variable used to compute the result.
+ c.env = c.env.enterScope()
+ c.env.Add(decls.NewIdent(comp.AccuVar, accuType, nil))
+ // Create a block scope for the loop.
+ c.env = c.env.enterScope()
+ c.env.Add(decls.NewIdent(comp.IterVar, varType, nil))
+ // Check the variable references in the condition and step.
+ c.check(comp.LoopCondition)
+ c.assertType(comp.LoopCondition, decls.Bool)
+ c.check(comp.LoopStep)
+ c.assertType(comp.LoopStep, accuType)
+ // Exit the loop's block scope before checking the result.
+ c.env = c.env.exitScope()
+ c.check(comp.Result)
+ // Exit the comprehension scope.
+ c.env = c.env.exitScope()
+ c.setType(e, c.getType(comp.Result))
+}
+
+// Checks compatibility of joined types, and returns the most general common type.
+func (c *checker) joinTypes(loc common.Location,
+ previous *exprpb.Type,
+ current *exprpb.Type) *exprpb.Type {
+ if previous == nil {
+ return current
+ }
+ if c.isAssignable(previous, current) {
+ return mostGeneral(previous, current)
+ }
+ if c.dynAggregateLiteralElementTypesEnabled() {
+ return decls.Dyn
+ }
+ c.errors.typeMismatch(loc, previous, current)
+ return decls.Error
+}
+
+func (c *checker) dynAggregateLiteralElementTypesEnabled() bool {
+ return c.env.aggLitElemType == dynElementType
+}
+
+func (c *checker) newTypeVar() *exprpb.Type {
+ id := c.freeTypeVarCounter
+ c.freeTypeVarCounter++
+ return decls.NewTypeParamType(fmt.Sprintf("_var%d", id))
+}
+
+func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool {
+ subs := isAssignable(c.mappings, t1, t2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
+ subs := isAssignableList(c.mappings, l1, l2)
+ if subs != nil {
+ c.mappings = subs
+ return true
+ }
+
+ return false
+}
+
+func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) {
+ if _, found := c.env.provider.FindType(messageType); !found {
+ // This should not happen, anyway, report an error.
+ c.errors.unexpectedFailedResolution(l, messageType)
+ return nil, false
+ }
+
+ if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found {
+ return ft, found
+ }
+
+ c.errors.undefinedField(l, fieldName)
+ return nil, false
+}
+
+func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) {
+ if old, found := c.types[e.Id]; found && !proto.Equal(old, t) {
+ panic(fmt.Sprintf("(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.Id, old, t))
+ }
+ c.types[e.Id] = t
+}
+
+func (c *checker) getType(e *exprpb.Expr) *exprpb.Type {
+ return c.types[e.Id]
+}
+
+func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) {
+ if old, found := c.references[e.Id]; found && !proto.Equal(old, r) {
+ panic(fmt.Sprintf("Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.Id, old, r))
+ }
+ c.references[e.Id] = r
+}
+
+func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) {
+ if !c.isAssignable(t, c.getType(e)) {
+ c.errors.typeMismatch(c.location(e), t, c.getType(e))
+ }
+}
+
+type overloadResolution struct {
+ Reference *exprpb.Reference
+ Type *exprpb.Type
+}
+
+func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution {
+ return &overloadResolution{
+ Reference: checkedRef,
+ Type: t,
+ }
+}
+
+func (c *checker) location(e *exprpb.Expr) common.Location {
+ return c.locationByID(e.Id)
+}
+
+func (c *checker) locationByID(id int64) common.Location {
+ positions := c.sourceInfo.GetPositions()
+ var line = 1
+ var col = 0
+ if offset, found := positions[id]; found {
+ col = int(offset)
+ for _, lineOffset := range c.sourceInfo.LineOffsets {
+ if lineOffset < offset {
+ line++
+ col = int(offset - lineOffset)
+ } else {
+ break
+ }
+ }
+ return common.NewLocation(line, col)
+ }
+ return common.NoLocation
+}
+
+func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference {
+ return &exprpb.Reference{Name: name, Value: value}
+}
+
+func newFunctionReference(overloads ...string) *exprpb.Reference {
+ return &exprpb.Reference{OverloadId: overloads}
+}
+
+ // Attempt to interpret an expression as a qualified name. This traverses select and getIdent
+ // expressions and returns the name they constitute, or a false ok value if the expression
+ // cannot be interpreted this way.
+func toQualifiedName(e *exprpb.Expr) (string, bool) {
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_IdentExpr:
+ i := e.GetIdentExpr()
+ return i.Name, true
+ case *exprpb.Expr_SelectExpr:
+ s := e.GetSelectExpr()
+ if qname, found := toQualifiedName(s.Operand); found {
+ return qname + "." + s.Field, true
+ }
+ }
+ return "", false
+}
diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go
new file mode 100644
index 00000000000..76bae0c14cf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -0,0 +1,215 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package decls provides helpers for creating variable and function declarations.
+package decls
+
+import (
+ emptypb "github.com/golang/protobuf/ptypes/empty"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+var (
+ // Error type used to communicate issues during type-checking.
+ Error = &exprpb.Type{
+ TypeKind: &exprpb.Type_Error{
+ Error: &emptypb.Empty{}}}
+
+ // Dyn is a top-type used to represent any value.
+ Dyn = &exprpb.Type{
+ TypeKind: &exprpb.Type_Dyn{
+ Dyn: &emptypb.Empty{}}}
+)
+
+// Commonly used types.
+var (
+ Bool = NewPrimitiveType(exprpb.Type_BOOL)
+ Bytes = NewPrimitiveType(exprpb.Type_BYTES)
+ Double = NewPrimitiveType(exprpb.Type_DOUBLE)
+ Int = NewPrimitiveType(exprpb.Type_INT64)
+ Null = &exprpb.Type{
+ TypeKind: &exprpb.Type_Null{
+ Null: structpb.NullValue_NULL_VALUE}}
+ String = NewPrimitiveType(exprpb.Type_STRING)
+ Uint = NewPrimitiveType(exprpb.Type_UINT64)
+)
+
+// Well-known types.
+// TODO: Replace with an abstract type registry.
+var (
+ Any = NewWellKnownType(exprpb.Type_ANY)
+ Duration = NewWellKnownType(exprpb.Type_DURATION)
+ Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP)
+)
+
+// NewAbstractType creates an abstract type declaration which references a proto
+// message name and may also include type parameters.
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_AbstractType_{
+ AbstractType: &exprpb.Type_AbstractType{
+ Name: name,
+ ParameterTypes: paramTypes}}}
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+ argTypes ...*exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Function{
+ Function: &exprpb.Type_FunctionType{
+ ResultType: resultType,
+ ArgTypes: argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+ overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Function{
+ Function: &exprpb.Decl_FunctionDecl{
+ Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+ return &exprpb.Decl{
+ Name: name,
+ DeclKind: &exprpb.Decl_Ident{
+ Ident: &exprpb.Decl_IdentDecl{
+ Type: t,
+ Value: v}}}
+}
+
+// NewInstanceOverload creates an instance (receiver-style) function overload
+// contract, where the first argument type is the call target.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+	resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+	return &exprpb.Decl_FunctionDecl_Overload{
+		OverloadId:         id,
+		ResultType:         resultType,
+		Params:             argTypes,
+		IsInstanceFunction: true}
+}
+
+// NewListType generates a new list with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_ListType_{
+ ListType: &exprpb.Type_ListType{
+ ElemType: elem}}}
+}
+
+// NewMapType generates a new map with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MapType_{
+ MapType: &exprpb.Type_MapType{
+ KeyType: key,
+ ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
+func NewOverload(id string, argTypes []*exprpb.Type,
+ resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ IsInstanceFunction: false}
+}
+
+// NewParameterizedInstanceOverload creates a parametric function instance overload type.
+func NewParameterizedInstanceOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: true}
+}
+
+// NewParameterizedOverload creates a parametric function overload type.
+func NewParameterizedOverload(id string,
+ argTypes []*exprpb.Type,
+ resultType *exprpb.Type,
+ typeParams []string) *exprpb.Decl_FunctionDecl_Overload {
+ return &exprpb.Decl_FunctionDecl_Overload{
+ OverloadId: id,
+ ResultType: resultType,
+ Params: argTypes,
+ TypeParams: typeParams,
+ IsInstanceFunction: false}
+}
+
+// NewPrimitiveType creates a type for a primitive value. See the var declarations
+// for Int, Uint, etc.
+func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{
+ Primitive: primitive}}
+}
+
+// NewTypeType creates a new type designating a type.
+func NewTypeType(nested *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: nested}}
+}
+
+// NewTypeParamType creates a type corresponding to a named, contextual parameter.
+func NewTypeParamType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_TypeParam{
+ TypeParam: name}}
+}
+
+// NewWellKnownType creates a type corresponding to a protobuf well-known type
+// value.
+func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{
+ WellKnown: wellKnown}}
+}
+
+// NewWrapperType creates a wrapped primitive type instance. Wrapped types
+// are roughly equivalent to a nullable, or optionally valued type.
+// Panics if the wrapped type is not a primitive.
+func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type {
+	primitive := wrapped.GetPrimitive()
+	if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED {
+		// TODO: return an error
+		panic("Wrapped type must be a primitive")
+	}
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_Wrapper{
+			Wrapper: primitive}}
+}
diff --git a/vendor/github.com/google/cel-go/checker/decls/scopes.go b/vendor/github.com/google/cel-go/checker/decls/scopes.go
new file mode 100644
index 00000000000..1064c73b1f5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/decls/scopes.go
@@ -0,0 +1,115 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package decls
+
+import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+
+// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all
+// identifiers in scope and an optional parent representing outer scopes.
+// Each Groups value is a mapping of names to Decls in the ident and function namespaces.
+// Lookups are performed such that bindings in inner scopes shadow those in outer scopes.
+type Scopes struct {
+ parent *Scopes
+ scopes *Group
+}
+
+// NewScopes creates a new, empty Scopes.
+// Some operations can't be safely performed until a Group is added with Push.
+func NewScopes() *Scopes {
+ return &Scopes{
+ scopes: newGroup(),
+ }
+}
+
+// Push creates a new Scopes value which references the current Scope as its parent.
+func (s *Scopes) Push() *Scopes {
+ return &Scopes{
+ parent: s,
+ scopes: newGroup(),
+ }
+}
+
+// Pop returns the parent Scopes value for the current scope, or the current scope if the parent
+// is nil.
+func (s *Scopes) Pop() *Scopes {
+ if s.parent != nil {
+ return s.parent
+ }
+ // TODO: Consider whether this should be an error / panic.
+ return s
+}
+
+// AddIdent adds the ident Decl in the current scope.
+// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten.
+func (s *Scopes) AddIdent(decl *exprpb.Decl) {
+ s.scopes.idents[decl.Name] = decl
+}
+
+// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be
+// found.
+// Note: The search is performed from innermost to outermost.
+func (s *Scopes) FindIdent(name string) *exprpb.Decl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ if s.parent != nil {
+ return s.parent.FindIdent(name)
+ }
+ return nil
+}
+
+// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or
+// nil if one does not exist.
+// Note: The search is only performed on the current scope and does not search outer scopes.
+func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl {
+ if ident, found := s.scopes.idents[name]; found {
+ return ident
+ }
+ return nil
+}
+
+// AddFunction adds the function Decl to the current scope.
+// Note: Any previous entry for a function in the current scope with the same name is overwritten.
+func (s *Scopes) AddFunction(fn *exprpb.Decl) {
+ s.scopes.functions[fn.Name] = fn
+}
+
+// FindFunction finds the first function Decl with a matching name in Scopes.
+// The search is performed from innermost to outermost.
+// Returns nil if no such function in Scopes.
+func (s *Scopes) FindFunction(name string) *exprpb.Decl {
+ if fn, found := s.scopes.functions[name]; found {
+ return fn
+ }
+ if s.parent != nil {
+ return s.parent.FindFunction(name)
+ }
+ return nil
+}
+
+// Group is a set of Decls that is pushed on or popped off a Scopes as a unit.
+// Contains separate namespaces for identifier and function Decls.
+// (Should be named "Scope" perhaps?)
+type Group struct {
+	idents    map[string]*exprpb.Decl
+	functions map[string]*exprpb.Decl
+}
+
+func newGroup() *Group {
+ return &Group{
+ idents: make(map[string]*exprpb.Decl),
+ functions: make(map[string]*exprpb.Decl),
+ }
+}
diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go
new file mode 100644
index 00000000000..456e127ac05
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/env.go
@@ -0,0 +1,352 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/packages"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type aggregateLiteralElementType int
+
+const (
+ dynElementType aggregateLiteralElementType = iota
+ homogenousElementType aggregateLiteralElementType = 1 << iota
+)
+
+// Env is the environment for type checking.
+// It consists of a Packager, a Type Provider, declarations, and collection of errors encountered
+// during checking.
+type Env struct {
+ packager packages.Packager
+ provider ref.TypeProvider
+ declarations *decls.Scopes
+ aggLitElemType aggregateLiteralElementType
+}
+
+// NewEnv returns a new *Env with the given parameters.
+func NewEnv(packager packages.Packager, provider ref.TypeProvider) *Env {
+ declarations := decls.NewScopes()
+ declarations.Push()
+
+ return &Env{
+ packager: packager,
+ provider: provider,
+ declarations: declarations,
+ }
+}
+
+// NewStandardEnv returns a new *Env with the given params plus standard declarations.
+func NewStandardEnv(packager packages.Packager, provider ref.TypeProvider) *Env {
+	e := NewEnv(packager, provider)
+	if err := e.Add(StandardDeclarations()...); err != nil {
+		// The standard declaration set should never have duplicate declarations.
+		panic(err)
+	}
+	// TODO: isolate standard declarations from the custom set which may be provided later.
+	return e
+}
+
+// EnableDynamicAggregateLiterals determines whether list and map literals may support mixed
+// element types at check-time. This does not preclude the presence of a dynamic list or map
+// somewhere in the CEL evaluation process.
+func (e *Env) EnableDynamicAggregateLiterals(enabled bool) *Env {
+	e.aggLitElemType = dynElementType
+	if !enabled {
+		e.aggLitElemType = homogenousElementType
+	}
+	return e
+}
+
+// Add adds new Decl protos to the Env.
+// Returns an error for identifier redeclarations.
+func (e *Env) Add(decls ...*exprpb.Decl) error {
+ errMsgs := make([]errorMsg, 0)
+ for _, decl := range decls {
+ switch decl.DeclKind.(type) {
+ case *exprpb.Decl_Ident:
+ errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl)))
+ case *exprpb.Decl_Function:
+ errMsgs = append(errMsgs, e.addFunction(sanitizeFunction(decl))...)
+ }
+ }
+ return formatError(errMsgs)
+}
+
+// LookupIdent returns a Decl proto for typeName as an identifier in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(typeName string) *exprpb.Decl {
+	for _, candidate := range e.packager.ResolveCandidateNames(typeName) {
+		if ident := e.declarations.FindIdent(candidate); ident != nil {
+			return ident
+		}
+
+		// Next try to import the name as a reference to a message type. If found,
+		// the declaration is added to the outermost (global) scope of the
+		// environment, so next time we can access it faster.
+		if t, found := e.provider.FindType(candidate); found {
+			decl := decls.NewIdent(candidate, t, nil)
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+
+		// Next try to import this as an enum value by splitting the name in a type prefix and
+		// the enum inside.
+		if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+			decl := decls.NewIdent(candidate,
+				decls.Int,
+				&exprpb.Constant{
+					ConstantKind: &exprpb.Constant_Int64Value{
+						Int64Value: int64(enumValue.(types.Int))}})
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+	}
+	return nil
+}
+
+// LookupFunction returns a Decl proto for typeName as a function in env.
+// Returns nil if no such function is found in env.
+func (e *Env) LookupFunction(typeName string) *exprpb.Decl {
+ for _, candidate := range e.packager.ResolveCandidateNames(typeName) {
+ if fn := e.declarations.FindFunction(candidate); fn != nil {
+ return fn
+ }
+ }
+ return nil
+}
+
+// addOverload adds overload to function declaration f.
+// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro.
+func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg {
+ errMsgs := make([]errorMsg, 0)
+ function := f.GetFunction()
+ emptyMappings := newMapping()
+ overloadFunction := decls.NewFunctionType(overload.GetResultType(),
+ overload.GetParams()...)
+ overloadErased := substitute(emptyMappings, overloadFunction, true)
+ for _, existing := range function.GetOverloads() {
+ existingFunction := decls.NewFunctionType(existing.GetResultType(),
+ existing.GetParams()...)
+ existingErased := substitute(emptyMappings, existingFunction, true)
+ overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil ||
+ isAssignable(emptyMappings, existingErased, overloadErased) != nil
+ if overlap &&
+ overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() {
+ errMsgs = append(errMsgs,
+ overlappingOverloadError(f.Name,
+ overload.GetOverloadId(), overloadFunction,
+ existing.GetOverloadId(), existingFunction))
+ }
+ }
+
+ for _, macro := range parser.AllMacros {
+ if macro.Function() == f.Name && macro.IsReceiverStyle() == overload.GetIsInstanceFunction() &&
+ macro.ArgCount() == len(overload.GetParams()) {
+ errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount()))
+ }
+ }
+ if len(errMsgs) > 0 {
+ return errMsgs
+ }
+ function.Overloads = append(function.GetOverloads(), overload)
+ return errMsgs
+}
+
+// addFunction adds the function Decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
+// If overload overlaps with an existing overload, adds to the errors in the Env instead.
+func (e *Env) addFunction(decl *exprpb.Decl) []errorMsg {
+ current := e.declarations.FindFunction(decl.Name)
+ if current == nil {
+ //Add the function declaration without overloads and check the overloads below.
+ current = decls.NewFunction(decl.Name)
+ e.declarations.AddFunction(current)
+ }
+
+ errorMsgs := make([]errorMsg, 0)
+ for _, overload := range decl.GetFunction().GetOverloads() {
+ errorMsgs = append(errorMsgs, e.addOverload(current, overload)...)
+ }
+ return errorMsgs
+}
+
+// addIdent adds the Decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *exprpb.Decl) errorMsg {
+ current := e.declarations.FindIdentInScope(decl.Name)
+ if current != nil {
+ return overlappingIdentifierError(decl.Name)
+ }
+ e.declarations.AddIdent(decl)
+ return ""
+}
+
+// sanitizeFunction replaces well-known types referenced by message name with their equivalent
+// CEL built-in type instances.
+func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
+	fn := decl.GetFunction()
+	// Determine whether the declaration requires replacements from proto-based message type
+	// references to well-known CEL type references.
+	var needsSanitizing bool
+	for _, o := range fn.GetOverloads() {
+		if isObjectWellKnownType(o.GetResultType()) {
+			needsSanitizing = true
+			break
+		}
+		for _, p := range o.GetParams() {
+			if isObjectWellKnownType(p) {
+				needsSanitizing = true
+				break
+			}
+		}
+	}
+
+	// Early return if the declaration requires no modification.
+	if !needsSanitizing {
+		return decl
+	}
+
+	// Sanitize all of the overloads if any overload requires an update to its type references.
+	// Both slices below must be allocated with a length (not merely a capacity) equal to the
+	// source slice: overloads is written by index, and copy only transfers min(len(dst),
+	// len(src)) elements, so a zero-length destination would copy nothing and indexing would
+	// panic.
+	overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads()))
+	for i, o := range fn.GetOverloads() {
+		var sanitized bool
+		rt := o.GetResultType()
+		if isObjectWellKnownType(rt) {
+			rt = getObjectWellKnownType(rt)
+			sanitized = true
+		}
+		params := make([]*exprpb.Type, len(o.GetParams()))
+		copy(params, o.GetParams())
+		for j, p := range params {
+			if isObjectWellKnownType(p) {
+				params[j] = getObjectWellKnownType(p)
+				sanitized = true
+			}
+		}
+		// If sanitized, replace the overload definition.
+		if sanitized {
+			if o.IsInstanceFunction {
+				overloads[i] =
+					decls.NewInstanceOverload(o.GetOverloadId(), params, rt)
+			} else {
+				overloads[i] =
+					decls.NewOverload(o.GetOverloadId(), params, rt)
+			}
+		} else {
+			// Otherwise, preserve the original overload.
+			overloads[i] = o
+		}
+	}
+	return decls.NewFunction(decl.GetName(), overloads...)
+}
+
+// sanitizeIdent replaces the identifier's well-known types referenced by message name with
+// references to CEL built-in type instances.
+func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl {
+ id := decl.GetIdent()
+ t := id.GetType()
+ if !isObjectWellKnownType(t) {
+ return decl
+ }
+ return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue())
+}
+
+// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name
+// that corresponds the message name of a built-in CEL type.
+func isObjectWellKnownType(t *exprpb.Type) bool {
+ if kindOf(t) != kindObject {
+ return false
+ }
+ _, found := pb.CheckedWellKnowns[t.GetMessageType()]
+ return found
+}
+
+// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name.
+func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type {
+ return pb.CheckedWellKnowns[t.GetMessageType()]
+}
+
+// enterScope creates a new Env instance with a new innermost declaration scope.
+func (e *Env) enterScope() *Env {
+ childDecls := e.declarations.Push()
+ return &Env{
+ declarations: childDecls,
+ packager: e.packager,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// exitScope creates a new Env instance with the nearest outer declaration scope.
+func (e *Env) exitScope() *Env {
+ parentDecls := e.declarations.Pop()
+ return &Env{
+ declarations: parentDecls,
+ packager: e.packager,
+ provider: e.provider,
+ aggLitElemType: e.aggLitElemType,
+ }
+}
+
+// errorMsg is a type alias meant to represent error-based return values which
+// may be accumulated into an error at a later point in execution.
+type errorMsg string
+
+func overlappingIdentifierError(name string) errorMsg {
+ return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name))
+}
+
+func overlappingOverloadError(name string,
+ overloadID1 string, f1 *exprpb.Type,
+ overloadID2 string, f2 *exprpb.Type) errorMsg {
+ return errorMsg(fmt.Sprintf(
+ "overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+
+ "cannot be distinguished from '%s' with overloadId: '%s')",
+ name,
+ FormatCheckedType(f1),
+ overloadID1,
+ FormatCheckedType(f2),
+ overloadID2))
+}
+
+func overlappingMacroError(name string, argCount int) errorMsg {
+ return errorMsg(fmt.Sprintf(
+ "overlapping macro for name '%s' with %d args", name, argCount))
+}
+
+func formatError(errMsgs []errorMsg) error {
+ errStrs := make([]string, 0)
+ if len(errMsgs) > 0 {
+ for i := 0; i < len(errMsgs); i++ {
+ if errMsgs[i] != "" {
+ errStrs = append(errStrs, string(errMsgs[i]))
+ }
+ }
+ }
+ if len(errStrs) > 0 {
+ return fmt.Errorf("%s", strings.Join(errStrs, "\n"))
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go
new file mode 100644
index 00000000000..33af57cd216
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/errors.go
@@ -0,0 +1,123 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// typeErrors is a specialization of Errors.
+type typeErrors struct {
+ *common.Errors
+}
+
+func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) {
+ e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container)
+}
+
+func (e *typeErrors) expressionDoesNotSelectField(l common.Location) {
+ e.ReportError(l, "expression does not select a field")
+}
+
+func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) {
+ e.ReportError(l, "type '%s' does not support field selection", t)
+}
+
+func (e *typeErrors) undefinedField(l common.Location, field string) {
+ e.ReportError(l, "undefined field '%s'", field)
+}
+
+func (e *typeErrors) fieldDoesNotSupportPresenceCheck(l common.Location, field string) {
+ e.ReportError(l, "field '%s' does not support presence check", field)
+}
+
+func (e *typeErrors) overlappingOverload(l common.Location, name string, overloadID1 string, f1 *exprpb.Type,
+ overloadID2 string, f2 *exprpb.Type) {
+ e.ReportError(l, "overlapping overload for name '%s' (type '%s' with overloadId: '%s' cannot be distinguished from '%s' with "+
+ "overloadId: '%s')", name, FormatCheckedType(f1), overloadID1, FormatCheckedType(f2), overloadID2)
+}
+
+func (e *typeErrors) overlappingMacro(l common.Location, name string, args int) {
+ e.ReportError(l, "overload for name '%s' with %d argument(s) overlaps with predefined macro",
+ name, args)
+}
+
+func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, isInstance bool) {
+ signature := formatFunction(nil, args, isInstance)
+ e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature)
+}
+
+func (e *typeErrors) aggregateTypeMismatch(l common.Location, aggregate *exprpb.Type, member *exprpb.Type) {
+ e.ReportError(
+ l,
+ "type '%s' does not match previous type '%s' in aggregate. Use 'dyn(x)' to make the aggregate dynamic.",
+ FormatCheckedType(member),
+ FormatCheckedType(aggregate))
+}
+
+func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) {
+ e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t)
+}
+
+func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) {
+ e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t))
+}
+
+func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) {
+ e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'",
+ name, FormatCheckedType(field), FormatCheckedType(value))
+}
+
+func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) {
+ e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName)
+}
+
+func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) {
+ e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)",
+ FormatCheckedType(t))
+}
+
+func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) {
+ e.ReportError(l, "expected type '%s' but found '%s'",
+ FormatCheckedType(expected), FormatCheckedType(actual))
+}
+
+func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string {
+ result := ""
+ if isInstance {
+ target := argTypes[0]
+ argTypes = argTypes[1:]
+
+ result += FormatCheckedType(target)
+ result += "."
+ }
+
+ result += "("
+ for i, arg := range argTypes {
+ if i > 0 {
+ result += ", "
+ }
+ result += FormatCheckedType(arg)
+ }
+ result += ")"
+ if resultType != nil {
+ result += " -> "
+ result += FormatCheckedType(resultType)
+ }
+
+ return result
+}
diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go
new file mode 100644
index 00000000000..bd5e412db67
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/mapping.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// mapping is a substitution table keyed by a type's string key (see typeKey),
+// mapping a source type to the type substituted for it during type checking.
+type mapping struct {
+	mapping map[string]*exprpb.Type
+}
+
+func newMapping() *mapping {
+ return &mapping{
+ mapping: make(map[string]*exprpb.Type),
+ }
+}
+
+func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) {
+ m.mapping[typeKey(from)] = to
+}
+
+func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) {
+ if r, found := m.mapping[typeKey(from)]; found {
+ return r, found
+ }
+ return nil, false
+}
+
+func (m *mapping) copy() *mapping {
+ c := newMapping()
+
+ for k, v := range m.mapping {
+ c.mapping[k] = v
+ }
+ return c
+}
+
+func (m *mapping) String() string {
+ result := "{"
+
+ for k, v := range m.mapping {
+ result += fmt.Sprintf("%v => %v ", k, v)
+ }
+
+ result += "}"
+ return result
+}
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
new file mode 100644
index 00000000000..15c25ecc686
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -0,0 +1,71 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/common/debug"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type semanticAdorner struct {
+ checks *exprpb.CheckedExpr
+}
+
+var _ debug.Adorner = &semanticAdorner{}
+
+// GetMetadata returns a debug-string suffix for an Expr node: "~<type>" from
+// the CheckedExpr type map, followed by "^<name>" for an ident reference or
+// "^id1|id2|..." for function overload references. Non-Expr elements yield "".
+func (a *semanticAdorner) GetMetadata(elem interface{}) string {
+	result := ""
+	e, isExpr := elem.(*exprpb.Expr)
+	if !isExpr {
+		return result
+	}
+	t := a.checks.TypeMap[e.Id]
+	if t != nil {
+		result += "~"
+		result += FormatCheckedType(t)
+	}
+
+	switch e.ExprKind.(type) {
+	case *exprpb.Expr_IdentExpr,
+		*exprpb.Expr_CallExpr,
+		*exprpb.Expr_StructExpr,
+		*exprpb.Expr_SelectExpr:
+		if ref, found := a.checks.ReferenceMap[e.Id]; found {
+			if len(ref.GetOverloadId()) == 0 {
+				result += "^" + ref.Name
+			} else {
+				for i, overload := range ref.OverloadId {
+					if i == 0 {
+						result += "^"
+					} else {
+						result += "|"
+					}
+					result += overload
+				}
+			}
+		}
+	}
+
+	return result
+}
+
+// Print returns a string representation of the Expr message,
+// annotated with types from the CheckedExpr. The Expr must
+// be a sub-expression embedded in the CheckedExpr.
+func Print(e *exprpb.Expr, checks *exprpb.CheckedExpr) string {
+ a := &semanticAdorner{checks: checks}
+ return debug.ToAdornedDebugString(e, a)
+}
diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go
new file mode 100644
index 00000000000..773ab44a258
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/standard.go
@@ -0,0 +1,440 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// StandardDeclarations returns the Decls for all functions and constants in
+// the evaluator: type identifiers plus the operators, relations, arithmetic,
+// collection, conversion, string, and date/time function overloads of the
+// CEL standard environment.
+func StandardDeclarations() []*exprpb.Decl {
+	// Some shortcuts we use when building declarations.
+	paramA := decls.NewTypeParamType("A")
+	typeParamAList := []string{"A"}
+	listOfA := decls.NewListType(paramA)
+	paramB := decls.NewTypeParamType("B")
+	typeParamABList := []string{"A", "B"}
+	mapOfAB := decls.NewMapType(paramA, paramB)
+
+	// Idents for the primitive type names, plus list, map, null_type, and the
+	// unparameterized "type" type, each declared with type type(T).
+	var idents []*exprpb.Decl
+	for _, t := range []*exprpb.Type{
+		decls.Int, decls.Uint, decls.Bool,
+		decls.Double, decls.Bytes, decls.String} {
+		idents = append(idents,
+			decls.NewIdent(FormatCheckedType(t), decls.NewTypeType(t), nil))
+	}
+	idents = append(idents,
+		decls.NewIdent("list", decls.NewTypeType(listOfA), nil),
+		decls.NewIdent("map", decls.NewTypeType(mapOfAB), nil),
+		decls.NewIdent("null_type", decls.NewTypeType(decls.Null), nil),
+		decls.NewIdent("type", decls.NewTypeType(decls.NewTypeType(nil)), nil))
+
+	// Booleans
+	// TODO: allow the conditional to return a heterogenous type.
+	return append(idents, []*exprpb.Decl{
+		decls.NewFunction(operators.Conditional,
+			decls.NewParameterizedOverload(overloads.Conditional,
+				[]*exprpb.Type{decls.Bool, paramA, paramA}, paramA,
+				typeParamAList)),
+
+		decls.NewFunction(operators.LogicalAnd,
+			decls.NewOverload(overloads.LogicalAnd,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
+
+		decls.NewFunction(operators.LogicalOr,
+			decls.NewOverload(overloads.LogicalOr,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)),
+
+		decls.NewFunction(operators.LogicalNot,
+			decls.NewOverload(overloads.LogicalNot,
+				[]*exprpb.Type{decls.Bool}, decls.Bool)),
+
+		decls.NewFunction(operators.NotStrictlyFalse,
+			decls.NewOverload(overloads.NotStrictlyFalse,
+				[]*exprpb.Type{decls.Bool}, decls.Bool)),
+
+		// Relations.
+
+		decls.NewFunction(operators.Less,
+			decls.NewOverload(overloads.LessBool,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
+			decls.NewOverload(overloads.LessInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
+			decls.NewOverload(overloads.LessUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
+			decls.NewOverload(overloads.LessDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
+			decls.NewOverload(overloads.LessString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
+			decls.NewOverload(overloads.LessBytes,
+				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
+			decls.NewOverload(overloads.LessTimestamp,
+				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
+			decls.NewOverload(overloads.LessDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
+
+		decls.NewFunction(operators.LessEquals,
+			decls.NewOverload(overloads.LessEqualsBool,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsBytes,
+				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsTimestamp,
+				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
+			decls.NewOverload(overloads.LessEqualsDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
+
+		decls.NewFunction(operators.Greater,
+			decls.NewOverload(overloads.GreaterBool,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
+			decls.NewOverload(overloads.GreaterInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
+			decls.NewOverload(overloads.GreaterUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
+			decls.NewOverload(overloads.GreaterDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
+			decls.NewOverload(overloads.GreaterString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
+			decls.NewOverload(overloads.GreaterBytes,
+				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
+			decls.NewOverload(overloads.GreaterTimestamp,
+				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
+			decls.NewOverload(overloads.GreaterDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
+
+		decls.NewFunction(operators.GreaterEquals,
+			decls.NewOverload(overloads.GreaterEqualsBool,
+				[]*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsBytes,
+				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsTimestamp,
+				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool),
+			decls.NewOverload(overloads.GreaterEqualsDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)),
+
+		decls.NewFunction(operators.Equals,
+			decls.NewParameterizedOverload(overloads.Equals,
+				[]*exprpb.Type{paramA, paramA}, decls.Bool,
+				typeParamAList)),
+
+		decls.NewFunction(operators.NotEquals,
+			decls.NewParameterizedOverload(overloads.NotEquals,
+				[]*exprpb.Type{paramA, paramA}, decls.Bool,
+				typeParamAList)),
+
+		// Algebra.
+
+		decls.NewFunction(operators.Subtract,
+			decls.NewOverload(overloads.SubtractInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
+			decls.NewOverload(overloads.SubtractUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
+			decls.NewOverload(overloads.SubtractDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
+			decls.NewOverload(overloads.SubtractTimestampTimestamp,
+				[]*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration),
+			decls.NewOverload(overloads.SubtractTimestampDuration,
+				[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
+			decls.NewOverload(overloads.SubtractDurationDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
+
+		decls.NewFunction(operators.Multiply,
+			decls.NewOverload(overloads.MultiplyInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
+			decls.NewOverload(overloads.MultiplyUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
+			decls.NewOverload(overloads.MultiplyDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
+
+		decls.NewFunction(operators.Divide,
+			decls.NewOverload(overloads.DivideInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
+			decls.NewOverload(overloads.DivideUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
+			decls.NewOverload(overloads.DivideDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double)),
+
+		decls.NewFunction(operators.Modulo,
+			decls.NewOverload(overloads.ModuloInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
+			decls.NewOverload(overloads.ModuloUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)),
+
+		decls.NewFunction(operators.Add,
+			decls.NewOverload(overloads.AddInt64,
+				[]*exprpb.Type{decls.Int, decls.Int}, decls.Int),
+			decls.NewOverload(overloads.AddUint64,
+				[]*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint),
+			decls.NewOverload(overloads.AddDouble,
+				[]*exprpb.Type{decls.Double, decls.Double}, decls.Double),
+			decls.NewOverload(overloads.AddString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.String),
+			decls.NewOverload(overloads.AddBytes,
+				[]*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes),
+			decls.NewParameterizedOverload(overloads.AddList,
+				[]*exprpb.Type{listOfA, listOfA}, listOfA,
+				typeParamAList),
+			decls.NewOverload(overloads.AddTimestampDuration,
+				[]*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp),
+			decls.NewOverload(overloads.AddDurationTimestamp,
+				[]*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp),
+			decls.NewOverload(overloads.AddDurationDuration,
+				[]*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)),
+
+		decls.NewFunction(operators.Negate,
+			decls.NewOverload(overloads.NegateInt64,
+				[]*exprpb.Type{decls.Int}, decls.Int),
+			decls.NewOverload(overloads.NegateDouble,
+				[]*exprpb.Type{decls.Double}, decls.Double)),
+
+		// Index.
+
+		decls.NewFunction(operators.Index,
+			decls.NewParameterizedOverload(overloads.IndexList,
+				[]*exprpb.Type{listOfA, decls.Int}, paramA,
+				typeParamAList),
+			decls.NewParameterizedOverload(overloads.IndexMap,
+				[]*exprpb.Type{mapOfAB, paramA}, paramB,
+				typeParamABList)),
+		//decls.NewOverload(overloads.IndexMessage,
+		//	[]*expr.Type{decls.Dyn, decls.String}, decls.Dyn)),
+
+		// Collections.
+
+		decls.NewFunction(overloads.Size,
+			decls.NewInstanceOverload(overloads.SizeStringInst,
+				[]*exprpb.Type{decls.String}, decls.Int),
+			decls.NewInstanceOverload(overloads.SizeBytesInst,
+				[]*exprpb.Type{decls.Bytes}, decls.Int),
+			decls.NewParameterizedInstanceOverload(overloads.SizeListInst,
+				[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
+			decls.NewParameterizedInstanceOverload(overloads.SizeMapInst,
+				[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList),
+			decls.NewOverload(overloads.SizeString,
+				[]*exprpb.Type{decls.String}, decls.Int),
+			decls.NewOverload(overloads.SizeBytes,
+				[]*exprpb.Type{decls.Bytes}, decls.Int),
+			decls.NewParameterizedOverload(overloads.SizeList,
+				[]*exprpb.Type{listOfA}, decls.Int, typeParamAList),
+			decls.NewParameterizedOverload(overloads.SizeMap,
+				[]*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)),
+
+		decls.NewFunction(operators.In,
+			decls.NewParameterizedOverload(overloads.InList,
+				[]*exprpb.Type{paramA, listOfA}, decls.Bool,
+				typeParamAList),
+			decls.NewParameterizedOverload(overloads.InMap,
+				[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
+				typeParamABList)),
+
+		// Deprecated 'in()' function.
+
+		decls.NewFunction(overloads.DeprecatedIn,
+			decls.NewParameterizedOverload(overloads.InList,
+				[]*exprpb.Type{paramA, listOfA}, decls.Bool,
+				typeParamAList),
+			decls.NewParameterizedOverload(overloads.InMap,
+				[]*exprpb.Type{paramA, mapOfAB}, decls.Bool,
+				typeParamABList)),
+		//decls.NewOverload(overloads.InMessage,
+		//	[]*expr.Type{Dyn, decls.String},decls.Bool)),
+
+		// Conversions to type.
+
+		decls.NewFunction(overloads.TypeConvertType,
+			decls.NewParameterizedOverload(overloads.TypeConvertType,
+				[]*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)),
+
+		// Conversions to int.
+
+		decls.NewFunction(overloads.TypeConvertInt,
+			decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int),
+			decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int),
+			decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int),
+			decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int),
+			decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)),
+
+		// Conversions to uint.
+
+		decls.NewFunction(overloads.TypeConvertUint,
+			decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint),
+			decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint),
+			decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint),
+			decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)),
+
+		// Conversions to double.
+
+		decls.NewFunction(overloads.TypeConvertDouble,
+			decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double),
+			decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double),
+			decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double),
+			decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)),
+
+		// Conversions to bool.
+
+		decls.NewFunction(overloads.TypeConvertBool,
+			decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool),
+			decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)),
+
+		// Conversions to string.
+
+		decls.NewFunction(overloads.TypeConvertString,
+			decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String),
+			decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String),
+			decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String),
+			decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String),
+			decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String),
+			decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String),
+			decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String),
+			decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)),
+
+		// Conversions to bytes.
+
+		decls.NewFunction(overloads.TypeConvertBytes,
+			decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes),
+			decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)),
+
+		// Conversions to timestamps.
+
+		decls.NewFunction(overloads.TypeConvertTimestamp,
+			decls.NewOverload(overloads.TimestampToTimestamp,
+				[]*exprpb.Type{decls.Timestamp}, decls.Timestamp),
+			decls.NewOverload(overloads.StringToTimestamp,
+				[]*exprpb.Type{decls.String}, decls.Timestamp),
+			decls.NewOverload(overloads.IntToTimestamp,
+				[]*exprpb.Type{decls.Int}, decls.Timestamp)),
+
+		// Conversions to durations.
+
+		decls.NewFunction(overloads.TypeConvertDuration,
+			decls.NewOverload(overloads.DurationToDuration,
+				[]*exprpb.Type{decls.Duration}, decls.Duration),
+			decls.NewOverload(overloads.StringToDuration,
+				[]*exprpb.Type{decls.String}, decls.Duration),
+			decls.NewOverload(overloads.IntToDuration,
+				[]*exprpb.Type{decls.Int}, decls.Duration)),
+
+		// Conversions to Dyn.
+
+		decls.NewFunction(overloads.TypeConvertDyn,
+			decls.NewParameterizedOverload(overloads.ToDyn,
+				[]*exprpb.Type{paramA}, decls.Dyn,
+				typeParamAList)),
+
+		// String functions.
+
+		decls.NewFunction(overloads.Contains,
+			decls.NewInstanceOverload(overloads.ContainsString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
+		decls.NewFunction(overloads.EndsWith,
+			decls.NewInstanceOverload(overloads.EndsWithString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
+		decls.NewFunction(overloads.Matches,
+			decls.NewInstanceOverload(overloads.MatchString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
+		decls.NewFunction(overloads.StartsWith,
+			decls.NewInstanceOverload(overloads.StartsWithString,
+				[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
+
+		// Date/time functions.
+
+		decls.NewFunction(overloads.TimeGetFullYear,
+			decls.NewInstanceOverload(overloads.TimestampToYear,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToYearWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetMonth,
+			decls.NewInstanceOverload(overloads.TimestampToMonth,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToMonthWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetDayOfYear,
+			decls.NewInstanceOverload(overloads.TimestampToDayOfYear,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetDayOfMonth,
+			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetDate,
+			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetDayOfWeek,
+			decls.NewInstanceOverload(overloads.TimestampToDayOfWeek,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetHours,
+			decls.NewInstanceOverload(overloads.TimestampToHours,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToHoursWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
+			decls.NewInstanceOverload(overloads.DurationToHours,
+				[]*exprpb.Type{decls.Duration}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetMinutes,
+			decls.NewInstanceOverload(overloads.TimestampToMinutes,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
+			decls.NewInstanceOverload(overloads.DurationToMinutes,
+				[]*exprpb.Type{decls.Duration}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetSeconds,
+			decls.NewInstanceOverload(overloads.TimestampToSeconds,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
+			decls.NewInstanceOverload(overloads.DurationToSeconds,
+				[]*exprpb.Type{decls.Duration}, decls.Int)),
+
+		decls.NewFunction(overloads.TimeGetMilliseconds,
+			decls.NewInstanceOverload(overloads.TimestampToMilliseconds,
+				[]*exprpb.Type{decls.Timestamp}, decls.Int),
+			decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz,
+				[]*exprpb.Type{decls.Timestamp, decls.String}, decls.Int),
+			decls.NewInstanceOverload(overloads.DurationToMilliseconds,
+				[]*exprpb.Type{decls.Duration}, decls.Int))}...)
+}
diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go
new file mode 100644
index 00000000000..d3dbe0cad00
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/types.go
@@ -0,0 +1,479 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/google/cel-go/checker/decls"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Type kind constants mirror the oneof cases of exprpb.Type's TypeKind and
+// drive kind-based dispatch throughout the checker (see kindOf).
+const (
+	kindUnknown = iota + 1
+	kindError
+	kindFunction
+	kindDyn
+	kindPrimitive
+	kindWellKnown
+	kindWrapper
+	kindNull
+	kindAbstract // TODO: Update the checker protos to include abstract
+	kindType
+	kindList
+	kindMap
+	kindObject
+	kindTypeParam
+)
+
+// FormatCheckedType converts a type message into a human-readable string
+// representation, e.g. "int", "list(string)", "map(string, dyn)".
+func FormatCheckedType(t *exprpb.Type) string {
+	switch kindOf(t) {
+	case kindDyn:
+		return "dyn"
+	case kindFunction:
+		return formatFunction(t.GetFunction().GetResultType(),
+			t.GetFunction().GetArgTypes(),
+			false)
+	case kindList:
+		return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().ElemType))
+	case kindObject:
+		// Object types format as their fully-qualified message name.
+		return t.GetMessageType()
+	case kindMap:
+		return fmt.Sprintf("map(%s, %s)",
+			FormatCheckedType(t.GetMapType().KeyType),
+			FormatCheckedType(t.GetMapType().ValueType))
+	case kindNull:
+		return "null"
+	case kindPrimitive:
+		switch t.GetPrimitive() {
+		case exprpb.Type_UINT64:
+			return "uint"
+		case exprpb.Type_INT64:
+			return "int"
+		}
+		// Other primitives use the lowercased enum name, e.g. "bool", "string".
+		return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ")
+	case kindType:
+		if t.GetType() == nil {
+			return "type"
+		}
+		return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType()))
+	case kindWellKnown:
+		switch t.GetWellKnown() {
+		case exprpb.Type_ANY:
+			return "any"
+		case exprpb.Type_DURATION:
+			return "duration"
+		case exprpb.Type_TIMESTAMP:
+			return "timestamp"
+		}
+	case kindWrapper:
+		return fmt.Sprintf("wrapper(%s)",
+			FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper())))
+	case kindError:
+		return "!error!"
+	}
+	// Fallback for unknown or unhandled kinds: the proto's own string form.
+	return t.String()
+}
+
+// isDyn reports whether t is the DYN type or the well-known ANY message type,
+// both of which behave as wildcards during type checking.
+func isDyn(t *exprpb.Type) bool {
+	// Note: object type values that are well-known and map to a DYN value in
+	// practice are sanitized prior to being added to the environment.
+	k := kindOf(t)
+	if k == kindDyn {
+		return true
+	}
+	return k == kindWellKnown && t.GetWellKnown() == exprpb.Type_ANY
+}
+
+// isDynOrError reports whether t is the error type, the DYN type, or the
+// well-known ANY message type.
+func isDynOrError(t *exprpb.Type) bool {
+	if kindOf(t) == kindError {
+		return true
+	}
+	return isDyn(t)
+}
+
+// isEqualOrLessSpecific checks whether one type is equal or less specific than
+// the other one. A type is less specific if it matches the other type using
+// the DYN type.
+func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool {
+	kind1, kind2 := kindOf(t1), kindOf(t2)
+	// The first type is less specific.
+	if isDyn(t1) || kind1 == kindTypeParam {
+		return true
+	}
+	// The first type is not less specific.
+	if isDyn(t2) || kind2 == kindTypeParam {
+		return false
+	}
+	// Types must be of the same kind to be equal.
+	if kind1 != kind2 {
+		return false
+	}
+
+	// With limited exceptions for ANY and JSON values, the types must agree and
+	// be equivalent in order to return true.
+	switch kind1 {
+	case kindAbstract:
+		// Abstract types agree when their names match and their parameter
+		// types agree pairwise.
+		a1 := t1.GetAbstractType()
+		a2 := t2.GetAbstractType()
+		if a1.GetName() != a2.GetName() ||
+			len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) {
+			return false
+		}
+		for i, p1 := range a1.GetParameterTypes() {
+			if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) {
+				return false
+			}
+		}
+		return true
+	case kindFunction:
+		// Function types agree when arity, result type, and each arg type agree.
+		fn1 := t1.GetFunction()
+		fn2 := t2.GetFunction()
+		if len(fn1.ArgTypes) != len(fn2.ArgTypes) {
+			return false
+		}
+		if !isEqualOrLessSpecific(fn1.ResultType, fn2.ResultType) {
+			return false
+		}
+		for i, a1 := range fn1.ArgTypes {
+			if !isEqualOrLessSpecific(a1, fn2.ArgTypes[i]) {
+				return false
+			}
+		}
+		return true
+	case kindList:
+		return isEqualOrLessSpecific(t1.GetListType().ElemType, t2.GetListType().ElemType)
+	case kindMap:
+		m1 := t1.GetMapType()
+		m2 := t2.GetMapType()
+		// Compare keys against keys and values against values. The previous
+		// code compared the key types twice, so maps with differing value
+		// types (e.g. map(string, int) vs map(string, string)) were never
+		// distinguished.
+		return isEqualOrLessSpecific(m1.KeyType, m2.KeyType) &&
+			isEqualOrLessSpecific(m1.ValueType, m2.ValueType)
+	case kindType:
+		return true
+	default:
+		// Primitive, wrapper, well-known, object: require exact proto equality.
+		return proto.Equal(t1, t2)
+	}
+}
+
+// internalIsAssignable returns true if t1 is assignable to t2, binding free
+// type parameters in the substitution mapping m as a side effect.
+func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
+	// Process type parameters.
+	kind1, kind2 := kindOf(t1), kindOf(t2)
+	if kind2 == kindTypeParam {
+		if t2Sub, found := m.find(t2); found {
+			// Adjust the existing substitution to a more common type if possible. This is sound
+			// because any previous substitution will be compatible with the common type. This
+			// deals with the case where we have e.g. A -> int assigned, but now encounter a test
+			// against DYN, and want to widen A to DYN.
+			if isEqualOrLessSpecific(t1, t2Sub) && notReferencedIn(t2, t1) {
+				m.add(t2, t1)
+				return true
+			}
+			// Continue regular process with the assignment for type2.
+			return internalIsAssignable(m, t1, t2Sub)
+		}
+		// t2 is an unbound type parameter: bind it to t1, occurs check first.
+		if notReferencedIn(t2, t1) {
+			m.add(t2, t1)
+			return true
+		}
+	}
+	if kind1 == kindTypeParam {
+		// For the lower type bound, we currently do not perform adjustment. The restricted
+		// way we use type parameters in lower type bounds, it is not necessary, but may
+		// become if we generalize type unification.
+		if t1Sub, found := m.find(t1); found {
+			return internalIsAssignable(m, t1Sub, t2)
+		}
+		if notReferencedIn(t1, t2) {
+			m.add(t1, t2)
+			return true
+		}
+	}
+
+	// Next check for wildcard types.
+	if isDynOrError(t1) || isDynOrError(t2) {
+		return true
+	}
+
+	// Test for when the types do not need to agree, but are more specific than dyn.
+	switch kind1 {
+	case kindNull:
+		return internalIsAssignableNull(t2)
+	case kindPrimitive:
+		return internalIsAssignablePrimitive(t1.GetPrimitive(), t2)
+	case kindWrapper:
+		// A wrapper type is assignable wherever its primitive would be.
+		return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2)
+	default:
+		if kind1 != kind2 {
+			return false
+		}
+	}
+
+	// Test for when the types must agree.
+	switch kind1 {
+	// ERROR, TYPE_PARAM, and DYN handled above.
+	case kindAbstract:
+		return internalIsAssignableAbstractType(m,
+			t1.GetAbstractType(), t2.GetAbstractType())
+	case kindFunction:
+		return internalIsAssignableFunction(m,
+			t1.GetFunction(), t2.GetFunction())
+	case kindList:
+		return internalIsAssignable(m,
+			t1.GetListType().GetElemType(),
+			t2.GetListType().GetElemType())
+	case kindMap:
+		return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType())
+	case kindObject:
+		return t1.GetMessageType() == t2.GetMessageType()
+	case kindType:
+		// A type is a type is a type, any additional parameterization of the
+		// type cannot affect method resolution or assignability.
+		return true
+	case kindWellKnown:
+		return t1.GetWellKnown() == t2.GetWellKnown()
+	default:
+		return false
+	}
+}
+
+// internalIsAssignableAbstractType reports whether the two abstract types
+// share a name and have pairwise-assignable parameter types.
+func internalIsAssignableAbstractType(m *mapping,
+	a1 *exprpb.Type_AbstractType,
+	a2 *exprpb.Type_AbstractType) bool {
+	return a1.GetName() == a2.GetName() &&
+		internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes())
+}
+
+// internalIsAssignableFunction returns true if the function return type and
+// arg types are assignable.
+//
+// The arg types and result type are compared as one flat list. The list is
+// built with explicit copies rather than append(f.GetArgTypes(), ...): when
+// the ArgTypes slice has spare capacity, that append would write the result
+// type into the proto's backing array, silently mutating the declaration.
+func internalIsAssignableFunction(m *mapping,
+	f1 *exprpb.Type_FunctionType,
+	f2 *exprpb.Type_FunctionType) bool {
+	return internalIsAssignableList(m,
+		flattenFunctionTypes(f1),
+		flattenFunctionTypes(f2))
+}
+
+// flattenFunctionTypes returns the function's arg types followed by its
+// result type in a freshly allocated slice.
+func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type {
+	args := f.GetArgTypes()
+	types := make([]*exprpb.Type, 0, len(args)+1)
+	types = append(types, args...)
+	return append(types, f.GetResultType())
+}
+
+// internalIsAssignableList reports whether the two lists have the same length
+// and each element of l1 is assignable to the element of l2 at the same index.
+func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool {
+	if len(l1) != len(l2) {
+		return false
+	}
+	for i := range l1 {
+		if !internalIsAssignable(m, l1[i], l2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// internalIsAssignableMap reports whether map type m1 may be assigned to m2,
+// i.e. whether both the key types and the value types are assignable.
+func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool {
+	return internalIsAssignableList(m,
+		[]*exprpb.Type{m1.GetKeyType(), m1.GetValueType()},
+		[]*exprpb.Type{m2.GetKeyType(), m2.GetValueType()})
+}
+
+// internalIsAssignableNull reports whether null may be assigned to t, which
+// holds for abstract, object, well-known, and wrapper types.
+func internalIsAssignableNull(t *exprpb.Type) bool {
+	k := kindOf(t)
+	return k == kindAbstract || k == kindObject || k == kindWellKnown || k == kindWrapper
+}
+
+// internalIsAssignablePrimitive reports whether the target type is the same
+// primitive as p, or the wrapper type for that primitive.
+func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool {
+	switch kindOf(target) {
+	case kindPrimitive:
+		return target.GetPrimitive() == p
+	case kindWrapper:
+		return target.GetWrapper() == p
+	}
+	return false
+}
+
+// isAssignable returns a copy of m extended with the substitutions required
+// to make t1 assignable to t2, or nil if t1 is not assignable to t2. The
+// input mapping is never modified.
+func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping {
+	if mCopy := m.copy(); internalIsAssignable(mCopy, t1, t2) {
+		return mCopy
+	}
+	return nil
+}
+
+// isAssignableList returns a copy of m extended with the substitutions
+// required to make each element of l1 assignable to the corresponding element
+// of l2, or nil when the lists are not assignable. The input mapping is never
+// modified.
+func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping {
+	if mCopy := m.copy(); internalIsAssignableList(mCopy, l1, l2) {
+		return mCopy
+	}
+	return nil
+}
+
+// kindOf returns the kind of the type as defined in the checked.proto,
+// derived from which oneof case of TypeKind is populated. A nil type or an
+// unset TypeKind yields kindUnknown.
+func kindOf(t *exprpb.Type) int {
+	if t == nil || t.TypeKind == nil {
+		return kindUnknown
+	}
+	switch t.TypeKind.(type) {
+	case *exprpb.Type_Error:
+		return kindError
+	case *exprpb.Type_Function:
+		return kindFunction
+	case *exprpb.Type_Dyn:
+		return kindDyn
+	case *exprpb.Type_Primitive:
+		return kindPrimitive
+	case *exprpb.Type_WellKnown:
+		return kindWellKnown
+	case *exprpb.Type_Wrapper:
+		return kindWrapper
+	case *exprpb.Type_Null:
+		return kindNull
+	case *exprpb.Type_Type:
+		return kindType
+	case *exprpb.Type_ListType_:
+		return kindList
+	case *exprpb.Type_MapType_:
+		return kindMap
+	case *exprpb.Type_MessageType:
+		return kindObject
+	case *exprpb.Type_TypeParam:
+		return kindTypeParam
+	}
+	return kindUnknown
+}
+
+// mostGeneral returns whichever of two types, known to unify, is the more
+// general one; ties favor t1.
+func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type {
+	if !isEqualOrLessSpecific(t1, t2) {
+		return t2
+	}
+	return t1
+}
+
+// notReferencedIn checks whether the type doesn't appear directly or
+// transitively within the other type. This is a standard requirement for type
+// unification, commonly referred to as the "occurs check".
+func notReferencedIn(t *exprpb.Type, withinType *exprpb.Type) bool {
+	if proto.Equal(t, withinType) {
+		return false
+	}
+	withinKind := kindOf(withinType)
+	switch withinKind {
+	case kindAbstract:
+		for _, pt := range withinType.GetAbstractType().GetParameterTypes() {
+			if !notReferencedIn(t, pt) {
+				return false
+			}
+		}
+		return true
+	case kindFunction:
+		// Recurse into the arg types and the result type. Do not use
+		// append(fn.ArgTypes, fn.ResultType) here: when ArgTypes has spare
+		// capacity, the append would write the result type into the proto's
+		// backing array, silently corrupting the declaration.
+		fn := withinType.GetFunction()
+		for _, a := range fn.GetArgTypes() {
+			if !notReferencedIn(t, a) {
+				return false
+			}
+		}
+		return notReferencedIn(t, fn.GetResultType())
+	case kindList:
+		return notReferencedIn(t, withinType.GetListType().ElemType)
+	case kindMap:
+		m := withinType.GetMapType()
+		return notReferencedIn(t, m.KeyType) && notReferencedIn(t, m.ValueType)
+	case kindWrapper:
+		// A wrapper is equivalent to its underlying primitive for this check.
+		return notReferencedIn(t, decls.NewPrimitiveType(withinType.GetWrapper()))
+	default:
+		// Leaf kinds cannot contain other types.
+		return true
+	}
+}
+
+// substitute replaces all direct and indirect occurrences of bound type
+// parameters. Unbound type parameters are replaced by DYN if typeParamToDyn
+// is true.
+func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type {
+	// Follow the substitution chain until t is no longer bound in m.
+	if tSub, found := m.find(t); found {
+		return substitute(m, tSub, typeParamToDyn)
+	}
+	kind := kindOf(t)
+	if typeParamToDyn && kind == kindTypeParam {
+		return decls.Dyn
+	}
+	// For composite kinds, rebuild the type from substituted components.
+	switch kind {
+	case kindAbstract:
+		// TODO: implement!
+		at := t.GetAbstractType()
+		params := make([]*exprpb.Type, len(at.GetParameterTypes()))
+		for i, p := range at.GetParameterTypes() {
+			params[i] = substitute(m, p, typeParamToDyn)
+		}
+		return decls.NewAbstractType(at.GetName(), params...)
+	case kindFunction:
+		fn := t.GetFunction()
+		rt := substitute(m, fn.ResultType, typeParamToDyn)
+		args := make([]*exprpb.Type, len(fn.ArgTypes))
+		for i, a := range fn.ArgTypes {
+			args[i] = substitute(m, a, typeParamToDyn)
+		}
+		return decls.NewFunctionType(rt, args...)
+	case kindList:
+		return decls.NewListType(substitute(m, t.GetListType().ElemType, typeParamToDyn))
+	case kindMap:
+		mt := t.GetMapType()
+		return decls.NewMapType(substitute(m, mt.KeyType, typeParamToDyn),
+			substitute(m, mt.ValueType, typeParamToDyn))
+	case kindType:
+		if t.GetType() != nil {
+			return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn))
+		}
+		return t
+	default:
+		// Leaf kinds (primitive, wrapper, object, etc.) contain no parameters.
+		return t
+	}
+}
+
+// typeKey returns the string used to index a type in a substitution mapping;
+// two types share a key iff they have the same formatted representation.
+func typeKey(t *exprpb.Type) string {
+	return FormatCheckedType(t)
+}
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
new file mode 100644
index 00000000000..5a4894733d4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -0,0 +1,305 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debug provides tools to print a parsed expression graph and
+// adorn each expression element with additional metadata.
+package debug
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Adorner returns debug metadata that will be tacked on to the string
+// representation of an expression.
+type Adorner interface {
+ // GetMetadata for the input context.
+ GetMetadata(ctx interface{}) string
+}
+
+// Writer manages writing expressions to an internal string.
+type Writer interface {
+ fmt.Stringer
+
+ // Buffer pushes an expression into an internal queue of expressions to
+ // write to a string.
+ Buffer(e *exprpb.Expr)
+}
+
+type emptyDebugAdorner struct {
+}
+
+var emptyAdorner Adorner = &emptyDebugAdorner{}
+
+func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
+ return ""
+}
+
+// ToDebugString gives the unadorned string representation of the Expr.
+func ToDebugString(e *exprpb.Expr) string {
+ return ToAdornedDebugString(e, emptyAdorner)
+}
+
+// ToAdornedDebugString gives the adorned string representation of the Expr.
+func ToAdornedDebugString(e *exprpb.Expr, adorner Adorner) string {
+ w := newDebugWriter(adorner)
+ w.Buffer(e)
+ return w.String()
+}
+
+// debugWriter is used to print out pretty-printed debug strings.
+type debugWriter struct {
+ adorner Adorner
+ buffer bytes.Buffer
+ indent int
+ lineStart bool
+}
+
+func newDebugWriter(a Adorner) *debugWriter {
+ return &debugWriter{
+ adorner: a,
+ indent: 0,
+ lineStart: true,
+ }
+}
+
+// Buffer appends the expression's string form to the internal buffer,
+// dispatching on the concrete expression kind. A nil expression is ignored.
+func (w *debugWriter) Buffer(e *exprpb.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_ConstExpr:
+ w.append(formatLiteral(e.GetConstExpr()))
+ case *exprpb.Expr_IdentExpr:
+ w.append(e.GetIdentExpr().Name)
+ case *exprpb.Expr_SelectExpr:
+ w.appendSelect(e.GetSelectExpr())
+ case *exprpb.Expr_CallExpr:
+ w.appendCall(e.GetCallExpr())
+ case *exprpb.Expr_ListExpr:
+ w.appendList(e.GetListExpr())
+ case *exprpb.Expr_StructExpr:
+ w.appendStruct(e.GetStructExpr())
+ case *exprpb.Expr_ComprehensionExpr:
+ w.appendComprehension(e.GetComprehensionExpr())
+ }
+ // The adorner's metadata follows the expression text.
+ w.adorn(e)
+}
+
+func (w *debugWriter) appendSelect(sel *exprpb.Expr_Select) {
+ w.Buffer(sel.Operand)
+ w.append(".")
+ w.append(sel.Field)
+ if sel.TestOnly {
+ w.append("~test-only~")
+ }
+}
+
+// appendCall writes a function call, prefixing a receiver-style target (if
+// any) as `target.function(`. Arguments are printed one per line at an
+// increased indent; a call with no arguments prints as `function()`.
+func (w *debugWriter) appendCall(call *exprpb.Expr_Call) {
+ if call.Target != nil {
+ w.Buffer(call.Target)
+ w.append(".")
+ }
+ w.append(call.Function)
+ w.append("(")
+ if len(call.GetArgs()) > 0 {
+ w.addIndent()
+ w.appendLine()
+ for i, arg := range call.Args {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(arg)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append(")")
+}
+
+func (w *debugWriter) appendList(list *exprpb.Expr_CreateList) {
+ w.append("[")
+ if len(list.GetElements()) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, elem := range list.Elements {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(elem)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("]")
+}
+
+func (w *debugWriter) appendStruct(obj *exprpb.Expr_CreateStruct) {
+ if obj.MessageName != "" {
+ w.appendObject(obj)
+ } else {
+ w.appendMap(obj)
+ }
+}
+
+func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
+ w.append(obj.MessageName)
+ w.append("{")
+ if len(obj.Entries) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, entry := range obj.Entries {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.append(entry.GetFieldKey())
+ w.append(":")
+ w.Buffer(entry.Value)
+ w.adorn(entry)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
+ w.append("{")
+ if len(obj.Entries) > 0 {
+ w.appendLine()
+ w.addIndent()
+ for i, entry := range obj.Entries {
+ if i > 0 {
+ w.append(",")
+ w.appendLine()
+ }
+ w.Buffer(entry.GetMapKey())
+ w.append(":")
+ w.Buffer(entry.Value)
+ w.adorn(entry)
+ }
+ w.removeIndent()
+ w.appendLine()
+ }
+ w.append("}")
+}
+
+// appendComprehension writes a comprehension in its expanded internal form,
+// labeling each of its seven components (iteration variable, target range,
+// accumulator, init, loop condition, loop step, result) with a comment line.
+func (w *debugWriter) appendComprehension(comprehension *exprpb.Expr_Comprehension) {
+ w.append("__comprehension__(")
+ w.addIndent()
+ w.appendLine()
+ w.append("// Variable")
+ w.appendLine()
+ w.append(comprehension.IterVar)
+ w.append(",")
+ w.appendLine()
+ w.append("// Target")
+ w.appendLine()
+ w.Buffer(comprehension.IterRange)
+ w.append(",")
+ w.appendLine()
+ w.append("// Accumulator")
+ w.appendLine()
+ w.append(comprehension.AccuVar)
+ w.append(",")
+ w.appendLine()
+ w.append("// Init")
+ w.appendLine()
+ w.Buffer(comprehension.AccuInit)
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopCondition")
+ w.appendLine()
+ w.Buffer(comprehension.LoopCondition)
+ w.append(",")
+ w.appendLine()
+ w.append("// LoopStep")
+ w.appendLine()
+ w.Buffer(comprehension.LoopStep)
+ w.append(",")
+ w.appendLine()
+ w.append("// Result")
+ w.appendLine()
+ w.Buffer(comprehension.Result)
+ w.append(")")
+ w.removeIndent()
+}
+
+// formatLiteral renders a constant in CEL source-like syntax: bytes as
+// b"...", strings quoted, uints with a trailing `u`. It panics on an
+// unrecognized constant kind, which indicates a programmer bug.
+func formatLiteral(c *exprpb.Constant) string {
+ switch c.ConstantKind.(type) {
+ case *exprpb.Constant_BoolValue:
+ return fmt.Sprintf("%t", c.GetBoolValue())
+ case *exprpb.Constant_BytesValue:
+ return fmt.Sprintf("b\"%s\"", string(c.GetBytesValue()))
+ case *exprpb.Constant_DoubleValue:
+ return fmt.Sprintf("%v", c.GetDoubleValue())
+ case *exprpb.Constant_Int64Value:
+ return fmt.Sprintf("%d", c.GetInt64Value())
+ case *exprpb.Constant_StringValue:
+ return strconv.Quote(c.GetStringValue())
+ case *exprpb.Constant_Uint64Value:
+ return fmt.Sprintf("%du", c.GetUint64Value())
+ case *exprpb.Constant_NullValue:
+ return "null"
+ default:
+ panic("Unknown constant type")
+ }
+}
+
+// append writes s to the buffer, first emitting the pending indentation when
+// at the start of a line.
+func (w *debugWriter) append(s string) {
+ w.doIndent()
+ w.buffer.WriteString(s)
+}
+
+// appendFormat appends the Sprintf-formatted string.
+func (w *debugWriter) appendFormat(f string, args ...interface{}) {
+ w.append(fmt.Sprintf(f, args...))
+}
+
+// doIndent writes the current indentation, at most once per line.
+func (w *debugWriter) doIndent() {
+ if w.lineStart {
+ w.lineStart = false
+ w.buffer.WriteString(strings.Repeat(" ", w.indent))
+ }
+}
+
+// adorn appends the adorner's metadata for the given expression or entry.
+func (w *debugWriter) adorn(e interface{}) {
+ w.append(w.adorner.GetMetadata(e))
+}
+
+// appendLine terminates the current line; indentation is emitted lazily by
+// the next append.
+func (w *debugWriter) appendLine() {
+ w.buffer.WriteString("\n")
+ w.lineStart = true
+}
+
+// addIndent increases the indentation level by one.
+func (w *debugWriter) addIndent() {
+ w.indent++
+}
+
+// removeIndent decreases the indentation level, panicking on underflow since
+// that would indicate unbalanced addIndent/removeIndent calls (a bug).
+func (w *debugWriter) removeIndent() {
+ w.indent--
+ if w.indent < 0 {
+ panic("negative indent")
+ }
+}
+
+// String returns the accumulated debug output.
+func (w *debugWriter) String() string {
+ return w.buffer.String()
+}
diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go
new file mode 100644
index 00000000000..5362fdfe4b3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common defines types and utilities common to expression parsing,
+// checking, and interpretation
+package common
diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go
new file mode 100644
index 00000000000..bfe93d9737f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/error.go
@@ -0,0 +1,70 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/width"
+)
+
+// Error type which references a location within source and a message.
+type Error struct {
+ Location Location
+ Message string
+}
+
+const (
+ dot = "."
+ ind = "^"
+)
+
+var (
+ wideDot = width.Widen.String(dot)
+ wideInd = width.Widen.String(ind)
+)
+
+// ToDisplayString decorates the error message with the source location.
+//
+// When the offending source line is available, a two-line snippet follows:
+// the line itself and an indicator line with a caret under the column.
+func (e *Error) ToDisplayString(source Source) string {
+ var result = fmt.Sprintf("ERROR: %s:%d:%d: %s",
+ source.Description(),
+ e.Location.Line(),
+ e.Location.Column()+1, // add one to the 0-based column for display
+ e.Message)
+ if snippet, found := source.Snippet(e.Location.Line()); found {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ // Emit one dot per rune before the error column. Multi-byte runes get
+ // a full-width dot so the caret stays aligned under wide characters
+ // (assumes a multi-byte encoding implies wide rendering — see width pkg).
+ for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ indLine += wideDot
+ } else {
+ indLine += dot
+ }
+ }
+ // The indicator itself is widened when the rune at the column is
+ // multi-byte.
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ indLine += wideInd
+ } else {
+ indLine += ind
+ }
+ result += srcLine + indLine
+ }
+ return result
+}
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
new file mode 100644
index 00000000000..e7045a5c784
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -0,0 +1,58 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "fmt"
+)
+
+// Errors type which contains a list of errors observed during parsing.
+type Errors struct {
+ errors []Error
+ source Source
+}
+
+// NewErrors creates a new instance of the Errors type.
+func NewErrors(source Source) *Errors {
+ return &Errors{
+ errors: []Error{},
+ source: source}
+}
+
+// ReportError records an error at a source location.
+func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
+ err := Error{
+ Location: l,
+ Message: fmt.Sprintf(format, args...),
+ }
+ e.errors = append(e.errors, err)
+}
+
+// GetErrors returns the list of observed errors.
+func (e *Errors) GetErrors() []Error {
+ return e.errors[:]
+}
+
+// ToDisplayString returns the error set to a newline delimited string.
+func (e *Errors) ToDisplayString() string {
+ var result = ""
+ for i, err := range e.errors {
+ if i >= 1 {
+ result += "\n"
+ }
+ result += err.ToDisplayString(e.source)
+ }
+ return result
+}
diff --git a/vendor/github.com/google/cel-go/common/location.go b/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 00000000000..31fce6cd317
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// Location interface to represent a location within Source.
+type Location interface {
+ Line() int // 1-based line number within source.
+ Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+ line int
+ column int
+}
+
+var (
+ // SourceLocation implements the Location interface.
+ _ Location = &SourceLocation{}
+ // NoLocation is a particular illegal location.
+ NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+ return &SourceLocation{
+ line: line,
+ column: column}
+}
+
+// Line returns the 1-based line of the location.
+func (l *SourceLocation) Line() int {
+ return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+ return l.column
+}
diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 00000000000..45be3f6ed77
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,77 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+ // Symbolic operators.
+ Conditional = "_?_:_"
+ LogicalAnd = "_&&_"
+ LogicalOr = "_||_"
+ LogicalNot = "!_"
+ Equals = "_==_"
+ NotEquals = "_!=_"
+ Less = "_<_"
+ LessEquals = "_<=_"
+ Greater = "_>_"
+ GreaterEquals = "_>=_"
+ Add = "_+_"
+ Subtract = "_-_"
+ Multiply = "_*_"
+ Divide = "_/_"
+ Modulo = "_%_"
+ Negate = "-_"
+ Index = "_[_]"
+
+ // Macros, must have a valid identifier.
+ Has = "has"
+ All = "all"
+ Exists = "exists"
+ ExistsOne = "exists_one"
+ Map = "map"
+ Filter = "filter"
+
+ // Named operators, must not be valid identifiers.
+ NotStrictlyFalse = "@not_strictly_false"
+ In = "@in"
+
+ // Deprecated: named operators with valid identifiers.
+ OldNotStrictlyFalse = "__not_strictly_false__"
+ OldIn = "_in_"
+)
+
+// operators maps an operator's source-text symbol to its internal function
+// name. Only symbols listed here are resolvable via Find; unary and logical
+// operators are not included in this table.
+var operators = map[string]string{
+ "+": Add,
+ "-": Subtract,
+ "*": Multiply,
+ "/": Divide,
+ "%": Modulo,
+ "in": In,
+ "==": Equals,
+ "!=": NotEquals,
+ "<": Less,
+ "<=": LessEquals,
+ ">": Greater,
+ ">=": GreaterEquals,
+}
+
+// Find the internal function name for an operator, if the input text is one.
+// The boolean result reports whether the symbol was recognized.
+func Find(text string) (string, bool) {
+ op, found := operators[text]
+ return op, found
+}
diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 00000000000..5ec4ef9a514
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,273 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads
+
+// Boolean logic overloads
+const (
+ Conditional = "conditional"
+ LogicalAnd = "logical_and"
+ LogicalOr = "logical_or"
+ LogicalNot = "logical_not"
+ NotStrictlyFalse = "not_strictly_false"
+ Equals = "equals"
+ NotEquals = "not_equals"
+ LessBool = "less_bool"
+ LessInt64 = "less_int64"
+ LessUint64 = "less_uint64"
+ LessDouble = "less_double"
+ LessString = "less_string"
+ LessBytes = "less_bytes"
+ LessTimestamp = "less_timestamp"
+ LessDuration = "less_duration"
+ LessEqualsBool = "less_equals_bool"
+ LessEqualsInt64 = "less_equals_int64"
+ LessEqualsUint64 = "less_equals_uint64"
+ LessEqualsDouble = "less_equals_double"
+ LessEqualsString = "less_equals_string"
+ LessEqualsBytes = "less_equals_bytes"
+ LessEqualsTimestamp = "less_equals_timestamp"
+ LessEqualsDuration = "less_equals_duration"
+ GreaterBool = "greater_bool"
+ GreaterInt64 = "greater_int64"
+ GreaterUint64 = "greater_uint64"
+ GreaterDouble = "greater_double"
+ GreaterString = "greater_string"
+ GreaterBytes = "greater_bytes"
+ GreaterTimestamp = "greater_timestamp"
+ GreaterDuration = "greater_duration"
+ GreaterEqualsBool = "greater_equals_bool"
+ GreaterEqualsInt64 = "greater_equals_int64"
+ GreaterEqualsUint64 = "greater_equals_uint64"
+ GreaterEqualsDouble = "greater_equals_double"
+ GreaterEqualsString = "greater_equals_string"
+ GreaterEqualsBytes = "greater_equals_bytes"
+ GreaterEqualsTimestamp = "greater_equals_timestamp"
+ GreaterEqualsDuration = "greater_equals_duration"
+)
+
+// Math overloads
+const (
+ AddInt64 = "add_int64"
+ AddUint64 = "add_uint64"
+ AddDouble = "add_double"
+ AddString = "add_string"
+ AddBytes = "add_bytes"
+ AddList = "add_list"
+ AddTimestampDuration = "add_timestamp_duration"
+ AddDurationTimestamp = "add_duration_timestamp"
+ AddDurationDuration = "add_duration_duration"
+ SubtractInt64 = "subtract_int64"
+ SubtractUint64 = "subtract_uint64"
+ SubtractDouble = "subtract_double"
+ SubtractTimestampTimestamp = "subtract_timestamp_timestamp"
+ SubtractTimestampDuration = "subtract_timestamp_duration"
+ SubtractDurationDuration = "subtract_duration_duration"
+ MultiplyInt64 = "multiply_int64"
+ MultiplyUint64 = "multiply_uint64"
+ MultiplyDouble = "multiply_double"
+ DivideInt64 = "divide_int64"
+ DivideUint64 = "divide_uint64"
+ DivideDouble = "divide_double"
+ ModuloInt64 = "modulo_int64"
+ ModuloUint64 = "modulo_uint64"
+ NegateInt64 = "negate_int64"
+ NegateDouble = "negate_double"
+)
+
+// Index overloads
+const (
+ IndexList = "index_list"
+ IndexMap = "index_map"
+ IndexMessage = "index_message" // TODO: introduce concept of types.Message
+)
+
+// In operators
+const (
+ DeprecatedIn = "in"
+ InList = "in_list"
+ InMap = "in_map"
+ InMessage = "in_message" // TODO: introduce concept of types.Message
+)
+
+// Size overloads
+const (
+ Size = "size"
+ SizeString = "size_string"
+ SizeBytes = "size_bytes"
+ SizeList = "size_list"
+ SizeMap = "size_map"
+ SizeStringInst = "string_size"
+ SizeBytesInst = "bytes_size"
+ SizeListInst = "list_size"
+ SizeMapInst = "map_size"
+)
+
+// String function names.
+const (
+ Contains = "contains"
+ EndsWith = "endsWith"
+ Matches = "matches"
+ StartsWith = "startsWith"
+)
+
+// String function overload names.
+const (
+ ContainsString = "contains_string"
+ EndsWithString = "ends_with_string"
+ MatchString = "matches_string"
+ StartsWithString = "starts_with_string"
+)
+
+// Time-based functions.
+const (
+ TimeGetFullYear = "getFullYear"
+ TimeGetMonth = "getMonth"
+ TimeGetDayOfYear = "getDayOfYear"
+ TimeGetDate = "getDate"
+ TimeGetDayOfMonth = "getDayOfMonth"
+ TimeGetDayOfWeek = "getDayOfWeek"
+ TimeGetHours = "getHours"
+ TimeGetMinutes = "getMinutes"
+ TimeGetSeconds = "getSeconds"
+ TimeGetMilliseconds = "getMilliseconds"
+)
+
+// Timestamp overloads for time functions without timezones.
+const (
+ TimestampToYear = "timestamp_to_year"
+ TimestampToMonth = "timestamp_to_month"
+ TimestampToDayOfYear = "timestamp_to_day_of_year"
+ TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month"
+ TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based"
+ TimestampToDayOfWeek = "timestamp_to_day_of_week"
+ TimestampToHours = "timestamp_to_hours"
+ TimestampToMinutes = "timestamp_to_minutes"
+ TimestampToSeconds = "timestamp_to_seconds"
+ TimestampToMilliseconds = "timestamp_to_milliseconds"
+)
+
+// Timestamp overloads for time functions with timezones.
+const (
+ TimestampToYearWithTz = "timestamp_to_year_with_tz"
+ TimestampToMonthWithTz = "timestamp_to_month_with_tz"
+ TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz"
+ TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz"
+ TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz"
+ TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz"
+ TimestampToHoursWithTz = "timestamp_to_hours_with_tz"
+ TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz"
+ TimestampToSecondsWithTz = "timestamp_to_seconds_tz"
+ TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz"
+)
+
+// Duration overloads for time functions.
+const (
+ DurationToHours = "duration_to_hours"
+ DurationToMinutes = "duration_to_minutes"
+ DurationToSeconds = "duration_to_seconds"
+ DurationToMilliseconds = "duration_to_milliseconds"
+)
+
+// Type conversion methods and overloads
+const (
+ TypeConvertInt = "int"
+ TypeConvertUint = "uint"
+ TypeConvertDouble = "double"
+ TypeConvertBool = "bool"
+ TypeConvertString = "string"
+ TypeConvertBytes = "bytes"
+ TypeConvertTimestamp = "timestamp"
+ TypeConvertDuration = "duration"
+ TypeConvertType = "type"
+ TypeConvertDyn = "dyn"
+)
+
+// Int conversion functions.
+const (
+ IntToInt = "int64_to_int64"
+ UintToInt = "uint64_to_int64"
+ DoubleToInt = "double_to_int64"
+ StringToInt = "string_to_int64"
+ TimestampToInt = "timestamp_to_int64"
+ DurationToInt = "duration_to_int64"
+)
+
+// Uint conversion functions.
+const (
+ UintToUint = "uint64_to_uint64"
+ IntToUint = "int64_to_uint64"
+ DoubleToUint = "double_to_uint64"
+ StringToUint = "string_to_uint64"
+)
+
+// Double conversion functions.
+const (
+ DoubleToDouble = "double_to_double"
+ IntToDouble = "int64_to_double"
+ UintToDouble = "uint64_to_double"
+ StringToDouble = "string_to_double"
+)
+
+// Bool conversion functions.
+const (
+ BoolToBool = "bool_to_bool"
+ StringToBool = "string_to_bool"
+)
+
+// Bytes conversion functions.
+const (
+ BytesToBytes = "bytes_to_bytes"
+ StringToBytes = "string_to_bytes"
+)
+
+// String conversion functions.
+const (
+ StringToString = "string_to_string"
+ BoolToString = "bool_to_string"
+ IntToString = "int64_to_string"
+ UintToString = "uint64_to_string"
+ DoubleToString = "double_to_string"
+ BytesToString = "bytes_to_string"
+ TimestampToString = "timestamp_to_string"
+ DurationToString = "duration_to_string"
+)
+
+// Timestamp conversion functions
+const (
+ TimestampToTimestamp = "timestamp_to_timestamp"
+ StringToTimestamp = "string_to_timestamp"
+ IntToTimestamp = "int64_to_timestamp"
+)
+
+// Convert duration from string
+const (
+ DurationToDuration = "duration_to_duration"
+ StringToDuration = "string_to_duration"
+ IntToDuration = "int64_to_duration"
+)
+
+// Convert to dyn
+const (
+ ToDyn = "to_dyn"
+)
+
+// Comprehensions helper methods, not directly accessible via a developer.
+const (
+ Iterator = "@iterator"
+ HasNext = "@hasNext"
+ Next = "@next"
+)
diff --git a/vendor/github.com/google/cel-go/common/packages/packager.go b/vendor/github.com/google/cel-go/common/packages/packager.go
new file mode 100644
index 00000000000..4a2c37d7993
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/packages/packager.go
@@ -0,0 +1,82 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package packages defines types for interpreting qualified names.
+*/
+package packages
+
+import (
+ "strings"
+)
+
+// Packager helps interpret qualified names.
+type Packager interface {
+ // Package returns the qualified package name of the packager.
+ //
+ // The package path may be a namespace, package, or type.
+ Package() string
+
+ // ResolveCandidateNames returns the list of possible qualified names
+ // visible within the module in name resolution order.
+ //
+ // Name candidates are returned in order of most to least qualified in
+ // order to ensure that shadowing names are encountered first.
+ ResolveCandidateNames(name string) []string
+}
+
+var (
+ // DefaultPackage has an empty package name.
+ DefaultPackage = NewPackage("")
+)
+
+// NewPackage creates a new Packager with the given qualified package name.
+func NewPackage(pkg string) Packager {
+ return &defaultPackage{pkg: pkg}
+}
+
+type defaultPackage struct {
+ pkg string
+}
+
+func (p *defaultPackage) Package() string {
+ return p.pkg
+}
+
+// ResolveCandidateNames returns the candidates name of namespaced
+// identifiers in C++ resolution order.
+//
+// Names which shadow other names are returned first. If a name includes a
+// leading dot ('.'), the name is treated as an absolute identifier which
+// cannot be shadowed.
+//
+// Given a package name a.b.c.M.N and a type name R.s, this will deliver in
+// order a.b.c.M.N.R.s, a.b.c.M.R.s, a.b.c.R.s, a.b.R.s, a.R.s, R.s.
+func (p *defaultPackage) ResolveCandidateNames(name string) []string {
+ // A leading dot marks an absolute reference: strip it and return it alone.
+ if strings.HasPrefix(name, ".") {
+ return []string{name[1:]}
+ }
+
+ // No package: the name can only resolve to itself.
+ if p.pkg == "" {
+ return []string{name}
+ }
+
+ // Qualify the name with successively shorter prefixes of the package name,
+ // most-qualified first, ending with the bare name.
+ nextPkg := p.pkg
+ candidates := []string{nextPkg + "." + name}
+ for i := strings.LastIndex(nextPkg, "."); i >= 0; i = strings.LastIndex(nextPkg, ".") {
+ nextPkg = nextPkg[:i]
+ candidates = append(candidates, nextPkg+"."+name)
+ }
+ return append(candidates, name)
+}
diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 00000000000..a2f550babe6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,189 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "strings"
+ "unicode/utf8"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface for filter source contents.
+type Source interface {
+ // Content returns the source content represented as a string.
+ // Examples contents are the single file contents, textbox field,
+ // or url parameter.
+ Content() string
+
+ // Description gives a brief description of the source.
+ // Example descriptions are a file name or ui element.
+ Description() string
+
+ // LineOffsets gives the character offsets at which lines occur.
+ // The zero-th entry should refer to the break between the first
+ // and second line, or EOF if there is only one line of source.
+ LineOffsets() []int32
+
+ // LocationOffset translates a Location to an offset.
+ // Given the line and column of the Location returns the
+ // Location's character offset in the Source, and a bool
+ // indicating whether the Location was found.
+ LocationOffset(location Location) (int32, bool)
+
+ // OffsetLocation translates a character offset to a Location, or
+ // false if the conversion was not feasible.
+ OffsetLocation(offset int32) (Location, bool)
+
+ // Snippet returns a line of content and whether the line was found.
+ Snippet(line int) (string, bool)
+
+ // IDOffset returns the raw character offset of an expression within
+ // the source, or false if the expression cannot be found.
+ IDOffset(exprID int64) (int32, bool)
+
+ // IDLocation returns a Location for the given expression id,
+ // or false if one cannot be found. It behaves as the obvious
+ // composition of IdOffset() and OffsetLocation().
+ IDLocation(exprID int64) (Location, bool)
+}
+
+// The sourceImpl type implementation of the Source interface.
+type sourceImpl struct {
+ contents []rune
+ description string
+ lineOffsets []int32
+ idOffsets map[int64]int32
+}
+
+// TODO(jimlarson) "Character offsets" should index the code points
+// within the UTF-8 encoded string. It currently indexes bytes.
+// Can be accomplished by using rune[] instead of string for contents.
+
+// NewTextSource creates a new Source from the input text string.
+func NewTextSource(text string) Source {
+ return NewStringSource(text, " ")
+}
+
+// NewStringSource creates a new Source from the given contents and description.
+func NewStringSource(contents string, description string) Source {
+ // Compute line offsets up front as they are referred to frequently.
+ lines := strings.Split(contents, "\n")
+ offsets := make([]int32, len(lines))
+ var offset int32
+ // offsets[i] is the cumulative rune offset just past line i's newline,
+ // i.e. the offset at which the following line begins. Offsets are counted
+ // in runes, matching the []rune contents below.
+ for i, line := range lines {
+ offset = offset + int32(utf8.RuneCountInString(line)) + 1
+ offsets[int32(i)] = offset
+ }
+ return &sourceImpl{
+ contents: []rune(contents),
+ description: description,
+ lineOffsets: offsets,
+ idOffsets: map[int64]int32{},
+ }
+}
+
+// NewInfoSource creates a new Source from a SourceInfo.
+func NewInfoSource(info *exprpb.SourceInfo) Source {
+ return &sourceImpl{
+ contents: []rune(""),
+ description: info.Location,
+ lineOffsets: info.LineOffsets,
+ idOffsets: info.Positions,
+ }
+}
+
+func (s *sourceImpl) Content() string {
+ return string(s.contents)
+}
+
+func (s *sourceImpl) Description() string {
+ return s.description
+}
+
+func (s *sourceImpl) LineOffsets() []int32 {
+ return s.lineOffsets
+}
+
+func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
+ if lineOffset, found := s.findLineOffset(location.Line()); found {
+ return lineOffset + int32(location.Column()), true
+ }
+ return -1, false
+}
+
+func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
+ line, lineOffset := s.findLine(offset)
+ return NewLocation(int(line), int(offset-lineOffset)), true
+}
+
+// Snippet returns the content of the (1-indexed) line without its trailing
+// newline, and whether the line was found.
+func (s *sourceImpl) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || len(s.contents) == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ // Exclude the newline that terminates the requested line.
+ return string(s.contents[charStart : charEnd-1]), true
+ }
+ // Last line: extends to the end of the contents.
+ return string(s.contents[charStart:]), true
+}
+
+func (s *sourceImpl) IDOffset(exprID int64) (int32, bool) {
+ if offset, found := s.idOffsets[exprID]; found {
+ return offset, true
+ }
+ return -1, false
+}
+
+func (s *sourceImpl) IDLocation(exprID int64) (Location, bool) {
+ if offset, found := s.IDOffset(exprID); found {
+ if location, found := s.OffsetLocation(offset); found {
+ return location, true
+ }
+ }
+ return NewLocation(1, 0), false
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if line doesn't exist.
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ } else if line > 1 && line <= int(len(s.lineOffsets)) {
+ // lineOffsets[i] is the offset just past line i+1's newline, so line N
+ // (N >= 2) begins at lineOffsets[N-2].
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
+
+// findLine finds the line that contains the given character offset and
+// returns the line number and offset of the beginning of that line.
+// Note that the last line is treated as if it contains all offsets
+// beyond the end of the actual source.
+func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) {
+ var line int32 = 1
+ // Count the lines whose end precedes or covers the offset.
+ for _, lineOffset := range s.lineOffsets {
+ if lineOffset > characterOffset {
+ break
+ } else {
+ line++
+ }
+ }
+ if line == 1 {
+ return line, 0
+ }
+ return line, s.lineOffsets[line-2]
+}
diff --git a/vendor/github.com/google/cel-go/common/types/any_value.go b/vendor/github.com/google/cel-go/common/types/any_value.go
new file mode 100644
index 00000000000..8272ca00699
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/any_value.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+// anyValueType constant representing the reflected type of google.protobuf.Any.
+var anyValueType = reflect.TypeOf(&anypb.Any{})
diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go
new file mode 100644
index 00000000000..41db673723c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bool.go
@@ -0,0 +1,126 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Bool type that implements ref.Val and supports comparison and negation.
+type Bool bool
+
+var (
+ // BoolType singleton.
+ BoolType = NewTypeValue("bool",
+ traits.ComparerType,
+ traits.NegatorType)
+)
+
+// Boolean constants
+var (
+ False = Bool(false)
+ True = Bool(true)
+)
+
+// Compare implements traits.Comparer.Compare.
+func (b Bool) Compare(other ref.Val) ref.Val {
+ if BoolType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherBool := other.(Bool)
+ if b == otherBool {
+ return IntZero
+ }
+ if !b && otherBool {
+ return IntNegOne
+ }
+ return IntOne
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Bool:
+ return bool(b), nil
+ case reflect.Ptr:
+ if typeDesc == jsonValueType {
+ return &structpb.Value{
+ Kind: &structpb.Value_BoolValue{
+ BoolValue: b.Value().(bool)}}, nil
+ }
+ if typeDesc.Elem().Kind() == reflect.Bool {
+ p := bool(b)
+ return &p, nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (b Bool) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(strconv.FormatBool(bool(b)))
+ case BoolType:
+ return b
+ case TypeType:
+ return BoolType
+ }
+ return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (b Bool) Equal(other ref.Val) ref.Val {
+ if BoolType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(b == other.(Bool))
+}
+
+// Negate implements traits.Negater.Negate.
+func (b Bool) Negate() ref.Val {
+ return !b
+}
+
+// Type implements ref.Val.Type.
+func (b Bool) Type() ref.Type {
+ return BoolType
+}
+
+// Value implements ref.Val.Value.
+func (b Bool) Value() interface{} {
+ return bool(b)
+}
+
+// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType.
+func IsBool(elem interface{}) bool {
+ switch elem.(type) {
+ case ref.Type:
+ return elem == BoolType
+ case ref.Val:
+ return IsBool(elem.(ref.Val).Type())
+ }
+ return false
+}
diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go
new file mode 100644
index 00000000000..d0bdf175a52
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -0,0 +1,103 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Bytes type that implements ref.Val and supports add, compare, and size
+// operations.
+type Bytes []byte
+
+var (
+ // BytesType singleton.
+ BytesType = NewTypeValue("bytes",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.SizerType)
+)
+
+// Add implements traits.Adder.Add by concatenating byte sequences.
+func (b Bytes) Add(other ref.Val) ref.Val {
+ if BytesType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return append(b, other.(Bytes)...)
+}
+
+// Compare implements traits.Comparer.Compare by lexicographic ordering.
+func (b Bytes) Compare(other ref.Val) ref.Val {
+ if BytesType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Int(bytes.Compare(b, other.(Bytes)))
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array, reflect.Slice:
+ if typeDesc.Elem().Kind() == reflect.Uint8 {
+ return b.Value(), nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(b).Implements(typeDesc) {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(b)
+ case BytesType:
+ return b
+ case TypeType:
+ return BytesType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (b Bytes) Equal(other ref.Val) ref.Val {
+ if BytesType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(bytes.Equal(b, other.(Bytes)))
+}
+
+// Size implements traits.Sizer.Size.
+func (b Bytes) Size() ref.Val {
+ return Int(len(b))
+}
+
+// Type implements ref.Val.Type.
+func (b Bytes) Type() ref.Type {
+ return BytesType
+}
+
+// Value implements ref.Val.Value.
+func (b Bytes) Value() interface{} {
+ return []byte(b)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/doc.go b/vendor/github.com/google/cel-go/common/types/doc.go
new file mode 100644
index 00000000000..5f641d7043d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types contains the types, traits, and utilities common to all
+// components of expression handling.
+package types
diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go
new file mode 100644
index 00000000000..094f249d1a9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/double.go
@@ -0,0 +1,158 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Double type that implements ref.Val, comparison, and mathematical
+// operations.
+type Double float64
+
+var (
+ // DoubleType singleton.
+ DoubleType = NewTypeValue("double",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.DividerType,
+ traits.MultiplierType,
+ traits.NegatorType,
+ traits.SubtractorType)
+)
+
+// Add implements traits.Adder.Add.
+func (d Double) Add(other ref.Val) ref.Val {
+ if DoubleType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return d + other.(Double)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Double) Compare(other ref.Val) ref.Val {
+ if DoubleType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ if d < other.(Double) {
+ return IntNegOne
+ }
+ if d > other.(Double) {
+ return IntOne
+ }
+ return IntZero
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Float32:
+ return float32(d), nil
+ case reflect.Float64:
+ return float64(d), nil
+ case reflect.Ptr:
+ if typeDesc == jsonValueType {
+ return &structpb.Value{
+ Kind: &structpb.Value_NumberValue{
+ NumberValue: float64(d)}}, nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Float32:
+ p := float32(d)
+ return &p, nil
+ case reflect.Float64:
+ p := float64(d)
+ return &p, nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(d).Implements(typeDesc) {
+ return d, nil
+ }
+ }
+ return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return Int(float64(d))
+ case UintType:
+ return Uint(float64(d))
+ case DoubleType:
+ return d
+ case StringType:
+ return String(fmt.Sprintf("%g", float64(d)))
+ case TypeType:
+ return DoubleType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (d Double) Divide(other ref.Val) ref.Val {
+ if DoubleType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ if other.(Double) == Double(0) {
+ return NewErr("divide by zero")
+ }
+ return d / other.(Double)
+}
+
+// Equal implements ref.Val.Equal.
+func (d Double) Equal(other ref.Val) ref.Val {
+ if DoubleType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ // TODO: Handle NaNs properly.
+ return Bool(d == other.(Double))
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (d Double) Multiply(other ref.Val) ref.Val {
+ if DoubleType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return d * other.(Double)
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Double) Negate() ref.Val {
+ return -d
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Double) Subtract(subtrahend ref.Val) ref.Val {
+ if DoubleType != subtrahend.Type() {
+ return ValOrErr(subtrahend, "no such overload")
+ }
+ return d - subtrahend.(Double)
+}
+
+// Type implements ref.Val.Type.
+func (d Double) Type() ref.Type {
+ return DoubleType
+}
+
+// Value implements ref.Val.Value.
+func (d Double) Value() interface{} {
+ return float64(d)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go
new file mode 100644
index 00000000000..85a9e1ec4db
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/duration.go
@@ -0,0 +1,199 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ dpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+// Duration type that implements ref.Val and supports add, compare, negate,
+// and subtract operators. This type is also a receiver which means it can
+// participate in dispatch to receiver functions.
+type Duration struct {
+ *dpb.Duration
+}
+
+var (
+ // DurationType singleton.
+ DurationType = NewTypeValue("google.protobuf.Duration",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.NegatorType,
+ traits.ReceiverType,
+ traits.SubtractorType)
+)
+
+// Add implements traits.Adder.Add.
+func (d Duration) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ dur1, err := ptypes.Duration(d.Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ dur2, err := ptypes.Duration(other.(Duration).Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ return Duration{ptypes.DurationProto(dur1 + dur2)}
+ case TimestampType:
+ dur, err := ptypes.Duration(d.Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ ts, err := ptypes.Timestamp(other.(Timestamp).Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ tstamp, err := ptypes.TimestampProto(ts.Add(dur))
+ if err != nil {
+ return &Err{err}
+ }
+ return Timestamp{tstamp}
+ }
+ return ValOrErr(other, "no such overload")
+}
+
+// Compare implements traits.Comparer.Compare.
+func (d Duration) Compare(other ref.Val) ref.Val {
+ if DurationType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ dur1, err := ptypes.Duration(d.Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ dur2, err := ptypes.Duration(other.(Duration).Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ dur := dur1 - dur2
+ if dur < 0 {
+ return IntNegOne
+ }
+ if dur > 0 {
+ return IntOne
+ }
+ return IntZero
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ if typeDesc == durationValueType {
+ return d.Value(), nil
+ }
+ // If the duration is already assignable to the desired type return it.
+ if reflect.TypeOf(d).AssignableTo(typeDesc) {
+ return d, nil
+ }
+ return nil, fmt.Errorf("type conversion error from "+
+ "'google.protobuf.Duration' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d Duration) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ if dur, err := ptypes.Duration(d.Duration); err == nil {
+ return String(dur.String())
+ }
+ case IntType:
+ if dur, err := ptypes.Duration(d.Duration); err == nil {
+ return Int(dur)
+ }
+ case DurationType:
+ return d
+ case TypeType:
+ return DurationType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d Duration) Equal(other ref.Val) ref.Val {
+ if DurationType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(proto.Equal(d.Duration, other.Value().(proto.Message)))
+}
+
+// Negate implements traits.Negater.Negate.
+func (d Duration) Negate() ref.Val {
+ dur, err := ptypes.Duration(d.Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ return Duration{ptypes.DurationProto(-dur)}
+}
+
+// Receive implements traits.Receiver.Receive.
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+ dur, err := ptypes.Duration(d.Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ if len(args) == 0 {
+ if f, found := durationZeroArgOverloads[function]; found {
+ return f(dur)
+ }
+ }
+ return NewErr("no such overload")
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+ if DurationType != subtrahend.Type() {
+ return ValOrErr(subtrahend, "no such overload")
+ }
+ return d.Add(subtrahend.(Duration).Negate())
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+ return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() interface{} {
+ return d.Duration
+}
+
+var (
+ durationValueType = reflect.TypeOf(&dpb.Duration{})
+
+ durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{
+ overloads.TimeGetHours: func(dur time.Duration) ref.Val {
+ return Int(dur.Hours())
+ },
+ overloads.TimeGetMinutes: func(dur time.Duration) ref.Val {
+ return Int(dur.Minutes())
+ },
+ overloads.TimeGetSeconds: func(dur time.Duration) ref.Val {
+ return Int(dur.Seconds())
+ },
+ overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val {
+ return Int(dur.Nanoseconds() / 1000000)
+ }}
+)
diff --git a/vendor/github.com/google/cel-go/common/types/dyn.go b/vendor/github.com/google/cel-go/common/types/dyn.go
new file mode 100644
index 00000000000..08530c999f6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/dyn.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "github.com/google/cel-go/common/types/ref"
+
+var (
+ // DynType singleton.
+ DynType = NewTypeValue("dyn")
+)
+
+// IsDyn indicates whether the input ref.Val or ref.Type is equal to DynType.
+func IsDyn(elem interface{}) bool {
+ switch elem.(type) {
+ case ref.Type:
+ return elem == DynType
+ case ref.Val:
+ return IsDyn(elem.(ref.Val).Type())
+ }
+ return false
+}
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
new file mode 100644
index 00000000000..13323613fa5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -0,0 +1,87 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Err type which extends the built-in go error and implements ref.Val.
+type Err struct {
+ error
+}
+
+var (
+ // ErrType singleton.
+ ErrType = NewTypeValue("error")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...interface{}) ref.Val {
+ return &Err{fmt.Errorf(format, args...)}
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
+ switch val.Type() {
+ case ErrType, UnknownType:
+ return val
+ default:
+ return NewErr(format, args...)
+ }
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+ // Errors are not convertible to other representations.
+ return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+ // An error cannot be equal to any other value, so it returns itself.
+ return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+ return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+ return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() interface{} {
+ return e.error
+}
+
+// IsError returns whether the input element ref.Type or ref.Val is equal to
+// the ErrType singleton.
+func IsError(val ref.Val) bool {
+ return val.Type() == ErrType
+}
diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go
new file mode 100644
index 00000000000..c7d8245f2d2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/int.go
@@ -0,0 +1,178 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+// Int type that implements ref.Val as well as comparison and math operators.
+type Int int64
+
+// Int constants used for comparison results.
+const (
+ IntZero = Int(0)
+ IntOne = Int(1)
+ IntNegOne = Int(-1)
+)
+
+var (
+ // IntType singleton.
+ IntType = NewTypeValue("int",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.DividerType,
+ traits.ModderType,
+ traits.MultiplierType,
+ traits.NegatorType,
+ traits.SubtractorType)
+)
+
+// Add implements traits.Adder.Add.
+func (i Int) Add(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return i + other.(Int)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Int) Compare(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ if i < other.(Int) {
+ return IntNegOne
+ }
+ if i > other.(Int) {
+ return IntOne
+ }
+ return IntZero
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Int32:
+ return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil
+ case reflect.Int64:
+ return int64(i), nil
+ case reflect.Ptr:
+ if typeDesc == jsonValueType {
+ return &structpb.Value{
+ Kind: &structpb.Value_NumberValue{
+ NumberValue: float64(i)}}, nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Int32:
+ p := int32(i)
+ return &p, nil
+ case reflect.Int64:
+ p := int64(i)
+ return &p, nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return i
+ case UintType:
+ return Uint(i)
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", int64(i)))
+ case TypeType:
+ return IntType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Int) Divide(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherInt := other.(Int)
+ if otherInt == IntZero {
+ return NewErr("divide by zero")
+ }
+ return i / otherInt
+}
+
+// Equal implements ref.Val.Equal.
+func (i Int) Equal(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(i == other.(Int))
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Int) Modulo(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherInt := other.(Int)
+ if otherInt == IntZero {
+ return NewErr("modulus by zero")
+ }
+ return i % otherInt
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Int) Multiply(other ref.Val) ref.Val {
+ if IntType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return i * other.(Int)
+}
+
+// Negate implements traits.Negater.Negate.
+func (i Int) Negate() ref.Val {
+ return -i
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Int) Subtract(subtrahend ref.Val) ref.Val {
+ if IntType != subtrahend.Type() {
+ return ValOrErr(subtrahend, "no such overload")
+ }
+ return i - subtrahend.(Int)
+}
+
+// Type implements ref.Val.Type.
+func (i Int) Type() ref.Type {
+ return IntType
+}
+
+// Value implements ref.Val.Value.
+func (i Int) Value() interface{} {
+ return int64(i)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go
new file mode 100644
index 00000000000..023fcaf7a18
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -0,0 +1,55 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // IteratorType singleton.
+ IteratorType = NewTypeValue("iterator", traits.IteratorType)
+)
+
+// baseIterator is the basis for list, map, and object iterators.
+//
+// An iterator in and of itself should not be a valid value for comparison, but must implement the
+// `ref.Val` methods in order to be well-supported within instruction arguments processed by the
+// interpreter.
+type baseIterator struct{}
+
+func (it *baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ return nil, fmt.Errorf("type conversion on iterators not supported")
+}
+
+func (it *baseIterator) ConvertToType(typeVal ref.Type) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (it *baseIterator) Equal(other ref.Val) ref.Val {
+ return NewErr("no such overload")
+}
+
+func (it *baseIterator) Type() ref.Type {
+ return IteratorType
+}
+
+func (it *baseIterator) Value() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/json_list.go b/vendor/github.com/google/cel-go/common/types/json_list.go
new file mode 100644
index 00000000000..8c34d4c564a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/json_list.go
@@ -0,0 +1,186 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+)
+
+type jsonListValue struct {
+ *structpb.ListValue
+ ref.TypeAdapter
+}
+
+// NewJSONList creates a traits.Lister implementation backed by a JSON list that has been encoded
+// in protocol buffer form.
+//
+// The `adapter` argument provides type adaptation capabilities from proto to CEL.
+func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister {
+ return &jsonListValue{TypeAdapter: adapter, ListValue: l}
+}
+
+func (l *jsonListValue) Add(other ref.Val) ref.Val {
+ if other.Type() != ListType {
+ return ValOrErr(other, "no such overload")
+ }
+ switch other.(type) {
+ case *jsonListValue:
+ otherList := other.(*jsonListValue)
+ concatElems := append(l.GetValues(), otherList.GetValues()...)
+ return NewJSONList(l.TypeAdapter, &structpb.ListValue{Values: concatElems})
+ }
+ return &concatList{
+ prevList: l,
+ nextList: other.(traits.Lister)}
+}
+
+func (l *jsonListValue) Contains(elem ref.Val) ref.Val {
+ for i := Int(0); i < l.Size().(Int); i++ {
+ if l.Get(i).Equal(elem) == True {
+ return True
+ }
+ }
+ return False
+}
+
+func (l *jsonListValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array, reflect.Slice:
+ elemCount := int(l.Size().(Int))
+ nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ for i := 0; i < elemCount; i++ {
+ elem := l.Get(Int(i))
+ nativeElemVal, err := elem.ConvertToNative(typeDesc.Elem())
+ if err != nil {
+ return nil, err
+ }
+ nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+ }
+ return nativeList.Interface(), nil
+
+ case reflect.Ptr:
+ switch typeDesc {
+ case jsonValueType:
+ return &structpb.Value{
+ Kind: &structpb.Value_ListValue{
+ ListValue: l.ListValue}}, nil
+ case jsonListValueType:
+ return l.ListValue, nil
+ case anyValueType:
+ return ptypes.MarshalAny(l.Value().(proto.Message))
+ }
+
+ case reflect.Interface:
+ // If the list is already assignable to the desired type return it.
+ if reflect.TypeOf(l).Implements(typeDesc) {
+ return l, nil
+ }
+ }
+ return nil, fmt.Errorf("no conversion found from list type to native type."+
+ " list elem: google.protobuf.Value, native type: %v", typeDesc)
+}
+
+func (l *jsonListValue) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+func (l *jsonListValue) Equal(other ref.Val) ref.Val {
+ if ListType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherList := other.(traits.Lister)
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := thisElem.Equal(otherElem)
+ if elemEq == False || IsUnknownOrError(elemEq) {
+ return elemEq
+ }
+ }
+ return True
+}
+
+func (l *jsonListValue) Get(index ref.Val) ref.Val {
+ if IntType != index.Type() {
+ return ValOrErr(index, "unsupported index type: '%v", index.Type())
+ }
+ i := index.(Int)
+ if i < 0 || i >= l.Size().(Int) {
+ return NewErr("index '%d' out of range in list size '%d'", i, l.Size())
+ }
+ elem := l.GetValues()[i]
+ return l.NativeToValue(elem)
+}
+
+func (l *jsonListValue) Iterator() traits.Iterator {
+ return &jsonValueListIterator{
+ baseIterator: &baseIterator{},
+ TypeAdapter: l.TypeAdapter,
+ elems: l.GetValues(),
+ len: len(l.GetValues())}
+}
+
+func (l *jsonListValue) Size() ref.Val {
+ return Int(len(l.GetValues()))
+}
+
+func (l *jsonListValue) Type() ref.Type {
+ return ListType
+}
+
+func (l *jsonListValue) Value() interface{} {
+ return l.ListValue
+}
+
+type jsonValueListIterator struct {
+ *baseIterator
+ ref.TypeAdapter
+ cursor int
+ elems []*structpb.Value
+ len int
+}
+
+func (it *jsonValueListIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+func (it *jsonValueListIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return it.NativeToValue(it.elems[index])
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/json_struct.go b/vendor/github.com/google/cel-go/common/types/json_struct.go
new file mode 100644
index 00000000000..c3967d77a0d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/json_struct.go
@@ -0,0 +1,188 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+)
+
+type jsonStruct struct {
+ ref.TypeAdapter
+ *structpb.Struct
+}
+
+// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been
+// encoded in protocol buffer form.
+//
+// The `adapter` argument provides type adaptation capabilities from proto to CEL.
+func NewJSONStruct(adapter ref.TypeAdapter, st *structpb.Struct) traits.Mapper {
+ return &jsonStruct{TypeAdapter: adapter, Struct: st}
+}
+
+func (m *jsonStruct) Contains(index ref.Val) ref.Val {
+ return Bool(!IsError(m.Get(index)))
+}
+
+func (m *jsonStruct) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Map:
+ otherKey := typeDesc.Key()
+ otherElem := typeDesc.Elem()
+ if typeDesc.Key().Kind() == reflect.String {
+ nativeMap := reflect.MakeMapWithSize(typeDesc, int(m.Size().(Int)))
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ refKeyValue, err := key.ConvertToNative(otherKey)
+ if err != nil {
+ return nil, err
+ }
+ refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(
+ reflect.ValueOf(refKeyValue),
+ reflect.ValueOf(refElemValue))
+ }
+ return nativeMap.Interface(), nil
+ }
+
+ case reflect.Ptr:
+ switch typeDesc {
+ case jsonValueType:
+ return &structpb.Value{
+ Kind: &structpb.Value_StructValue{
+ StructValue: m.Struct}}, nil
+ case jsonStructType:
+ return m.Struct, nil
+ case anyValueType:
+ return ptypes.MarshalAny(m.Value().(proto.Message))
+ }
+
+ case reflect.Interface:
+ // If the struct is already assignable to the desired type return it.
+ if reflect.TypeOf(m).Implements(typeDesc) {
+ return m, nil
+ }
+ }
+ return nil, fmt.Errorf(
+ "no conversion found from map type to native type."+
+ " map type: google.protobuf.Struct, native type: %v", typeDesc)
+}
+
+func (m *jsonStruct) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+func (m *jsonStruct) Equal(other ref.Val) ref.Val {
+ if MapType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherMap := other.(traits.Mapper)
+ if m.Size() != otherMap.Size() {
+ return False
+ }
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ if otherVal := otherMap.Get(key); IsError(otherVal) {
+ return False
+ } else if thisVal := m.Get(key); IsError(thisVal) {
+ return False
+ } else {
+ valEq := thisVal.Equal(otherVal)
+ if valEq == False || IsUnknownOrError(valEq) {
+ return valEq
+ }
+ }
+ }
+ return True
+}
+
+func (m *jsonStruct) Get(key ref.Val) ref.Val {
+ if StringType != key.Type() {
+ return ValOrErr(key, "unsupported key type: '%v", key.Type())
+ }
+ fields := m.Struct.GetFields()
+ value, found := fields[string(key.(String))]
+ if !found {
+ return NewErr("no such key: '%v'", key)
+ }
+ return m.NativeToValue(value)
+}
+
+func (m *jsonStruct) Iterator() traits.Iterator {
+ f := m.GetFields()
+ keys := make([]string, len(m.GetFields()))
+ i := 0
+ for k := range f {
+ keys[i] = k
+ i++
+ }
+ return &jsonValueMapIterator{
+ baseIterator: &baseIterator{},
+ len: len(keys),
+ mapKeys: keys}
+}
+
+func (m *jsonStruct) Size() ref.Val {
+ return Int(len(m.GetFields()))
+}
+
+func (m *jsonStruct) Type() ref.Type {
+ return MapType
+}
+
+func (m *jsonStruct) Value() interface{} {
+ return m.Struct
+}
+
+type jsonValueMapIterator struct {
+ *baseIterator
+ cursor int
+ len int
+ mapKeys []string
+}
+
+func (it *jsonValueMapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+func (it *jsonValueMapIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return String(it.mapKeys[index])
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go
new file mode 100644
index 00000000000..e082dc6a282
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ "github.com/golang/protobuf/ptypes/struct"
+)
+
+// jsonValueType constant representing the reflected type of a protobuf Value.
+var jsonValueType = reflect.TypeOf(&structpb.Value{})
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
new file mode 100644
index 00000000000..e701a1a11e1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -0,0 +1,450 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // ListType singleton.
+ ListType = NewTypeValue("list",
+ traits.AdderType,
+ traits.ContainerType,
+ traits.IndexerType,
+ traits.IterableType,
+ traits.SizerType)
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister {
+ return &baseList{
+ TypeAdapter: adapter,
+ value: value,
+ refValue: reflect.ValueOf(value)}
+}
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister {
+ return &stringList{
+ baseList: NewDynamicList(adapter, elems).(*baseList),
+ elems: elems}
+}
+
+// NewValueList returns a traits.Lister with ref.Val elements.
+func NewValueList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister {
+ return &valueList{
+ baseList: NewDynamicList(adapter, elems).(*baseList),
+ elems: elems}
+}
+
+// baseList points to a list containing elements of any type.
+// The `value` is an array of native values, and refValue is its reflection object.
+// The `ref.TypeAdapter` enables native type to CEL type conversions.
+type baseList struct {
+ ref.TypeAdapter
+ value interface{}
+ refValue reflect.Value
+}
+
+func (l *baseList) Add(other ref.Val) ref.Val {
+ if other.Type() != ListType {
+ return ValOrErr(other, "no such overload")
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if other.(traits.Sizer).Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ TypeAdapter: l.TypeAdapter,
+ prevList: l,
+ nextList: other.(traits.Lister)}
+}
+
+func (l *baseList) Contains(elem ref.Val) ref.Val {
+ for i := Int(0); i < l.Size().(Int); i++ {
+ if l.Get(i).Equal(elem) == True {
+ return True
+ }
+ }
+ return False
+}
+
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ // JSON conversions are a special case since the 'native' type is a proto message.
+ if typeDesc == jsonValueType || typeDesc == jsonListValueType {
+ jsonValues, err :=
+ l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
+ if typeDesc == jsonListValueType {
+ return jsonList, nil
+ }
+ return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: jsonList}}, nil
+ }
+
+ // If the list is already assignable to the desired type return it.
+ if reflect.TypeOf(l).AssignableTo(typeDesc) {
+ return l, nil
+ }
+
+ // Non-list conversion.
+ if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
+ return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+ }
+
+ // List conversion.
+ thisType := l.refValue.Type()
+ thisElem := thisType.Elem()
+ thisElemKind := thisElem.Kind()
+
+ otherElem := typeDesc.Elem()
+ otherElemKind := otherElem.Kind()
+ if otherElemKind == thisElemKind {
+ return l.value, nil
+ }
+ // Allow the element ConvertToNative() function to determine whether conversion is possible.
+ elemCount := int(l.Size().(Int))
+ nativeList := reflect.MakeSlice(typeDesc, elemCount, elemCount)
+ for i := 0; i < elemCount; i++ {
+ elem := l.Get(Int(i))
+ nativeElemVal, err := elem.ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+ }
+ return nativeList.Interface(), nil
+}
+
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+func (l *baseList) Equal(other ref.Val) ref.Val {
+ if ListType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherList := other.(traits.Lister)
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ elemEq := thisElem.Equal(otherElem)
+ if elemEq == False || IsUnknownOrError(elemEq) {
+ return elemEq
+ }
+ }
+ return True
+}
+
+func (l *baseList) Get(index ref.Val) ref.Val {
+ if index.Type() != IntType {
+ return ValOrErr(index, "unsupported index type '%s' in list", index.Type())
+ }
+ i := index.(Int)
+ if i < 0 || i >= l.Size().(Int) {
+ return NewErr("index '%d' out of range in list size '%d'", i, l.Size())
+ }
+ elem := l.refValue.Index(int(i)).Interface()
+ return l.NativeToValue(elem)
+}
+
+func (l *baseList) Iterator() traits.Iterator {
+ return &listIterator{
+ baseIterator: &baseIterator{},
+ listValue: l,
+ cursor: 0,
+ len: l.Size().(Int)}
+}
+
+func (l *baseList) Size() ref.Val {
+ return Int(l.refValue.Len())
+}
+
+func (l *baseList) Type() ref.Type {
+ return ListType
+}
+
+func (l *baseList) Value() interface{} {
+ return l.value
+}
+
+// concatList combines two list implementations together into a view.
+// The `ref.TypeAdapter` enables native type to CEL type conversions.
+type concatList struct {
+ ref.TypeAdapter
+ value interface{}
+ prevList traits.Lister
+ nextList traits.Lister
+}
+
+func (l *concatList) Add(other ref.Val) ref.Val {
+ if other.Type() != ListType {
+ return ValOrErr(other, "no such overload")
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if other.(traits.Sizer).Size() == IntZero {
+ return l
+ }
+ return &concatList{
+ TypeAdapter: l.TypeAdapter,
+ prevList: l,
+ nextList: other.(traits.Lister)}
+}
+
+func (l *concatList) Contains(elem ref.Val) ref.Val {
+ return Bool(l.prevList.Contains(elem) == True ||
+ l.nextList.Contains(elem) == True)
+}
+
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ combined := &baseList{
+ TypeAdapter: l.TypeAdapter,
+ value: l.Value(),
+ refValue: reflect.ValueOf(l.Value())}
+ return combined.ConvertToNative(typeDesc)
+}
+
+func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case ListType:
+ return l
+ case TypeType:
+ return ListType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal)
+}
+
+func (l *concatList) Equal(other ref.Val) ref.Val {
+ if ListType != other.Type() {
+ return False
+ }
+ otherList := other.(traits.Lister)
+ if l.Size() != otherList.Size() {
+ return False
+ }
+ for i := IntZero; i < l.Size().(Int); i++ {
+ thisElem := l.Get(i)
+ otherElem := otherList.Get(i)
+ if thisElem.Equal(otherElem) != True {
+ return False
+ }
+ }
+ return True
+}
+
+func (l *concatList) Get(index ref.Val) ref.Val {
+ if index.Type() != IntType {
+ return ValOrErr(index, "unsupported index type '%s' in list", index.Type())
+ }
+ i := index.(Int)
+ if i < l.prevList.Size().(Int) {
+ return l.prevList.Get(i)
+ }
+ offset := i - l.prevList.Size().(Int)
+ return l.nextList.Get(offset)
+}
+
+func (l *concatList) Iterator() traits.Iterator {
+ return &listIterator{
+ baseIterator: &baseIterator{},
+ listValue: l,
+ cursor: 0,
+ len: l.Size().(Int)}
+}
+
+func (l *concatList) Size() ref.Val {
+ return l.prevList.Size().(Int).Add(l.nextList.Size())
+}
+
+func (l *concatList) Type() ref.Type {
+ return ListType
+}
+
+func (l *concatList) Value() interface{} {
+ if l.value == nil {
+ prevVal := reflect.ValueOf(l.prevList.Value())
+ nextVal := reflect.ValueOf(l.nextList.Value())
+ merged := make([]interface{}, l.Size().(Int), l.Size().(Int))
+ prevLen := int(l.prevList.Size().(Int))
+ for i := 0; i < prevLen; i++ {
+ merged[i] = prevVal.Index(i).Interface()
+ }
+ for j := 0; j < int(l.nextList.Size().(Int)); j++ {
+ merged[prevLen+j] = nextVal.Index(j).Interface()
+ }
+ l.value = merged
+ }
+ return l.value
+}
+
+// stringList is a specialization of the traits.Lister interface which is
+// present to demonstrate the ability to specialize Lister implementations.
+type stringList struct {
+ *baseList
+ elems []string
+}
+
+func (l *stringList) Add(other ref.Val) ref.Val {
+ if other.Type() != ListType {
+ return ValOrErr(other, "no such overload")
+ }
+ if l.Size() == IntZero {
+ return other
+ }
+ if other.(traits.Sizer).Size() == IntZero {
+ return l
+ }
+ switch other.(type) {
+ case *stringList:
+ concatElems := append(l.elems, other.(*stringList).elems...)
+ return NewStringList(l.TypeAdapter, concatElems)
+ }
+ return &concatList{
+ TypeAdapter: l.TypeAdapter,
+ prevList: l.baseList,
+ nextList: other.(traits.Lister)}
+}
+
+func (l *stringList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Array, reflect.Slice:
+ if typeDesc.Elem().Kind() == reflect.String {
+ return l.elems, nil
+ }
+ case reflect.Ptr:
+ if typeDesc == jsonValueType || typeDesc == jsonListValueType {
+ elemCount := len(l.elems)
+ listVals := make([]*structpb.Value, elemCount, elemCount)
+ for i := 0; i < elemCount; i++ {
+ listVals[i] = &structpb.Value{
+ Kind: &structpb.Value_StringValue{StringValue: l.elems[i]}}
+ }
+ jsonList := &structpb.ListValue{Values: listVals}
+ if typeDesc == jsonListValueType {
+ return jsonList, nil
+ }
+ return &structpb.Value{
+ Kind: &structpb.Value_ListValue{
+ ListValue: jsonList}}, nil
+ }
+ }
+ // If the list is already assignable to the desired type return it.
+ if reflect.TypeOf(l).AssignableTo(typeDesc) {
+ return l, nil
+ }
+ return nil, fmt.Errorf("no conversion found from list type to native type."+
+ " list elem: string, native type: %v", typeDesc)
+}
+
+func (l *stringList) Get(index ref.Val) ref.Val {
+ if index.Type() != IntType {
+ return ValOrErr(index, "unsupported index type '%s' in list", index.Type())
+ }
+ i := index.(Int)
+ if i < 0 || i >= l.Size().(Int) {
+ return NewErr("index '%d' out of range in list size '%d'", i, l.Size())
+ }
+ return String(l.elems[i])
+}
+
+func (l *stringList) Size() ref.Val {
+ return Int(len(l.elems))
+}
+
+// valueList is a specialization of traits.Lister for ref.Val.
+type valueList struct {
+ *baseList
+ elems []ref.Val
+}
+
+func (l *valueList) Add(other ref.Val) ref.Val {
+ if other.Type() != ListType {
+ return ValOrErr(other, "no such overload")
+ }
+ return &concatList{
+ TypeAdapter: l.TypeAdapter,
+ prevList: l,
+ nextList: other.(traits.Lister)}
+}
+
+func (l *valueList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ natives := make([]interface{}, len(l.elems))
+ for _, v := range l.elems {
+ n, e := v.ConvertToNative(typeDesc)
+ if e != nil {
+ return nil, e
+ }
+ natives = append(natives, n)
+ }
+ return natives, nil
+}
+
+func (l *valueList) Get(index ref.Val) ref.Val {
+ if index.Type() != IntType {
+ return ValOrErr(index, "unsupported index type '%s' in list", index.Type())
+ }
+ i := index.(Int)
+ if i < 0 || i >= l.Size().(Int) {
+ return NewErr("index '%d' out of range in list size '%d'", i, l.Size())
+ }
+ return l.elems[i]
+}
+
+func (l *valueList) Size() ref.Val {
+ return Int(len(l.elems))
+}
+
+type listIterator struct {
+ *baseIterator
+ listValue traits.Lister
+ cursor Int
+ len Int
+}
+
+func (it *listIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+func (it *listIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ return it.listValue.Get(index)
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go
new file mode 100644
index 00000000000..3213ec31b1a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/map.go
@@ -0,0 +1,263 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
+type baseMap struct {
+ ref.TypeAdapter
+ value interface{}
+ refValue reflect.Value
+}
+
+// stringMap is a specialization to improve the performance of simple key, value pair lookups by
+// string as this is the most common usage of maps.
+type stringMap struct {
+ *baseMap
+ mapStrStr map[string]string
+}
+
+// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
+func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper {
+ return &baseMap{
+ TypeAdapter: adapter,
+ value: value,
+ refValue: reflect.ValueOf(value)}
+}
+
+// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
+func NewStringStringMap(value map[string]string) traits.Mapper {
+ return &stringMap{
+ baseMap: &baseMap{value: value},
+ mapStrStr: value,
+ }
+}
+
+var (
+ // MapType singleton.
+ MapType = NewTypeValue("map",
+ traits.ContainerType,
+ traits.IndexerType,
+ traits.IterableType,
+ traits.SizerType)
+)
+
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+ return Bool(m.Get(index).Type() != ErrType)
+}
+
+func (m *baseMap) ConvertToNative(refType reflect.Type) (interface{}, error) {
+ // JSON conversion.
+ if refType == jsonValueType || refType == jsonStructType {
+ jsonEntries, err :=
+ m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+ if err != nil {
+ return nil, err
+ }
+ jsonMap := &structpb.Struct{
+ Fields: jsonEntries.(map[string]*structpb.Value)}
+ if refType == jsonStructType {
+ return jsonMap, nil
+ }
+ return &structpb.Value{
+ Kind: &structpb.Value_StructValue{
+ StructValue: jsonMap}}, nil
+ }
+
+ // Non-map conversion.
+ if refType.Kind() != reflect.Map {
+ return nil, fmt.Errorf("type conversion error from map to '%v'", refType)
+ }
+
+ // Map conversion.
+ thisType := m.refValue.Type()
+ thisKey := thisType.Key()
+ thisKeyKind := thisKey.Kind()
+ thisElem := thisType.Elem()
+ thisElemKind := thisElem.Kind()
+
+ otherKey := refType.Key()
+ otherKeyKind := otherKey.Kind()
+ otherElem := refType.Elem()
+ otherElemKind := otherElem.Kind()
+
+ if otherKeyKind == thisKeyKind && otherElemKind == thisElemKind {
+ return m.value, nil
+ }
+ elemCount := m.Size().(Int)
+ nativeMap := reflect.MakeMapWithSize(refType, int(elemCount))
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ refKeyValue, err := key.ConvertToNative(otherKey)
+ if err != nil {
+ return nil, err
+ }
+ refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+ if err != nil {
+ return nil, err
+ }
+ nativeMap.SetMapIndex(
+ reflect.ValueOf(refKeyValue),
+ reflect.ValueOf(refElemValue))
+ }
+ return nativeMap.Interface(), nil
+}
+
+func (m *stringMap) ConvertToNative(refType reflect.Type) (interface{}, error) {
+ if !m.baseMap.refValue.IsValid() {
+ m.baseMap.refValue = reflect.ValueOf(m.value)
+ }
+ return m.baseMap.ConvertToNative(refType)
+}
+
+func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case MapType:
+ return m
+ case TypeType:
+ return MapType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+func (m *baseMap) Equal(other ref.Val) ref.Val {
+ if MapType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherMap := other.(traits.Mapper)
+ if m.Size() != otherMap.Size() {
+ return False
+ }
+ it := m.Iterator()
+ for it.HasNext() == True {
+ key := it.Next()
+ if otherVal := otherMap.Get(key); IsError(otherVal) {
+ return False
+ } else if thisVal := m.Get(key); IsError(thisVal) {
+ return False
+ } else {
+ valEq := thisVal.Equal(otherVal)
+ if valEq == False || IsUnknownOrError(valEq) {
+ return valEq
+ }
+ }
+ }
+ return True
+}
+
+func (m *stringMap) Equal(other ref.Val) ref.Val {
+ if !m.baseMap.refValue.IsValid() {
+ m.baseMap.refValue = reflect.ValueOf(m.value)
+ }
+ return m.baseMap.Equal(other)
+}
+
+func (m *baseMap) Get(key ref.Val) ref.Val {
+ // TODO: There are multiple reasons why a Get could fail. Typically, this is because the key
+ // does not exist in the map; however, it's possible that the value cannot be converted to
+ // the desired type. Refine this strategy to disambiguate these cases.
+ thisKeyType := m.refValue.Type().Key()
+ nativeKey, err := key.ConvertToNative(thisKeyType)
+ if err != nil {
+ return &Err{err}
+ }
+ nativeKeyVal := reflect.ValueOf(nativeKey)
+ if !nativeKeyVal.Type().AssignableTo(thisKeyType) {
+ return NewErr("no such key: '%v'", nativeKey)
+ }
+ value := m.refValue.MapIndex(nativeKeyVal)
+ if !value.IsValid() {
+ return NewErr("no such key: '%v'", nativeKey)
+ }
+ return m.NativeToValue(value.Interface())
+}
+
+func (m *stringMap) Get(key ref.Val) ref.Val {
+ strKey, ok := key.(String)
+ if !ok {
+ return ValOrErr(key, "no such key: %v", key)
+ }
+ val, found := m.mapStrStr[string(strKey)]
+ if !found {
+ return NewErr("no such key: %s", key)
+ }
+ return String(val)
+}
+
+func (m *baseMap) Iterator() traits.Iterator {
+ mapKeys := m.refValue.MapKeys()
+ return &mapIterator{
+ baseIterator: &baseIterator{},
+ TypeAdapter: m.TypeAdapter,
+ mapValue: m,
+ mapKeys: mapKeys,
+ cursor: 0,
+ len: int(m.Size().(Int))}
+}
+
+func (m *stringMap) Iterator() traits.Iterator {
+ if !m.baseMap.refValue.IsValid() {
+ m.baseMap.refValue = reflect.ValueOf(m.value)
+ }
+ return m.baseMap.Iterator()
+}
+
+func (m *baseMap) Size() ref.Val {
+ return Int(m.refValue.Len())
+}
+
+func (m *stringMap) Size() ref.Val {
+ return Int(len(m.mapStrStr))
+}
+
+func (m *baseMap) Type() ref.Type {
+ return MapType
+}
+
+func (m *baseMap) Value() interface{} {
+ return m.value
+}
+
+type mapIterator struct {
+ *baseIterator
+ ref.TypeAdapter
+ mapValue traits.Mapper
+ mapKeys []reflect.Value
+ cursor int
+ len int
+}
+
+func (it *mapIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.len)
+}
+
+func (it *mapIterator) Next() ref.Val {
+ if it.HasNext() == True {
+ index := it.cursor
+ it.cursor++
+ refKey := it.mapKeys[index]
+ return it.NativeToValue(refKey.Interface())
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go
new file mode 100644
index 00000000000..d75e0105c46
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/null.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Null type implementation.
+type Null structpb.NullValue
+
+var (
+ // NullType singleton.
+ NullType = NewTypeValue("null_type")
+ // NullValue singleton.
+ NullValue = Null(structpb.NullValue_NULL_VALUE)
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.Ptr:
+ switch typeDesc {
+ case jsonValueType:
+ return &structpb.Value{
+ Kind: &structpb.Value_NullValue{
+ NullValue: structpb.NullValue_NULL_VALUE}}, nil
+ case anyValueType:
+ pb, err := n.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ return ptypes.MarshalAny(pb.(proto.Message))
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(n).Implements(typeDesc) {
+ return n, nil
+ }
+ }
+ // By default return 'null'.
+ // TODO: determine whether there are other valid conversions for `null`.
+ return structpb.NullValue_NULL_VALUE, nil
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (n Null) ConvertToType(typeVal ref.Type) ref.Val {
+ if typeVal == StringType {
+ return String("null")
+ }
+ if typeVal == NullType {
+ return n
+ }
+ return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (n Null) Equal(other ref.Val) ref.Val {
+ if NullType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return True
+}
+
+// Type implements ref.Val.Type.
+func (n Null) Type() ref.Type {
+ return NullType
+}
+
+// Value implements ref.Val.Value.
+func (n Null) Value() interface{} {
+ return structpb.NullValue_NULL_VALUE
+}
diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go
new file mode 100644
index 00000000000..a3e2a56cdff
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/object.go
@@ -0,0 +1,196 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+type protoObj struct {
+ ref.TypeAdapter
+ value proto.Message
+ refValue reflect.Value
+ typeDesc *pb.TypeDescription
+ typeValue *TypeValue
+ isAny bool
+}
+
+// NewObject returns an object based on a proto.Message value which handles
+// conversion between protobuf type values and expression type values.
+// Objects support indexing and iteration.
+// Note: only uses default Db.
+func NewObject(adapter ref.TypeAdapter,
+ typeDesc *pb.TypeDescription,
+ value proto.Message) ref.Val {
+ return &protoObj{
+ TypeAdapter: adapter,
+ value: value,
+ refValue: reflect.ValueOf(value),
+ typeDesc: typeDesc,
+ typeValue: NewObjectTypeValue(typeDesc.Name())}
+}
+
+func (o *protoObj) ConvertToNative(refl reflect.Type) (interface{}, error) {
+ if refl.AssignableTo(o.refValue.Type()) {
+ return o.value, nil
+ }
+ if refl == anyValueType {
+ return ptypes.MarshalAny(o.Value().(proto.Message))
+ }
+ // If the object is already assignable to the desired type return it.
+ if reflect.TypeOf(o).AssignableTo(refl) {
+ return o, nil
+ }
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'",
+ o.refValue.Type(), refl)
+}
+
+func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ default:
+ if o.Type().TypeName() == typeVal.TypeName() {
+ return o
+ }
+ case TypeType:
+ return o.typeValue
+ }
+ return NewErr("type conversion error from '%s' to '%s'",
+ o.typeDesc.Name(), typeVal)
+}
+
+func (o *protoObj) Equal(other ref.Val) ref.Val {
+ if o.typeDesc.Name() != other.Type().TypeName() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(proto.Equal(o.value, other.Value().(proto.Message)))
+}
+
+// IsSet tests whether a field which is defined is set to a non-default value.
+func (o *protoObj) IsSet(field ref.Val) ref.Val {
+ if field.Type() != StringType {
+ return ValOrErr(field, "illegal object field type '%s'", field.Type())
+ }
+ protoFieldName := string(field.(String))
+ if f, found := o.typeDesc.FieldByName(protoFieldName); found {
+ if !f.IsOneof() {
+ return isFieldSet(o.refValue.Elem().Field(f.Index()))
+ }
+
+ getter := o.refValue.MethodByName(f.GetterName())
+ if getter.IsValid() {
+ refField := getter.Call([]reflect.Value{})[0]
+ if refField.IsValid() {
+ return isFieldSet(refField)
+ }
+ }
+ }
+ return NewErr("no such field '%s'", field)
+}
+
+func (o *protoObj) Get(index ref.Val) ref.Val {
+ if index.Type() != StringType {
+ return ValOrErr(index, "illegal object field type '%s'", index.Type())
+ }
+ protoFieldName := string(index.(String))
+ if f, found := o.typeDesc.FieldByName(protoFieldName); found {
+ if !f.IsOneof() {
+ return getOrDefaultInstance(o.TypeAdapter, o.refValue.Elem().Field(f.Index()))
+ }
+
+ getter := o.refValue.MethodByName(f.GetterName())
+ if getter.IsValid() {
+ refField := getter.Call([]reflect.Value{})[0]
+ if refField.IsValid() {
+ return getOrDefaultInstance(o.TypeAdapter, refField)
+ }
+ }
+ }
+ return NewErr("no such field '%s'", index)
+}
+
+func (o *protoObj) Iterator() traits.Iterator {
+ return &msgIterator{
+ baseIterator: &baseIterator{},
+ refValue: o.refValue,
+ typeDesc: o.typeDesc,
+ cursor: 0}
+}
+
+func (o *protoObj) Type() ref.Type {
+ return o.typeValue
+}
+
+func (o *protoObj) Value() interface{} {
+ return o.value
+}
+
+type msgIterator struct {
+ *baseIterator
+ refValue reflect.Value
+ typeDesc *pb.TypeDescription
+ cursor int
+ len int
+}
+
+func (it *msgIterator) HasNext() ref.Val {
+ return Bool(it.cursor < it.typeDesc.FieldCount())
+}
+
+func (it *msgIterator) Next() ref.Val {
+ if it.HasNext() == False {
+ return nil
+ }
+ fieldName, _ := it.typeDesc.FieldNameAtIndex(it.cursor, it.refValue)
+ it.cursor++
+ return String(fieldName)
+}
+
+var (
+ protoDefaultInstanceMap = make(map[reflect.Type]ref.Val)
+)
+
+func isFieldSet(refVal reflect.Value) ref.Val {
+ if refVal.Kind() == reflect.Ptr && refVal.IsNil() {
+ return False
+ }
+ return True
+}
+
+func getOrDefaultInstance(adapter ref.TypeAdapter, refVal reflect.Value) ref.Val {
+ if isFieldSet(refVal) == True {
+ value := refVal.Interface()
+ return adapter.NativeToValue(value)
+ }
+ return getDefaultInstance(adapter, refVal.Type())
+}
+
+func getDefaultInstance(adapter ref.TypeAdapter, refType reflect.Type) ref.Val {
+ if refType.Kind() == reflect.Ptr {
+ refType = refType.Elem()
+ }
+ if defaultValue, found := protoDefaultInstanceMap[refType]; found {
+ return defaultValue
+ }
+ defaultValue := adapter.NativeToValue(reflect.New(refType).Interface())
+ protoDefaultInstanceMap[refType] = defaultValue
+ return defaultValue
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/checked.go b/vendor/github.com/google/cel-go/common/types/pb/checked.go
new file mode 100644
index 00000000000..10d13176fc2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/checked.go
@@ -0,0 +1,90 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ emptypb "github.com/golang/protobuf/ptypes/empty"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
var (
	// CheckedPrimitives map from proto field descriptor type to expr.Type.
	// Note: 32- and 64-bit integer widths collapse to CEL's single int/uint
	// types, and FLOAT maps to double (CEL has one floating-point type).
	CheckedPrimitives = map[descpb.FieldDescriptorProto_Type]*exprpb.Type{
		descpb.FieldDescriptorProto_TYPE_BOOL:    checkedBool,
		descpb.FieldDescriptorProto_TYPE_BYTES:   checkedBytes,
		descpb.FieldDescriptorProto_TYPE_DOUBLE:  checkedDouble,
		descpb.FieldDescriptorProto_TYPE_FLOAT:   checkedDouble,
		descpb.FieldDescriptorProto_TYPE_INT32:   checkedInt,
		descpb.FieldDescriptorProto_TYPE_INT64:   checkedInt,
		descpb.FieldDescriptorProto_TYPE_SINT32:  checkedInt,
		descpb.FieldDescriptorProto_TYPE_SINT64:  checkedInt,
		descpb.FieldDescriptorProto_TYPE_UINT32:  checkedUint,
		descpb.FieldDescriptorProto_TYPE_UINT64:  checkedUint,
		descpb.FieldDescriptorProto_TYPE_FIXED32: checkedUint,
		descpb.FieldDescriptorProto_TYPE_FIXED64: checkedUint,
		descpb.FieldDescriptorProto_TYPE_STRING:  checkedString}

	// CheckedWellKnowns map from qualified proto type name to expr.Type for
	// well-known proto types.
	CheckedWellKnowns = map[string]*exprpb.Type{
		// Wrapper types map to the wrapped primitive's nullable form.
		"google.protobuf.BoolValue":   checkedWrap(checkedBool),
		"google.protobuf.BytesValue":  checkedWrap(checkedBytes),
		"google.protobuf.DoubleValue": checkedWrap(checkedDouble),
		"google.protobuf.FloatValue":  checkedWrap(checkedDouble),
		"google.protobuf.Int64Value":  checkedWrap(checkedInt),
		"google.protobuf.Int32Value":  checkedWrap(checkedInt),
		"google.protobuf.UInt64Value": checkedWrap(checkedUint),
		"google.protobuf.UInt32Value": checkedWrap(checkedUint),
		"google.protobuf.StringValue": checkedWrap(checkedString),
		// Well-known types.
		"google.protobuf.Any":       checkedAny,
		"google.protobuf.Duration":  checkedDuration,
		"google.protobuf.Timestamp": checkedTimestamp,
		// Json types map to dynamic CEL aggregates.
		"google.protobuf.ListValue": checkedListDyn,
		"google.protobuf.NullValue": checkedNull,
		"google.protobuf.Struct":    checkedMapStringDyn,
		"google.protobuf.Value":     checkedDyn,
	}

	// common types
	checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}
	// Wrapper and primitive types.
	checkedBool   = checkedPrimitive(exprpb.Type_BOOL)
	checkedBytes  = checkedPrimitive(exprpb.Type_BYTES)
	checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE)
	checkedInt    = checkedPrimitive(exprpb.Type_INT64)
	checkedString = checkedPrimitive(exprpb.Type_STRING)
	checkedUint   = checkedPrimitive(exprpb.Type_UINT64)
	// Well-known type equivalents.
	checkedAny       = checkedWellKnown(exprpb.Type_ANY)
	checkedDuration  = checkedWellKnown(exprpb.Type_DURATION)
	checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP)
	// Json-based type equivalents.
	checkedNull = &exprpb.Type{
		TypeKind: &exprpb.Type_Null{
			Null: structpb.NullValue_NULL_VALUE}}
	checkedListDyn = &exprpb.Type{
		TypeKind: &exprpb.Type_ListType_{
			ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}}
	checkedMapStringDyn = &exprpb.Type{
		TypeKind: &exprpb.Type_MapType_{
			MapType: &exprpb.Type_MapType{
				KeyType:   checkedString,
				ValueType: checkedDyn}}}
)
diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go
new file mode 100644
index 00000000000..6e122672a65
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
// EnumDescription maps a qualified enum name to its numeric value.
type EnumDescription struct {
	enumName string                           // fully-qualified enum value name
	file     *FileDescription                 // file in which the enum is declared
	desc     *descpb.EnumValueDescriptorProto // underlying value descriptor
}

// Name returns the fully-qualified name of the enum value.
func (ed *EnumDescription) Name() string {
	return ed.enumName
}

// Value returns the numeric value of the enum.
func (ed *EnumDescription) Value() int32 {
	return ed.desc.GetNumber()
}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go
new file mode 100644
index 00000000000..276e3690644
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -0,0 +1,112 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
// FileDescription holds a map of all types and enums declared within a .proto
// file, keyed by their fully-qualified names.
type FileDescription struct {
	pbdb  *Db                          // owning database, shared across files
	desc  *descpb.FileDescriptorProto  // raw file descriptor
	types map[string]*TypeDescription  // message types by qualified name
	enums map[string]*EnumDescription  // enum values by qualified name
}
+
+// GetEnumDescription returns an EnumDescription for a qualified enum value
+// name declared within the .proto file.
+func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumDescription, error) {
+ if ed, found := fd.enums[sanitizeProtoName(enumName)]; found {
+ return ed, nil
+ }
+ return nil, fmt.Errorf("no such enum value '%s'", enumName)
+}
+
+// GetEnumNames returns the string names of all enum values in the file.
+func (fd *FileDescription) GetEnumNames() []string {
+ enumNames := make([]string, len(fd.enums))
+ i := 0
+ for _, e := range fd.enums {
+ enumNames[i] = e.Name()
+ i++
+ }
+ return enumNames
+}
+
+// GetTypeDescription returns a TypeDescription for a qualified type name
+// declared within the .proto file.
+func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, error) {
+ if td, found := fd.types[sanitizeProtoName(typeName)]; found {
+ return td, nil
+ }
+ return nil, fmt.Errorf("no such type '%s'", typeName)
+}
+
+// GetTypeNames returns the list of all type names contained within the file.
+func (fd *FileDescription) GetTypeNames() []string {
+ typeNames := make([]string, len(fd.types))
+ i := 0
+ for _, t := range fd.types {
+ typeNames[i] = t.Name()
+ i++
+ }
+ return typeNames
+}
+
// Package returns the file's qualified proto package name (may be empty for
// files declared without a package).
func (fd *FileDescription) Package() string {
	return fd.desc.GetPackage()
}
+
+func (fd *FileDescription) indexEnums(pkg string, enumTypes []*descpb.EnumDescriptorProto) {
+ for _, enumType := range enumTypes {
+ for _, enumValue := range enumType.Value {
+ enumValueName := fmt.Sprintf(
+ "%s.%s.%s", pkg, enumType.GetName(), enumValue.GetName())
+ fd.enums[enumValueName] = &EnumDescription{
+ enumName: enumValueName,
+ file: fd,
+ desc: enumValue}
+ fd.pbdb.revFileDescriptorMap[enumValueName] = fd
+ }
+ }
+}
+
// indexTypes registers each message type in msgTypes under its qualified name
// and recurses into nested message and enum declarations, using the message's
// own qualified name as the nested package prefix.
func (fd *FileDescription) indexTypes(pkg string, msgTypes []*descpb.DescriptorProto) {
	for _, msgType := range msgTypes {
		msgName := fmt.Sprintf("%s.%s", pkg, msgType.GetName())
		// Field maps are populated lazily by TypeDescription.getFieldsInfo.
		td := &TypeDescription{
			typeName:     msgName,
			file:         fd,
			desc:         msgType,
			fields:       make(map[string]*FieldDescription),
			fieldIndices: make(map[int][]*FieldDescription)}
		fd.types[msgName] = td
		fd.indexTypes(msgName, msgType.NestedType)
		fd.indexEnums(msgName, msgType.EnumType)
		fd.pbdb.revFileDescriptorMap[msgName] = fd
	}
}
+
// sanitizeProtoName strips a single leading '.' from a fully-qualified proto
// name, normalizing absolute references to the map-key form used internally.
func sanitizeProtoName(name string) string {
	if len(name) > 0 && name[0] == '.' {
		name = name[1:]
	}
	return name
}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 00000000000..5ac58053221
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,148 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/descriptor"
+ "github.com/golang/protobuf/proto"
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ anypb "github.com/golang/protobuf/ptypes/any"
+ durpb "github.com/golang/protobuf/ptypes/duration"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+ wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
+)
+
// Db maps from file / message / enum name to file description.
type Db struct {
	// revFileDescriptorMap maps every registered type, enum, and message
	// name back to the FileDescription that declares it.
	revFileDescriptorMap map[string]*FileDescription
}

var (
	// DefaultDb used at evaluation time or unless overridden at check time.
	// Populated with well-known types by this package's init function.
	DefaultDb = &Db{
		revFileDescriptorMap: make(map[string]*FileDescription),
	}
)
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+ pbdb := &Db{
+ revFileDescriptorMap: make(map[string]*FileDescription),
+ }
+ for k, v := range DefaultDb.revFileDescriptorMap {
+ pbdb.revFileDescriptorMap[k] = v
+ }
+ return pbdb
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptorProto` and registers the
+// message and enum types into the `pb.Db`.
+func (pbdb *Db) RegisterDescriptor(fileDesc *descpb.FileDescriptorProto) (*FileDescription, error) {
+ fd, err := pbdb.describeFileInternal(fileDesc)
+ if err != nil {
+ return nil, err
+ }
+ pkg := fd.Package()
+ fd.indexTypes(pkg, fileDesc.MessageType)
+ fd.indexEnums(pkg, fileDesc.EnumType)
+ return fd, nil
+}
+
// RegisterMessage produces a `FileDescription` from a `message` and registers
// the message and all other definitions within the message file into the
// `pb.Db`.
//
// If the message's type name is already registered, the existing description
// is returned without re-reading the file descriptor.
func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) {
	typeName := sanitizeProtoName(proto.MessageName(message))
	if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
		return fd, nil
	}
	// descriptor.ForMessage also returns the message descriptor, which is
	// not needed here.
	fileDesc, _ := descriptor.ForMessage(message.(descriptor.Message))
	return pbdb.RegisterDescriptor(fileDesc)
}
+
+// DescribeFile gets the `FileDescription` for the `message` type if it exists in the `pb.Db`.
+func (pbdb *Db) DescribeFile(message proto.Message) (*FileDescription, error) {
+ typeName := sanitizeProtoName(proto.MessageName(message))
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd, nil
+ }
+ return nil, fmt.Errorf("unrecognized proto type name '%s'", typeName)
+}
+
+// DescribeEnum takes a qualified enum name and returns an `EnumDescription` if it exists in the
+// `pb.Db`.
+func (pbdb *Db) DescribeEnum(enumName string) (*EnumDescription, error) {
+ enumName = sanitizeProtoName(enumName)
+ if fd, found := pbdb.revFileDescriptorMap[enumName]; found {
+ return fd.GetEnumDescription(enumName)
+ }
+ return nil, fmt.Errorf("unrecognized enum '%s'", enumName)
+}
+
+// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`.
+func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, error) {
+ typeName = sanitizeProtoName(typeName)
+ if fd, found := pbdb.revFileDescriptorMap[typeName]; found {
+ return fd.GetTypeDescription(typeName)
+ }
+ return nil, fmt.Errorf("unrecognized type '%s'", typeName)
+}
+
+func (pbdb *Db) describeFileInternal(fileDesc *descpb.FileDescriptorProto) (*FileDescription, error) {
+ fd := &FileDescription{
+ pbdb: pbdb,
+ desc: fileDesc,
+ types: make(map[string]*TypeDescription),
+ enums: make(map[string]*EnumDescription)}
+ return fd, nil
+}
+
+func fileDescriptor(protoFileName string) (*descpb.FileDescriptorProto, error) {
+ gzipped := proto.FileDescriptor(protoFileName)
+ r, err := gzip.NewReader(bytes.NewReader(gzipped))
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ unzipped, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ fd := &descpb.FileDescriptorProto{}
+ if err := proto.Unmarshal(unzipped, fd); err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ return fd, nil
+}
+
func init() {
	// Describe well-known types to ensure they can always be resolved by the check and interpret
	// execution phases.
	//
	// The following subset of message types is enough to ensure that all well-known types can
	// resolved in the runtime, since describing the value results in describing the whole file
	// where the message is declared.
	//
	// NOTE(review): registration errors are silently discarded here — confirm
	// these compiled-in descriptors can never fail to register.
	DefaultDb.RegisterMessage(&anypb.Any{})
	DefaultDb.RegisterMessage(&durpb.Duration{})
	DefaultDb.RegisterMessage(&tspb.Timestamp{})
	DefaultDb.RegisterMessage(&structpb.Value{})
	DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 00000000000..0d6d743398c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,326 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
// TypeDescription is a collection of type metadata relevant to expression
// checking and evaluation.
type TypeDescription struct {
	typeName     string                       // fully-qualified proto message name
	file         *FileDescription             // file in which the type is declared
	desc         *descpb.DescriptorProto      // raw message descriptor
	fields       map[string]*FieldDescription // fields by name (proto); lazily populated
	fieldIndices map[int][]*FieldDescription  // fields by Go struct idx; lazily populated
	fieldProperties *proto.StructProperties   // cached struct properties; nil until first use
	refType         *reflect.Type             // cached generated Go struct type; nil until first use
}
+
+// FieldCount returns the number of fields declared within the type.
+func (td *TypeDescription) FieldCount() int {
+ // Initialize the type's internal state.
+ var _, fieldIndices = td.getFieldsInfo()
+ // The number of keys in the field indices map corresponds to the number
+ // of fields on the proto message.
+ return len(fieldIndices)
+}
+
+// FieldByName returns the FieldDescription associated with a field name.
+func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
+ fieldMap, _ := td.getFieldsInfo()
+ fd, found := fieldMap[name]
+ return fd, found
+}
+
// FieldNameAtIndex returns the field name at the specified index.
//
// For oneof field values, multiple fields may exist at the same index, so the
// appropriate oneof getter's must be invoked in order to determine which of
// oneof fields is currently set at the index.
func (td *TypeDescription) FieldNameAtIndex(index int, refObj reflect.Value) (string, bool) {
	fields := td.getFieldsAtIndex(index)
	// A single candidate means this is a plain (non-oneof) field.
	if len(fields) == 1 {
		return fields[0].OrigName(), true
	}
	// Multiple candidates share a oneof slot: probe each getter to find the
	// one that currently holds a non-nil value.
	for _, fd := range fields {
		if fd.IsOneof() {
			getter := refObj.MethodByName(fd.GetterName())
			if !getter.IsValid() {
				continue
			}
			refField := getter.Call([]reflect.Value{})[0]
			if refField.IsValid() && !refField.IsNil() {
				return fd.OrigName(), true
			}
		}
	}
	// No field set at this index (e.g. an unset oneof).
	return "", false
}
+
// Name returns the fully-qualified name of the type.
func (td *TypeDescription) Name() string {
	return td.typeName
}

// ReflectType returns the reflected struct type of the generated proto
// struct, or nil when no generated Go type is linked into the binary. The
// result is cached after the first successful lookup.
// NOTE(review): the cache write is unsynchronized — confirm single-goroutine use.
func (td *TypeDescription) ReflectType() reflect.Type {
	if td.refType == nil {
		refType := proto.MessageType(td.Name())
		if refType == nil {
			return nil
		}
		td.refType = &refType
	}
	return *td.refType
}
+
// getFieldsInfo lazily builds and returns the type's two field maps: fields
// keyed by proto name, and fields grouped by Go struct index. The maps are
// built once on first call (detected by an empty fields map) and reused.
func (td *TypeDescription) getFieldsInfo() (map[string]*FieldDescription,
	map[int][]*FieldDescription) {
	if len(td.fields) == 0 {
		isProto3 := td.file.desc.GetSyntax() == "proto3"
		// Index the raw descriptors by field name for correlation with the
		// generated struct's properties below.
		fieldIndexMap := make(map[string]int)
		fieldDescMap := make(map[string]*descpb.FieldDescriptorProto)
		for i, f := range td.desc.Field {
			fieldDescMap[f.GetName()] = f
			fieldIndexMap[f.GetName()] = i
		}
		fieldProps := td.getFieldProperties()
		if fieldProps != nil {
			// This is a proper message type.
			for i, prop := range fieldProps.Prop {
				if strings.HasPrefix(prop.OrigName, "XXX_") {
					// Book-keeping fields generated by protoc start with XXX_
					continue
				}
				desc := fieldDescMap[prop.OrigName]
				fd := &FieldDescription{
					tdesc:  td,
					desc:   desc,
					index:  i,
					prop:   prop,
					proto3: isProto3}
				td.fields[prop.OrigName] = fd
				td.fieldIndices[i] = append(td.fieldIndices[i], fd)
			}
			// Oneof members are reported separately; several may share one
			// struct index (the oneof wrapper field).
			for _, oneofProp := range fieldProps.OneofTypes {
				desc := fieldDescMap[oneofProp.Prop.OrigName]
				fd := &FieldDescription{
					tdesc:     td,
					desc:      desc,
					index:     oneofProp.Field,
					prop:      oneofProp.Prop,
					oneofProp: oneofProp,
					proto3:    isProto3}
				td.fields[oneofProp.Prop.OrigName] = fd
				td.fieldIndices[oneofProp.Field] = append(td.fieldIndices[oneofProp.Field], fd)
			}
		} else {
			// No generated struct is linked in; fall back to descriptor-only
			// metadata, using the field number as the index.
			for fieldName, desc := range fieldDescMap {
				fd := &FieldDescription{
					tdesc:  td,
					desc:   desc,
					index:  int(desc.GetNumber()),
					proto3: isProto3}
				td.fields[fieldName] = fd
				index := fieldIndexMap[fieldName]
				td.fieldIndices[index] = append(td.fieldIndices[index], fd)
			}
		}
	}
	return td.fields, td.fieldIndices
}
+
+func (td *TypeDescription) getFieldProperties() *proto.StructProperties {
+ if td.fieldProperties == nil {
+ refType := td.ReflectType()
+ if refType == nil {
+ return nil
+ }
+ if refType.Kind() == reflect.Ptr {
+ refType = refType.Elem()
+ }
+ if refType.Kind() == reflect.Struct {
+ td.fieldProperties = proto.GetProperties(refType)
+ }
+ }
+ return td.fieldProperties
+}
+
// getFieldsAtIndex returns all fields sharing the given Go struct index
// (more than one only for oneof members).
func (td *TypeDescription) getFieldsAtIndex(i int) []*FieldDescription {
	_, fieldIndices := td.getFieldsInfo()
	return fieldIndices[i]
}

// FieldDescription holds metadata related to fields declared within a type.
type FieldDescription struct {
	tdesc     *TypeDescription                 // enclosing type
	desc      *descpb.FieldDescriptorProto     // raw field descriptor; may be nil for synthesized oneof entries
	index     int                              // Go struct index (or field number when no struct is linked)
	prop      *proto.Properties                // generated-struct properties
	oneofProp *proto.OneofProperties           // set only for oneof members
	proto3    bool                             // whether the enclosing file uses proto3 syntax
}
+
// CheckedType returns the type-definition used at type-check time, wrapping
// map fields as map(K, V), repeated fields as list(E), and scalars/messages
// via typeDefToType.
func (fd *FieldDescription) CheckedType() *exprpb.Type {
	if fd.IsMap() {
		// Map fields are backed by a synthetic entry message whose first two
		// fields are the key and value.
		td, _ := fd.tdesc.file.pbdb.DescribeType(fd.TypeName())
		key := td.getFieldsAtIndex(0)[0]
		val := td.getFieldsAtIndex(1)[0]
		return &exprpb.Type{
			TypeKind: &exprpb.Type_MapType_{
				MapType: &exprpb.Type_MapType{
					KeyType:   key.typeDefToType(),
					ValueType: val.typeDefToType()}}}
	}
	if fd.IsRepeated() {
		return &exprpb.Type{
			TypeKind: &exprpb.Type_ListType_{
				ListType: &exprpb.Type_ListType{
					ElemType: fd.typeDefToType()}}}
	}
	return fd.typeDefToType()
}
+
+// GetterName returns the accessor method name associated with the field
+// on the proto generated struct.
+func (fd *FieldDescription) GetterName() string {
+ return fmt.Sprintf("Get%s", fd.prop.Name)
+}
+
// Index returns the field index within a reflected value (or the field
// number when no generated struct is linked — see getFieldsInfo).
func (fd *FieldDescription) Index() int {
	return fd.index
}

// IsEnum returns true if the field type refers to an enum value.
func (fd *FieldDescription) IsEnum() bool {
	return fd.desc.GetType() == descpb.FieldDescriptorProto_TYPE_ENUM
}
+
+// IsOneof returns true if the field is declared within a oneof block.
+func (fd *FieldDescription) IsOneof() bool {
+ if fd.desc != nil {
+ return fd.desc.OneofIndex != nil
+ }
+ return fd.oneofProp != nil
+}
+
// OneofType returns the reflect.Type value of a oneof field.
//
// Oneof field values are wrapped in a struct which contains one field whose
// value is a proto.Message.
// NOTE(review): panics if called on a non-oneof field (oneofProp is nil) —
// callers must check IsOneof first.
func (fd *FieldDescription) OneofType() reflect.Type {
	return fd.oneofProp.Type
}
+
+// IsMap returns true if the field is of map type.
+func (fd *FieldDescription) IsMap() bool {
+ if !fd.IsRepeated() || !fd.IsMessage() {
+ return false
+ }
+ td, err := fd.tdesc.file.pbdb.DescribeType(fd.TypeName())
+ if err != nil {
+ return false
+ }
+ return td.desc.GetOptions().GetMapEntry()
+}
+
// IsMessage returns true if the field is of message type.
func (fd *FieldDescription) IsMessage() bool {
	return fd.desc.GetType() == descpb.FieldDescriptorProto_TYPE_MESSAGE
}
+
+// IsRepeated returns true if the field is a repeated value.
+//
+// This method will also return true for map values, so check whether the
+// field is also a map.
+func (fd *FieldDescription) IsRepeated() bool {
+ return *fd.desc.Label == descpb.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// OrigName returns the snake_case name of the field as it was declared within
+// the proto. This is the same name format that is expected within expressions.
+func (fd *FieldDescription) OrigName() string {
+ if fd.desc != nil && fd.desc.Name != nil {
+ return *fd.desc.Name
+ }
+ return fd.prop.OrigName
+}
+
// Name returns the CamelCase name of the field within the proto-based struct.
func (fd *FieldDescription) Name() string {
	return fd.prop.Name
}

// SupportsPresence returns true if the field supports presence detection:
// scalar fields lose presence under proto3, while message fields and all
// proto2 singular fields retain it.
func (fd *FieldDescription) SupportsPresence() bool {
	return !fd.IsRepeated() && (fd.IsMessage() || !fd.proto3)
}
+
+// String returns a proto-like field definition string.
+func (fd *FieldDescription) String() string {
+ return fmt.Sprintf("%s %s = %d `oneof=%t`",
+ fd.TypeName(), fd.OrigName(), fd.Index(), fd.IsOneof())
+}
+
// TypeName returns the qualified type name of the field (message or enum),
// stripped of any leading '.'.
func (fd *FieldDescription) TypeName() string {
	return sanitizeProtoName(fd.desc.GetTypeName())
}
+
+func (fd *FieldDescription) typeDefToType() *exprpb.Type {
+ if fd.IsMessage() {
+ if wk, found := CheckedWellKnowns[fd.TypeName()]; found {
+ return wk
+ }
+ return checkedMessageType(fd.TypeName())
+ }
+ if fd.IsEnum() {
+ return checkedInt
+ }
+ if p, found := CheckedPrimitives[fd.desc.GetType()]; found {
+ return p
+ }
+ return CheckedPrimitives[fd.desc.GetType()]
+}
+
+func checkedMessageType(name string) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{MessageType: name}}
+}
+
+func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Primitive{Primitive: primitive}}
+}
+
+func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}}
+}
+
+func checkedWrap(t *exprpb.Type) *exprpb.Type {
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}}
+}
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 00000000000..15ca2e5a7d3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,383 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ anypb "github.com/golang/protobuf/ptypes/any"
+ dpb "github.com/golang/protobuf/ptypes/duration"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ tpb "github.com/golang/protobuf/ptypes/timestamp"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
// protoTypeRegistry resolves type names, enums, and fields using a proto
// descriptor database, and adapts native proto values to CEL values.
type protoTypeRegistry struct {
	revTypeMap map[string]ref.Type // registered CEL type values by type name
	pbdb       *pb.Db              // proto descriptor database backing lookups
}
+
// NewRegistry accepts a list of proto message instances and returns a type
// provider which can create new instances of the provided message or any
// message that proto depends upon in its FileDescriptor.
//
// Panics if any of the given messages fails to register with the descriptor
// database.
func NewRegistry(types ...proto.Message) ref.TypeRegistry {
	p := &protoTypeRegistry{
		revTypeMap: make(map[string]ref.Type),
		pbdb:       pb.NewDb(),
	}
	// Pre-register the built-in CEL types.
	p.RegisterType(
		BoolType,
		BytesType,
		DoubleType,
		DurationType,
		DynType,
		IntType,
		ListType,
		MapType,
		NullType,
		StringType,
		TimestampType,
		TypeType,
		UintType)

	// Registering one message registers every type declared in its file.
	for _, msgType := range types {
		fd, err := p.pbdb.RegisterMessage(msgType)
		if err != nil {
			panic(err)
		}
		for _, typeName := range fd.GetTypeNames() {
			p.RegisterType(NewObjectTypeValue(typeName))
		}
	}
	return p
}
+
+func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val {
+ enumVal, err := p.pbdb.DescribeEnum(enumName)
+ if err != nil {
+ return NewErr("unknown enum name '%s'", enumName)
+ }
+ return Int(enumVal.Value())
+}
+
+func (p *protoTypeRegistry) FindFieldType(messageType string,
+ fieldName string) (*ref.FieldType, bool) {
+ msgType, err := p.pbdb.DescribeType(messageType)
+ if err != nil {
+ return nil, false
+ }
+ field, found := msgType.FieldByName(fieldName)
+ if !found {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: field.CheckedType(),
+ SupportsPresence: field.SupportsPresence()},
+ true
+}
+
+func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) {
+ if t, found := p.revTypeMap[identName]; found {
+ return t.(ref.Val), true
+ }
+ if enumVal, err := p.pbdb.DescribeEnum(identName); err == nil {
+ return Int(enumVal.Value()), true
+ }
+ return nil, false
+}
+
+func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) {
+ if _, err := p.pbdb.DescribeType(typeName); err != nil {
+ return nil, false
+ }
+ if typeName != "" && typeName[0] == '.' {
+ typeName = typeName[1:]
+ }
+ return &exprpb.Type{
+ TypeKind: &exprpb.Type_Type{
+ Type: &exprpb.Type{
+ TypeKind: &exprpb.Type_MessageType{
+ MessageType: typeName}}}}, true
+}
+
+func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+ td, err := p.pbdb.DescribeType(typeName)
+ if err != nil {
+ return NewErr("unknown type '%s'", typeName)
+ }
+ refType := td.ReflectType()
+ // create the new type instance.
+ value := reflect.New(refType.Elem())
+ pbValue := value.Elem()
+
+ // for all of the field names referenced, set the provided value.
+ for name, value := range fields {
+ fd, found := td.FieldByName(name)
+ if !found {
+ return NewErr("no such field '%s'", name)
+ }
+ refField := pbValue.Field(fd.Index())
+ if !refField.IsValid() {
+ return NewErr("no such field '%s'", name)
+ }
+
+ dstType := refField.Type()
+ // Oneof fields are defined with wrapper structs that have a single proto.Message
+ // field value. The oneof wrapper is not a proto.Message instance.
+ if fd.IsOneof() {
+ oneofVal := reflect.New(fd.OneofType().Elem())
+ refField.Set(oneofVal)
+ refField = oneofVal.Elem().Field(0)
+ dstType = refField.Type()
+ }
+ fieldValue, err := value.ConvertToNative(dstType)
+ if err != nil {
+ return &Err{err}
+ }
+ refField.Set(reflect.ValueOf(fieldValue))
+ }
+ return NewObject(p, td, value.Interface().(proto.Message))
+}
+
// RegisterDescriptor registers every type declared in the file descriptor as
// an object type value on this registry.
func (p *protoTypeRegistry) RegisterDescriptor(fileDesc *descpb.FileDescriptorProto) error {
	fd, err := p.pbdb.RegisterDescriptor(fileDesc)
	if err != nil {
		return err
	}
	return p.registerAllTypes(fd)
}

// RegisterMessage registers the message's type, and every other type declared
// in its file, as object type values on this registry.
func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error {
	fd, err := p.pbdb.RegisterMessage(message)
	if err != nil {
		return err
	}
	return p.registerAllTypes(fd)
}
+
+func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error {
+ for _, t := range types {
+ p.revTypeMap[t.TypeName()] = t
+ }
+ // TODO: generate an error when the type name is registered more than once.
+ return nil
+}
+
+func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error {
+ for _, typeName := range fd.GetTypeNames() {
+ err := p.RegisterType(NewObjectTypeValue(typeName))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
+func (p *protoTypeRegistry) NativeToValue(value interface{}) ref.Val {
+ switch value.(type) {
+ case ref.Val:
+ return value.(ref.Val)
+ // Adapt common types and aggregate specializations using the DefaultTypeAdapter.
+ case bool, *bool,
+ float32, *float32, float64, *float64,
+ int, *int, int32, *int32, int64, *int64,
+ string, *string,
+ uint, *uint, uint32, *uint32, uint64, *uint64,
+ []byte,
+ []string,
+ map[string]string:
+ return DefaultTypeAdapter.NativeToValue(value)
+ // Adapt well-known proto-types using the DefaultTypeAdapter.
+ case *dpb.Duration,
+ *tpb.Timestamp,
+ *structpb.ListValue,
+ structpb.NullValue,
+ *structpb.Struct,
+ *structpb.Value:
+ return DefaultTypeAdapter.NativeToValue(value)
+ // Override the Any type by ensuring that custom proto-types are considered on recursive calls.
+ case *anypb.Any:
+ val := value.(*anypb.Any)
+ unpackedAny := ptypes.DynamicAny{}
+ if ptypes.UnmarshalAny(val, &unpackedAny) != nil {
+ NewErr("Fail to unmarshal any.")
+ }
+ return p.NativeToValue(unpackedAny.Message)
+ // Convert custom proto types to CEL values based on type's presence within the pb.Db.
+ case proto.Message:
+ pbVal := value.(proto.Message)
+ typeName := proto.MessageName(pbVal)
+ td, err := p.pbdb.DescribeType(typeName)
+ if err != nil {
+ return NewErr("unknown type '%s'", typeName)
+ }
+ return NewObject(p, td, pbVal)
+ // Override default handling for list and maps to ensure that blends of Go + proto types
+ // are appropriately adapted on recursive calls or subsequent inspection of the aggregate
+ // value.
+ default:
+ refValue := reflect.ValueOf(value)
+ if refValue.Kind() == reflect.Ptr {
+ refValue = refValue.Elem()
+ }
+ refKind := refValue.Kind()
+ switch refKind {
+ case reflect.Array, reflect.Slice:
+ return NewDynamicList(p, value)
+ case reflect.Map:
+ return NewDynamicMap(p, value)
+ }
+ }
+ // By default return the default type adapter's conversion to CEL.
+ return DefaultTypeAdapter.NativeToValue(value)
+}
+
+// defaultTypeAdapter converts go native types to CEL values.
+type defaultTypeAdapter struct{}
+
+var (
+ // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values.
+ DefaultTypeAdapter = &defaultTypeAdapter{}
+)
+
+// NativeToValue implements the ref.TypeAdapter interface.
+func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
+ switch value.(type) {
+ case ref.Val:
+ return value.(ref.Val)
+ case bool:
+ return Bool(value.(bool))
+ case *bool:
+ return Bool(*value.(*bool))
+ case int:
+ return Int(value.(int))
+ case int32:
+ return Int(value.(int32))
+ case int64:
+ return Int(value.(int64))
+ case *int:
+ return Int(*value.(*int))
+ case *int32:
+ return Int(*value.(*int32))
+ case *int64:
+ return Int(*value.(*int64))
+ case uint:
+ return Uint(value.(uint))
+ case uint32:
+ return Uint(value.(uint32))
+ case uint64:
+ return Uint(value.(uint64))
+ case *uint:
+ return Uint(*value.(*uint))
+ case *uint32:
+ return Uint(*value.(*uint32))
+ case *uint64:
+ return Uint(*value.(*uint64))
+ case float32:
+ return Double(value.(float32))
+ case float64:
+ return Double(value.(float64))
+ case *float32:
+ return Double(*value.(*float32))
+ case *float64:
+ return Double(*value.(*float64))
+ case string:
+ return String(value.(string))
+ case *string:
+ return String(*value.(*string))
+ case []byte:
+ return Bytes(value.([]byte))
+ case []string:
+ return NewStringList(a, value.([]string))
+ case map[string]string:
+ return NewStringStringMap(value.(map[string]string))
+ case *dpb.Duration:
+ return Duration{value.(*dpb.Duration)}
+ case *structpb.ListValue:
+ return NewJSONList(a, value.(*structpb.ListValue))
+ case structpb.NullValue:
+ return NullValue
+ case *structpb.Struct:
+ return NewJSONStruct(a, value.(*structpb.Struct))
+ case *structpb.Value:
+ v := value.(*structpb.Value)
+ switch v.Kind.(type) {
+ case *structpb.Value_BoolValue:
+ return a.NativeToValue(v.GetBoolValue())
+ case *structpb.Value_ListValue:
+ return a.NativeToValue(v.GetListValue())
+ case *structpb.Value_NullValue:
+ return NullValue
+ case *structpb.Value_NumberValue:
+ return a.NativeToValue(v.GetNumberValue())
+ case *structpb.Value_StringValue:
+ return a.NativeToValue(v.GetStringValue())
+ case *structpb.Value_StructValue:
+ return a.NativeToValue(v.GetStructValue())
+ }
+ case *tpb.Timestamp:
+ return Timestamp{value.(*tpb.Timestamp)}
+ case *anypb.Any:
+ val := value.(*anypb.Any)
+ unpackedAny := ptypes.DynamicAny{}
+ if ptypes.UnmarshalAny(val, &unpackedAny) != nil {
+ NewErr("Fail to unmarshal any.")
+ }
+ return a.NativeToValue(unpackedAny.Message)
+ default:
+ refValue := reflect.ValueOf(value)
+ if refValue.Kind() == reflect.Ptr {
+ refValue = refValue.Elem()
+ }
+ refKind := refValue.Kind()
+ switch refKind {
+ case reflect.Array, reflect.Slice:
+ return NewDynamicList(a, value)
+ case reflect.Map:
+ return NewDynamicMap(a, value)
+ // type aliases of primitive types cannot be asserted as that type, but rather need
+ // to be downcast to int32 before being converted to a CEL representation.
+ case reflect.Int32:
+ intType := reflect.TypeOf(int32(0))
+ return Int(refValue.Convert(intType).Interface().(int32))
+ case reflect.Int64:
+ intType := reflect.TypeOf(int64(0))
+ return Int(refValue.Convert(intType).Interface().(int64))
+ case reflect.Uint32:
+ uintType := reflect.TypeOf(uint32(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint32))
+ case reflect.Uint64:
+ uintType := reflect.TypeOf(uint64(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint64))
+ case reflect.Float32:
+ doubleType := reflect.TypeOf(float32(0))
+ return Double(refValue.Convert(doubleType).Interface().(float32))
+ case reflect.Float64:
+ doubleType := reflect.TypeOf(float64(0))
+ return Double(refValue.Convert(doubleType).Interface().(float64))
+ }
+ }
+ return NewErr("unsupported type conversion for value '%v'", value)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go
new file mode 100644
index 00000000000..6ead307bb54
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -0,0 +1,87 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ref
+
+import (
+ "github.com/golang/protobuf/proto"
+
+ descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+type TypeProvider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) Val
+
+ // FindIdent takes a qualified identifier name and returns a Value if one
+ // exists.
+ FindIdent(identName string) (Val, bool)
+
+ // FindType looks up the Type given a qualified typeName. Returns false
+ // if not found.
+ //
+ // Used during type-checking only.
+ FindType(typeName string) (*exprpb.Type, bool)
+
+ // FindFieldType returns the field type for a checked type value. Returns
+ // false if the field could not be found.
+ //
+ // Used during type-checking only.
+ FindFieldType(messageType string, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and a map of
+ // field initializers.
+ NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+type TypeAdapter interface {
+ // NativeToValue converts the input `value` to a CEL `ref.Val`.
+ NativeToValue(value interface{}) Val
+}
+
+// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+type TypeRegistry interface {
+ TypeAdapter
+ TypeProvider
+
+ // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+ RegisterDescriptor(fileDesc *descpb.FileDescriptorProto) error
+
+ // RegisterMessage registers a protocol buffer message and its dependencies.
+ RegisterMessage(message proto.Message) error
+
+ // RegisterType registers a type value with the provider which ensures the
+ // provider is aware of how to map the type to an identifier.
+ //
+ // If a type is provided more than once with an alternative definition, the
+ // call will result in an error.
+ RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+type FieldType struct {
+ // SupportsPresence indicates if the field having been set can be detected.
+ SupportsPresence bool
+
+ // Type of the field.
+ Type *exprpb.Type
+}
diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 00000000000..e12169988d5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types
+// components.
+package ref
+
+import (
+ "reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+ // HasTrait returns whether the type has a given trait associated with it.
+ //
+ // See common/types/traits/traits.go for a list of supported traits.
+ HasTrait(trait int) bool
+
+ // TypeName returns the qualified type name of the type.
+ //
+ // The type name is also used as the type's identifier name at type-check
+ // and interpretation time.
+ TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the
+// addition of traits.
+type Val interface {
+ // ConvertToNative converts the Value to a native Go struct according to the
+ // reflected type description, or error if the conversion is not feasible.
+ ConvertToNative(typeDesc reflect.Type) (interface{}, error)
+
+ // ConvertToType supports type conversions between value types supported by
+ // the expression language.
+ ConvertToType(typeValue Type) Val
+
+ // Equal returns true if the `other` value has the same type and content as
+ // the implementing struct.
+ Equal(other Val) Val
+
+ // Type returns the TypeValue of the value.
+ Type() Type
+
+ // Value returns the raw value of the instance which may not be directly
+ // compatible with the expression language types.
+ Value() interface{}
+}
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 00000000000..b2873c96a89
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,200 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ "github.com/golang/protobuf/ptypes"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+// String type implementation which supports addition, comparison, matching,
+// and size functions.
+type String string
+
+var (
+ // StringType singleton.
+ StringType = NewTypeValue("string",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.MatcherType,
+ traits.ReceiverType,
+ traits.SizerType)
+
+ stringOneArgOverloads = map[string]func(String, ref.Val) ref.Val{
+ overloads.Contains: stringContains,
+ overloads.EndsWith: stringEndsWith,
+ overloads.StartsWith: stringStartsWith,
+ }
+)
+
+// Add implements traits.Adder.Add.
+func (s String) Add(other ref.Val) ref.Val {
+ if StringType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return s + other.(String)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (s String) Compare(other ref.Val) ref.Val {
+ if StringType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Int(strings.Compare(s.Value().(string), other.Value().(string)))
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (s String) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ switch typeDesc.Kind() {
+ case reflect.String:
+ return s.Value(), nil
+ case reflect.Ptr:
+ if typeDesc == jsonValueType {
+ return &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: s.Value().(string)}}, nil
+ }
+ if typeDesc.Elem().Kind() == reflect.String {
+ p := s.Value().(string)
+ return &p, nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(s).Implements(typeDesc) {
+ return s, nil
+ }
+ }
+ return nil, fmt.Errorf(
+ "unsupported native conversion from string to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (s String) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil {
+ return Int(n)
+ }
+ case UintType:
+ if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil {
+ return Uint(n)
+ }
+ case DoubleType:
+ if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil {
+ return Double(n)
+ }
+ case BoolType:
+ if b, err := strconv.ParseBool(s.Value().(string)); err == nil {
+ return Bool(b)
+ }
+ case BytesType:
+ return Bytes(s)
+ case DurationType:
+ if d, err := time.ParseDuration(s.Value().(string)); err == nil {
+ return Duration{ptypes.DurationProto(d)}
+ }
+ case TimestampType:
+ if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil {
+ if ts, err := ptypes.TimestampProto(t); err == nil {
+ return Timestamp{ts}
+ }
+ }
+ case StringType:
+ return s
+ case TypeType:
+ return StringType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (s String) Equal(other ref.Val) ref.Val {
+ if StringType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(s == other.(String))
+}
+
+// Match implements traits.Matcher.Match.
+func (s String) Match(pattern ref.Val) ref.Val {
+ if pattern.Type() != StringType {
+ return ValOrErr(pattern, "no such overload")
+ }
+ matched, err := regexp.MatchString(pattern.Value().(string), s.Value().(string))
+ if err != nil {
+ return &Err{err}
+ }
+ return Bool(matched)
+}
+
+// Receive implements traits.Receiver.Receive.
+func (s String) Receive(function string, overload string, args []ref.Val) ref.Val {
+ switch len(args) {
+ case 1:
+ if f, found := stringOneArgOverloads[function]; found {
+ return f(s, args[0])
+ }
+ }
+ return NewErr("no such overload")
+}
+
+// Size implements traits.Sizer.Size.
+func (s String) Size() ref.Val {
+ return Int(len([]rune(s.Value().(string))))
+}
+
+// Type implements ref.Val.Type.
+func (s String) Type() ref.Type {
+ return StringType
+}
+
+// Value implements ref.Val.Value.
+func (s String) Value() interface{} {
+ return string(s)
+}
+
+func stringContains(s String, sub ref.Val) ref.Val {
+ subStr, ok := sub.(String)
+ if !ok {
+ return ValOrErr(sub, "no such overload")
+ }
+ return Bool(strings.Contains(string(s), string(subStr)))
+}
+
+func stringEndsWith(s String, suf ref.Val) ref.Val {
+ sufStr, ok := suf.(String)
+ if !ok {
+ return ValOrErr(suf, "no such overload")
+ }
+ return Bool(strings.HasSuffix(string(s), string(sufStr)))
+}
+
+func stringStartsWith(s String, pre ref.Val) ref.Val {
+ preStr, ok := pre.(String)
+ if !ok {
+ return ValOrErr(pre, "no such overload")
+ }
+ return Bool(strings.HasPrefix(string(s), string(preStr)))
+}
diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go
new file mode 100644
index 00000000000..2482bded80d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -0,0 +1,284 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ tpb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Timestamp type implementation which supports add, compare, and subtract
+// operations. Timestamps are also capable of participating in dynamic
+// function dispatch to instance methods.
+type Timestamp struct {
+ *tpb.Timestamp
+}
+
+var (
+ // TimestampType singleton.
+ TimestampType = NewTypeValue("google.protobuf.Timestamp",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.ReceiverType,
+ traits.SubtractorType)
+)
+
+// Add implements traits.Adder.Add.
+func (t Timestamp) Add(other ref.Val) ref.Val {
+ switch other.Type() {
+ case DurationType:
+ return other.(Duration).Add(t)
+ }
+ return ValOrErr(other, "no such overload")
+}
+
+// Compare implements traits.Comparer.Compare.
+func (t Timestamp) Compare(other ref.Val) ref.Val {
+ if TimestampType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ ts1, err := ptypes.Timestamp(t.Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ ts2, err := ptypes.Timestamp(other.(Timestamp).Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ ts := ts1.Sub(ts2)
+ if ts < 0 {
+ return IntNegOne
+ }
+ if ts > 0 {
+ return IntOne
+ }
+ return IntZero
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ if typeDesc == timestampValueType {
+ return t.Value(), nil
+ }
+ // If the timestamp is already assignable to the desired type return it.
+ if reflect.TypeOf(t).AssignableTo(typeDesc) {
+ return t, nil
+ }
+ return nil, fmt.Errorf("type conversion error from "+
+ "'google.protobuf.Duration' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case StringType:
+ return String(ptypes.TimestampString(t.Timestamp))
+ case IntType:
+ if ts, err := ptypes.Timestamp(t.Timestamp); err == nil {
+ // Return the Unix time in seconds since 1970
+ return Int(ts.Unix())
+ }
+ case TimestampType:
+ return t
+ case TypeType:
+ return TimestampType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (t Timestamp) Equal(other ref.Val) ref.Val {
+ if TimestampType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(proto.Equal(t.Timestamp, other.Value().(proto.Message)))
+}
+
+// Receive implements traits.Receiver.Receive.
+func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
+ ts := t.Timestamp
+ tstamp, err := ptypes.Timestamp(ts)
+ if err != nil {
+ return &Err{err}
+ }
+ switch len(args) {
+ case 0:
+ if f, found := timestampZeroArgOverloads[function]; found {
+ return f(tstamp)
+ }
+ case 1:
+ if f, found := timestampOneArgOverloads[function]; found {
+ return f(tstamp, args[0])
+ }
+ }
+ return NewErr("no such overload")
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
+ switch subtrahend.Type() {
+ case DurationType:
+ ts, err := ptypes.Timestamp(t.Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ dur, err := ptypes.Duration(subtrahend.(Duration).Duration)
+ if err != nil {
+ return &Err{err}
+ }
+ tstamp, err := ptypes.TimestampProto(ts.Add(-dur))
+ if err != nil {
+ return &Err{err}
+ }
+ return Timestamp{tstamp}
+ case TimestampType:
+ ts1, err := ptypes.Timestamp(t.Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ ts2, err := ptypes.Timestamp(subtrahend.(Timestamp).Timestamp)
+ if err != nil {
+ return &Err{err}
+ }
+ return Duration{ptypes.DurationProto(ts1.Sub(ts2))}
+ }
+ return ValOrErr(subtrahend, "no such overload")
+}
+
+// Type implements ref.Val.Type.
+func (t Timestamp) Type() ref.Type {
+ return TimestampType
+}
+
+// Value implements ref.Val.Value.
+func (t Timestamp) Value() interface{} {
+ return t.Timestamp
+}
+
+var (
+ timestampValueType = reflect.TypeOf(&tpb.Timestamp{})
+
+ timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYear,
+ overloads.TimeGetMonth: timestampGetMonth,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYear,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBased,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeek,
+ overloads.TimeGetHours: timestampGetHours,
+ overloads.TimeGetMinutes: timestampGetMinutes,
+ overloads.TimeGetSeconds: timestampGetSeconds,
+ overloads.TimeGetMilliseconds: timestampGetMilliseconds}
+
+ timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{
+ overloads.TimeGetFullYear: timestampGetFullYearWithTz,
+ overloads.TimeGetMonth: timestampGetMonthWithTz,
+ overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz,
+ overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz,
+ overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz,
+ overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz,
+ overloads.TimeGetHours: timestampGetHoursWithTz,
+ overloads.TimeGetMinutes: timestampGetMinutesWithTz,
+ overloads.TimeGetSeconds: timestampGetSecondsWithTz,
+ overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz}
+)
+
+type timestampVisitor func(time.Time) ref.Val
+
+func timestampGetFullYear(t time.Time) ref.Val {
+ return Int(t.Year())
+}
+func timestampGetMonth(t time.Time) ref.Val {
+ // CEL spec indicates that the month should be 0-based, but the Time value
+ // for Month() is 1-based.
+ return Int(t.Month() - 1)
+}
+func timestampGetDayOfYear(t time.Time) ref.Val {
+ return Int(t.YearDay())
+}
+func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val {
+ return Int(t.Day() - 1)
+}
+func timestampGetDayOfMonthOneBased(t time.Time) ref.Val {
+ return Int(t.Day())
+}
+func timestampGetDayOfWeek(t time.Time) ref.Val {
+ return Int(t.Weekday())
+}
+func timestampGetHours(t time.Time) ref.Val {
+ return Int(t.Hour())
+}
+func timestampGetMinutes(t time.Time) ref.Val {
+ return Int(t.Minute())
+}
+func timestampGetSeconds(t time.Time) ref.Val {
+ return Int(t.Second())
+}
+func timestampGetMilliseconds(t time.Time) ref.Val {
+ return Int(t.Nanosecond() / 1000000)
+}
+
+func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetFullYear)(t)
+}
+func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMonth)(t)
+}
+func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfYear)(t)
+}
+func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthZeroBased)(t)
+}
+func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfMonthOneBased)(t)
+}
+func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetDayOfWeek)(t)
+}
+func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetHours)(t)
+}
+func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMinutes)(t)
+}
+func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetSeconds)(t)
+}
+func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val {
+ return timeZone(tz, timestampGetMilliseconds)(t)
+}
+
+func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
+ return func(t time.Time) ref.Val {
+ if StringType != tz.Type() {
+ return ValOrErr(tz, "no such overload")
+ }
+ loc, err := time.LoadLocation(string(tz.(String)))
+ if err == nil {
+ return visitor(t.In(loc))
+ }
+ return &Err{err}
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
new file mode 100644
index 00000000000..b531d9ae2bf
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/comparer.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Comparer interface for ordering comparisons between values in order to
+// support '<', '<=', '>=', '>' overloads.
+type Comparer interface {
+ // Compare this value to the input other value, returning an Int:
+ //
+ // this < other -> Int(-1)
+ // this == other -> Int(0)
+ // this > other -> Int(1)
+ //
+ // If the comparison cannot be made or is not supported, an error should
+ // be returned.
+ Compare(other ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/container.go b/vendor/github.com/google/cel-go/common/types/traits/container.go
new file mode 100644
index 00000000000..cf5c621ae9f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/container.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Container interface which permits containment tests such as 'a in b'.
+type Container interface {
+ // Contains returns true if the value exists within the object.
+ Contains(value ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
new file mode 100644
index 00000000000..816a9565238
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// FieldTester indicates if a defined field on an object type is set to a
+// non-default value.
+//
+// For use with the `has()` macro.
+type FieldTester interface {
+ // IsSet returns true if the field is defined and set to a non-default
+ // value. The method will return false if defined and not set, and an error
+ // if the field is not defined.
+ IsSet(field ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
new file mode 100644
index 00000000000..662c6836c33
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/indexer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Indexer permits random access of elements by index 'a[b()]'.
+type Indexer interface {
+ // Get the value at the specified index or error.
+ Get(index ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
new file mode 100644
index 00000000000..42dd371aa49
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Iterable aggregate types permit traversal over their elements.
+type Iterable interface {
+ // Iterator returns a new iterator view of the struct.
+ Iterator() Iterator
+}
+
+// Iterator permits safe traversal over the contents of an aggregate type.
+type Iterator interface {
+ ref.Val
+
+ // HasNext returns true if there are unvisited elements in the Iterator.
+ HasNext() ref.Val
+
+ // Next returns the next element.
+ Next() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go
new file mode 100644
index 00000000000..7b05c301d58
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Lister interface which aggregates the traits of a list.
+type Lister interface {
+ ref.Val
+ Adder
+ Container
+ Indexer
+ Iterable
+ Sizer
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
new file mode 100644
index 00000000000..e6d46357114
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -0,0 +1,26 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a maps.
+type Mapper interface {
+ ref.Val
+ Container
+ Indexer
+ Iterable
+ Sizer
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 00000000000..085dc94ff4f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+ // Match returns true if the pattern matches the current value.
+ Match(pattern ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/math.go b/vendor/github.com/google/cel-go/common/types/traits/math.go
new file mode 100644
index 00000000000..86d5b9137e6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/math.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Adder interface to support '+' operator overloads.
+type Adder interface {
+ // Add returns a combination of the current value and other value.
+ //
+ // If the other value is an unsupported type, an error is returned.
+ Add(other ref.Val) ref.Val
+}
+
+// Divider interface to support '/' operator overloads.
+type Divider interface {
+ // Divide returns the result of dividing the current value by the input
+ // denominator.
+ //
+ // A denominator value of zero results in an error.
+ Divide(denominator ref.Val) ref.Val
+}
+
+// Modder interface to support '%' operator overloads.
+type Modder interface {
+ // Modulo returns the result of taking the modulus of the current value
+ // by the denominator.
+ //
+ // A denominator value of zero results in an error.
+ Modulo(denominator ref.Val) ref.Val
+}
+
+// Multiplier interface to support '*' operator overloads.
+type Multiplier interface {
+ // Multiply returns the result of multiplying the current and input value.
+ Multiply(other ref.Val) ref.Val
+}
+
+// Negater interface to support unary '-' and '!' operator overloads.
+type Negater interface {
+ // Negate returns the complement of the current value.
+ Negate() ref.Val
+}
+
+// Subtractor interface to support binary '-' operator overloads.
+type Subtractor interface {
+ // Subtract returns the result of subtracting the input from the current
+ // value.
+ Subtract(subtrahend ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
new file mode 100644
index 00000000000..8f41db45e8f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/receiver.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Receiver interface for routing instance method calls within a value.
+type Receiver interface {
+ // Receive accepts a function name, overload id, and arguments and returns
+ // a value.
+ Receive(function string, overload string, args []ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
new file mode 100644
index 00000000000..b80d25137a7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/sizer.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Sizer interface for supporting 'size()' overloads.
+type Sizer interface {
+ // Size returns the number of elements or length of the value.
+ Size() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go
new file mode 100644
index 00000000000..6da3e6a3e1f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -0,0 +1,64 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traits defines interfaces that a type may implement to participate
+// in operator overloads and function dispatch.
+package traits
+
+const (
+ // AdderType types provide a '+' operator overload.
+ AdderType = 1 << iota
+
+ // ComparerType types support ordering comparisons '<', '<=', '>', '>='.
+ ComparerType
+
+ // ContainerType types support 'in' operations.
+ ContainerType
+
+ // DividerType types support '/' operations.
+ DividerType
+
+ // FieldTesterType types support the detection of field value presence.
+ FieldTesterType
+
+ // IndexerType types support index access with dynamic values.
+ IndexerType
+
+ // IterableType types can be iterated over in comprehensions.
+ IterableType
+
+ // IteratorType types support iterator semantics.
+ IteratorType
+
+ // MatcherType types support pattern matching via 'matches' method.
+ MatcherType
+
+ // ModderType types support modulus operations '%'
+ ModderType
+
+ // MultiplierType types support '*' operations.
+ MultiplierType
+
+ // NegatorType types support either negation via '!' or '-'
+ NegatorType
+
+ // ReceiverType types support dynamic dispatch to instance methods.
+ ReceiverType
+
+ // SizerType types support the size() method.
+ SizerType
+
+ // SubtractorType type support '-' operations.
+ SubtractorType
+)
diff --git a/vendor/github.com/google/cel-go/common/types/type.go b/vendor/github.com/google/cel-go/common/types/type.go
new file mode 100644
index 00000000000..5d5a781ca4a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/type.go
@@ -0,0 +1,105 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+var (
+ // TypeType is the type of a TypeValue.
+ TypeType = NewTypeValue("type")
+)
+
+// TypeValue is an instance of a Value that describes a value's type.
+type TypeValue struct {
+ name string
+ traitMask int
+}
+
+// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val.
+func NewTypeValue(name string, traits ...int) *TypeValue {
+ traitMask := 0
+ for _, trait := range traits {
+ traitMask |= trait
+ }
+ return &TypeValue{
+ name: name,
+ traitMask: traitMask}
+}
+
+// NewObjectTypeValue returns a *TypeValue based on the input name, which is
+// annotated with the traits relevant to all objects.
+func NewObjectTypeValue(name string) *TypeValue {
+ return NewTypeValue(name,
+ traits.FieldTesterType,
+ traits.IndexerType,
+ traits.IterableType)
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ // TODO: replace the internal type representation with a proto-value.
+ return nil, fmt.Errorf("type conversion not supported for 'type'")
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case TypeType:
+ return t
+ case StringType:
+ return String(t.TypeName())
+ }
+ return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (t *TypeValue) Equal(other ref.Val) ref.Val {
+ if TypeType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(t.TypeName() == other.(ref.Type).TypeName())
+}
+
+// HasTrait indicates whether the type supports the given trait.
+// Trait codes are defined in the traits package, e.g. see traits.AdderType.
+func (t *TypeValue) HasTrait(trait int) bool {
+ return trait&t.traitMask == trait
+}
+
+// String implements fmt.Stringer.
+func (t *TypeValue) String() string {
+ return t.name
+}
+
+// Type implements ref.Val.Type.
+func (t *TypeValue) Type() ref.Type {
+ return TypeType
+}
+
+// TypeName gives the type's name as a string.
+func (t *TypeValue) TypeName() string {
+ return t.name
+}
+
+// Value implements ref.Val.Value.
+func (t *TypeValue) Value() interface{} {
+ return t.name
+}
diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go
new file mode 100644
index 00000000000..73a0b50c010
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/uint.go
@@ -0,0 +1,170 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Uint type implementation which supports comparison and math operators.
+type Uint uint64
+
+var (
+ // UintType singleton.
+ UintType = NewTypeValue("uint",
+ traits.AdderType,
+ traits.ComparerType,
+ traits.DividerType,
+ traits.ModderType,
+ traits.MultiplierType,
+ traits.SubtractorType)
+)
+
+// Int constants
+const (
+ uintZero = Uint(0)
+)
+
+// Add implements traits.Adder.Add.
+func (i Uint) Add(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return i + other.(Uint)
+}
+
+// Compare implements traits.Comparer.Compare.
+func (i Uint) Compare(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ if i < other.(Uint) {
+ return IntNegOne
+ }
+ if i > other.(Uint) {
+ return IntOne
+ }
+ return IntZero
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ value := i.Value()
+ switch typeDesc.Kind() {
+ case reflect.Uint32:
+ return uint32(value.(uint64)), nil
+ case reflect.Uint64:
+ return value, nil
+ case reflect.Ptr:
+ if typeDesc == jsonValueType {
+ return &structpb.Value{
+ Kind: &structpb.Value_NumberValue{
+ NumberValue: float64(i)}}, nil
+ }
+ switch typeDesc.Elem().Kind() {
+ case reflect.Uint32:
+ p := uint32(i)
+ return &p, nil
+ case reflect.Uint64:
+ p := uint64(i)
+ return &p, nil
+ }
+ case reflect.Interface:
+ if reflect.TypeOf(i).Implements(typeDesc) {
+ return i, nil
+ }
+ }
+ return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case IntType:
+ return Int(i)
+ case UintType:
+ return i
+ case DoubleType:
+ return Double(i)
+ case StringType:
+ return String(fmt.Sprintf("%d", uint64(i)))
+ case TypeType:
+ return UintType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal)
+}
+
+// Divide implements traits.Divider.Divide.
+func (i Uint) Divide(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherUint := other.(Uint)
+ if otherUint == uintZero {
+ return NewErr("divide by zero")
+ }
+ return i / otherUint
+}
+
+// Equal implements ref.Val.Equal.
+func (i Uint) Equal(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return Bool(i == other.(Uint))
+}
+
+// Modulo implements traits.Modder.Modulo.
+func (i Uint) Modulo(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ otherUint := other.(Uint)
+ if otherUint == uintZero {
+ return NewErr("modulus by zero")
+ }
+ return i % otherUint
+}
+
+// Multiply implements traits.Multiplier.Multiply.
+func (i Uint) Multiply(other ref.Val) ref.Val {
+ if UintType != other.Type() {
+ return ValOrErr(other, "no such overload")
+ }
+ return i * other.(Uint)
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
+ if UintType != subtrahend.Type() {
+ return ValOrErr(subtrahend, "no such overload")
+ }
+ return i - subtrahend.(Uint)
+}
+
+// Type implements ref.Val.Type.
+func (i Uint) Type() ref.Type {
+ return UintType
+}
+
+// Value implements ref.Val.Value.
+func (i Uint) Value() interface{} {
+ return uint64(i)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go
new file mode 100644
index 00000000000..a950920d606
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -0,0 +1,61 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Unknown type implementation which collects expression ids which caused the
+// current value to become unknown.
+type Unknown []int64
+
+var (
+ // UnknownType singleton.
+ UnknownType = NewTypeValue("unknown")
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (u Unknown) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+ return u.Value(), nil
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val {
+ return u
+}
+
+// Equal implements ref.Val.Equal.
+func (u Unknown) Equal(other ref.Val) ref.Val {
+ return u
+}
+
+// Type implements ref.Val.Type.
+func (u Unknown) Type() ref.Type {
+ return UnknownType
+}
+
+// Value implements ref.Val.Value.
+func (u Unknown) Value() interface{} {
+ return []int64(u)
+}
+
+// IsUnknown returns whether the element ref.Type or ref.Val is equal to the
+// UnknownType singleton.
+func IsUnknown(val ref.Val) bool {
+ return val.Type() == UnknownType
+}
diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go
new file mode 100644
index 00000000000..df4b289832c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/util.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// IsUnknownOrError returns whether the input element ref.Type or ref.Val is an ErrType or
+// UnknonwType.
+func IsUnknownOrError(val ref.Val) bool {
+ switch val.Type() {
+ case UnknownType, ErrType:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go
new file mode 100644
index 00000000000..df79622c908
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -0,0 +1,189 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Activation used to resolve identifiers by name and references by id.
+//
+// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
+type Activation interface {
+ // ResolveName returns a value from the activation by qualified name, or false if the name
+ // could not be found.
+ ResolveName(name string) (ref.Val, bool)
+
+ // Parent returns the parent of the current activation, may be nil.
+ // If non-nil, the parent will be searched during resolve calls.
+ Parent() Activation
+}
+
+// EmptyActivation returns a variable free activation.
+func EmptyActivation() Activation {
+ // This call cannot fail.
+ a, _ := NewActivation(map[string]interface{}{})
+ return a
+}
+
+// NewActivation returns an activation based on a map-based binding where the map keys are
+// expected to be qualified names used with ResolveName calls.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]interface{}`.
+//
+// When the bindings are a `map` form whose values are not of `ref.Val` type, the values will be
+// converted to CEL values (if possible) using the `types.DefaultTypeAdapter`.
+func NewActivation(bindings interface{}) (Activation, error) {
+ return NewAdaptingActivation(types.DefaultTypeAdapter, bindings)
+}
+
+// NewAdaptingActivation returns an actvation which is capable of adapting `bindings` from native
+// Go values to equivalent CEL `ref.Val` objects.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]interface{}`.
+//
+// When the bindings are a `map` the values may be one of the following types:
+// - `ref.Val`: a CEL value instance.
+// - `func() ref.Val`: a CEL value supplier.
+// - other: a native value which must be converted to a CEL `ref.Val` by the `adapter`.
+func NewAdaptingActivation(adapter ref.TypeAdapter, bindings interface{}) (Activation, error) {
+ a, isActivation := bindings.(Activation)
+ if isActivation {
+ return a, nil
+ }
+ m, isMap := bindings.(map[string]interface{})
+ if !isMap {
+ return nil, fmt.Errorf(
+ "activation input must be an activation or map[string]interface: got %T",
+ bindings)
+ }
+ var allRefVals = true
+ for _, v := range m {
+ _, isVal := v.(ref.Val)
+ if !isVal {
+ allRefVals = false
+ break
+ }
+ }
+ if allRefVals {
+ return &mapActivation{bindings: m}, nil
+ }
+ return &mapActivation{adapter: adapter, bindings: m}, nil
+}
+
+// mapActivation which implements Activation and maps of named values.
+//
+// Named bindings may lazily supply values by providing a function which accepts no arguments and
+// produces an interface value.
+type mapActivation struct {
+ adapter ref.TypeAdapter
+ bindings map[string]interface{}
+}
+
+// Parent implements the Activation interface method.
+func (a *mapActivation) Parent() Activation {
+ return nil
+}
+
+// ResolveName implements the Activation interface method.
+func (a *mapActivation) ResolveName(name string) (ref.Val, bool) {
+ if object, found := a.bindings[name]; found {
+ switch object.(type) {
+ // Resolve a lazily bound value.
+ case func() ref.Val:
+ val := object.(func() ref.Val)()
+ a.bindings[name] = val
+ return val, true
+ // Otherwise, return the bound value.
+ case ref.Val:
+ return object.(ref.Val), true
+ default:
+ if a.adapter != nil {
+ return a.adapter.NativeToValue(object), true
+ }
+ }
+ }
+ return nil, false
+}
+
+// hierarchicalActivation which implements Activation and contains a parent and
+// child activation.
+type hierarchicalActivation struct {
+ parent Activation
+ child Activation
+}
+
+// Parent implements the Activation interface method.
+func (a *hierarchicalActivation) Parent() Activation {
+ return a.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (a *hierarchicalActivation) ResolveName(name string) (ref.Val, bool) {
+ if object, found := a.child.ResolveName(name); found {
+ return object, found
+ }
+ return a.parent.ResolveName(name)
+}
+
+// NewHierarchicalActivation takes two activations and produces a new one which prioritizes
+// resolution in the child first and parent(s) second.
+func NewHierarchicalActivation(parent Activation, child Activation) Activation {
+ return &hierarchicalActivation{parent, child}
+}
+
+// newVarActivation returns a new varActivation instance.
+func newVarActivation(parent Activation, name string) *varActivation {
+ return &varActivation{
+ parent: parent,
+ name: name,
+ }
+}
+
+// varActivation represents a single mutable variable binding.
+//
+// This activation type should only be used within folds as the fold loop controls the object
+// life-cycle.
+type varActivation struct {
+ parent Activation
+ name string
+ val ref.Val
+}
+
+// Parent implements the Activation interface method.
+func (v *varActivation) Parent() Activation {
+ return v.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (v *varActivation) ResolveName(name string) (ref.Val, bool) {
+ if name == v.name {
+ return v.val, true
+ }
+ return v.parent.ResolveName(name)
+}
+
+var (
+ // pool of var activations to reduce allocations during folds.
+ varActivationPool = &sync.Pool{
+ New: func() interface{} {
+ return &varActivation{}
+ },
+ }
+)
diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go
new file mode 100644
index 00000000000..f53e7f2e3b6
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/decorators.go
@@ -0,0 +1,287 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// evalObserver is a functional interface that accepts an expression id and an observed value.
+type evalObserver func(int64, ref.Val)
+
+// decObserveEval records evaluation state into an EvalState object.
+func decObserveEval(observer evalObserver) InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ return &evalWatch{
+ inst: i,
+ observer: observer,
+ }, nil
+ }
+}
+
+// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting.
+func decDisableShortcircuits() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch i.(type) {
+ case *evalOr:
+ or := i.(*evalOr)
+ return &evalExhaustiveOr{
+ id: or.id,
+ lhs: or.lhs,
+ rhs: or.rhs,
+ }, nil
+ case *evalAnd:
+ and := i.(*evalAnd)
+ return &evalExhaustiveAnd{
+ id: and.id,
+ lhs: and.lhs,
+ rhs: and.rhs,
+ }, nil
+ case *evalConditional:
+ cond := i.(*evalConditional)
+ return &evalExhaustiveConditional{
+ id: cond.id,
+ expr: cond.expr,
+ truthy: cond.truthy,
+ falsy: cond.falsy,
+ }, nil
+ case *evalFold:
+ fold := i.(*evalFold)
+ return &evalExhaustiveFold{
+ id: fold.id,
+ accu: fold.accu,
+ accuVar: fold.accuVar,
+ iterRange: fold.iterRange,
+ iterVar: fold.iterVar,
+ cond: fold.cond,
+ step: fold.step,
+ result: fold.result,
+ }, nil
+ }
+ return i, nil
+ }
+}
+
+// decFoldConstants checks whether the arguments the create list and map operations are all
+// constant values and constructs a new aggregate value. Future improvements to this method will
+// also do the same for typed object creations and functions whose arguments are constant.
+func decFoldConstants() InterpretableDecorator {
+ return func(i Interpretable) (Interpretable, error) {
+ switch i.(type) {
+ case *evalList:
+ l := i.(*evalList)
+ for _, elem := range l.elems {
+ _, isConst := elem.(*evalConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ val := l.Eval(EmptyActivation())
+ return &evalConst{
+ id: l.id,
+ val: val,
+ }, nil
+ case *evalMap:
+ mp := i.(*evalMap)
+ for idx, key := range mp.keys {
+ _, isConst := key.(*evalConst)
+ if !isConst {
+ return i, nil
+ }
+ _, isConst = mp.vals[idx].(*evalConst)
+ if !isConst {
+ return i, nil
+ }
+ }
+ val := mp.Eval(EmptyActivation())
+ return &evalConst{
+ id: mp.id,
+ val: val,
+ }, nil
+ }
+ return i, nil
+ }
+}
+
+// evalWatch is an Interpretable implementation that wraps the execution of a given
+// expression so that it may observe the computed value and send it to an observer.
+type evalWatch struct {
+ inst Interpretable
+ observer evalObserver
+}
+
+// ID implements the Interpretable interface method.
+func (e *evalWatch) ID() int64 {
+ return e.inst.ID()
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatch) Eval(ctx Activation) ref.Val {
+ val := e.inst.Eval(ctx)
+ e.observer(e.inst.ID(), val)
+ return val
+}
+
+// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
+type evalExhaustiveOr struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalExhaustiveOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
+ lVal := or.lhs.Eval(ctx)
+ rVal := or.rhs.Eval(ctx)
+ lBool, lok := lVal.(types.Bool)
+ if lok && lBool == types.True {
+ return types.True
+ }
+ rBool, rok := rVal.(types.Bool)
+ if rok && rBool == types.True {
+ return types.True
+ }
+ if lok && rok {
+ return types.False
+ }
+ if types.IsUnknown(lVal) {
+ return lVal
+ }
+ if types.IsUnknown(rVal) {
+ return rVal
+ }
+ return types.ValOrErr(lVal, "no such overload")
+}
+
+// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
+type evalExhaustiveAnd struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
+ lVal := and.lhs.Eval(ctx)
+ rVal := and.rhs.Eval(ctx)
+ lBool, lok := lVal.(types.Bool)
+ if lok && lBool == types.False {
+ return types.False
+ }
+ rBool, rok := rVal.(types.Bool)
+ if rok && rBool == types.False {
+ return types.False
+ }
+ if lok && rok {
+ return types.True
+ }
+ if types.IsUnknown(lVal) {
+ return lVal
+ }
+ if types.IsUnknown(rVal) {
+ return rVal
+ }
+ return types.ValOrErr(lVal, "no such overload")
+}
+
+// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
+// evaluation.
+type evalExhaustiveConditional struct {
+ id int64
+ expr Interpretable
+ truthy Interpretable
+ falsy Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) ID() int64 {
+ return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
+ cVal := cond.expr.Eval(ctx)
+ tVal := cond.truthy.Eval(ctx)
+ fVal := cond.falsy.Eval(ctx)
+ cBool, ok := cVal.(types.Bool)
+ if !ok {
+ return types.ValOrErr(cVal, "no such overload")
+ }
+ if cBool {
+ return tVal
+ }
+ return fVal
+}
+
+// evalExhaustiveFold is like evalFold, but does not short-circuit argument evaluation.
+type evalExhaustiveFold struct {
+ id int64
+ accuVar string
+ iterVar string
+ iterRange Interpretable
+ accu Interpretable
+ cond Interpretable
+ step Interpretable
+ result Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (fold *evalExhaustiveFold) ID() int64 {
+ return fold.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fold *evalExhaustiveFold) Eval(ctx Activation) ref.Val {
+ foldRange := fold.iterRange.Eval(ctx)
+ if !foldRange.Type().HasTrait(traits.IterableType) {
+ return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
+ }
+ // Configure the fold activation with the accumulator initial value.
+ accuCtx := varActivationPool.Get().(*varActivation)
+ accuCtx.parent = ctx
+ accuCtx.name = fold.accuVar
+ accuCtx.val = fold.accu.Eval(ctx)
+ iterCtx := varActivationPool.Get().(*varActivation)
+ iterCtx.parent = accuCtx
+ iterCtx.name = fold.iterVar
+ it := foldRange.(traits.Iterable).Iterator()
+ for it.HasNext() == types.True {
+ // Modify the iter var in the fold activation.
+ iterCtx.val = it.Next()
+
+ // Evaluate the condition, but don't terminate the loop as this is exhaustive eval!
+ fold.cond.Eval(iterCtx)
+
+ // Evalute the evaluation step into accu var.
+ accuCtx.val = fold.step.Eval(iterCtx)
+ }
+ // Compute the result.
+ res := fold.result.Eval(accuCtx)
+ varActivationPool.Put(iterCtx)
+ varActivationPool.Put(accuCtx)
+ return res
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
new file mode 100644
index 00000000000..2c6516f24df
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go
@@ -0,0 +1,100 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/interpreter/functions"
+)
+
+// Dispatcher resolves function calls to their appropriate overload.
+// Dispatchers may be chained (see ExtendDispatcher), in which case lookups
+// that miss locally may fall back to a parent Dispatcher.
+type Dispatcher interface {
+ // Add one or more overloads, returning an error if any Overload has the same Overload#Name.
+ Add(overloads ...*functions.Overload) error
+
+ // FindOverload returns an Overload definition matching the provided name.
+ FindOverload(overload string) (*functions.Overload, bool)
+
+ // OverloadIds returns the set of all overload identifiers configured for dispatch.
+ OverloadIds() []string
+}
+
+// NewDispatcher returns an empty Dispatcher instance.
+func NewDispatcher() Dispatcher {
+ return &defaultDispatcher{
+ overloads: make(map[string]*functions.Overload)}
+}
+
+// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and
+// provides an isolation layer between built-ins and extension functions which is useful
+// for forward compatibility. Overloads added to the returned Dispatcher are stored
+// locally and never mutate the parent.
+func ExtendDispatcher(parent Dispatcher) Dispatcher {
+ return &defaultDispatcher{
+ parent: parent,
+ overloads: make(map[string]*functions.Overload)}
+}
+
+// overloadMap helper type for indexing overloads by function name.
+type overloadMap map[string]*functions.Overload
+
+// defaultDispatcher struct which contains an overload map and an optional
+// parent Dispatcher consulted when a lookup misses locally.
+type defaultDispatcher struct {
+ parent Dispatcher
+ overloads overloadMap
+}
+
+// Add implements the Dispatcher.Add interface method.
+// Note: only the local overload map is checked for duplicates, so an overload
+// added here may shadow one of the same name defined in the parent.
+func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error {
+ for _, o := range overloads {
+ // add the overload unless an overload of the same name has already been provided.
+ if _, found := d.overloads[o.Operator]; found {
+ return fmt.Errorf("overload already exists '%s'", o.Operator)
+ }
+ // index the overload by function name.
+ d.overloads[o.Operator] = o
+ }
+ return nil
+}
+
+// FindOverload implements the Dispatcher.FindOverload interface method.
+// Local definitions take precedence over the parent's.
+func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
+ o, found := d.overloads[overload]
+ // Attempt to dispatch to an overload defined in the parent.
+ if !found && d.parent != nil {
+ return d.parent.FindOverload(overload)
+ }
+ return o, found
+}
+
+// OverloadIds implements the Dispatcher interface method.
+// Parent identifiers are appended after local ones, skipping any name that is
+// shadowed by a local overload.
+func (d *defaultDispatcher) OverloadIds() []string {
+ i := 0
+ overloads := make([]string, len(d.overloads), len(d.overloads))
+ for name := range d.overloads {
+ overloads[i] = name
+ i++
+ }
+ if d.parent == nil {
+ return overloads
+ }
+ parentOverloads := d.parent.OverloadIds()
+ for _, pName := range parentOverloads {
+ if _, found := d.overloads[pName]; !found {
+ overloads = append(overloads, pName)
+ }
+ }
+ return overloads
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go
new file mode 100644
index 00000000000..351c3e38d5e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go
@@ -0,0 +1,60 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// EvalState tracks the values associated with expression ids during execution.
+type EvalState interface {
+ // Value returns the observed value of the given expression id if found, and a nil false
+ // result if not.
+ Value(int64) (ref.Val, bool)
+
+ // SetValue sets the observed value of the expression id.
+ SetValue(int64, ref.Val)
+
+ // Reset clears the previously recorded expression values.
+ Reset()
+}
+
+// evalState permits the mutation of evaluation state for a given expression id.
+// It is a simple map-backed implementation of EvalState.
+type evalState struct {
+ values map[int64]ref.Val
+}
+
+// NewEvalState returns an EvalState instanced used to observe the intermediate
+// evaluations of an expression.
+func NewEvalState() EvalState {
+ return &evalState{
+ values: make(map[int64]ref.Val),
+ }
+}
+
+// Value is an implementation of the EvalState interface method.
+func (s *evalState) Value(exprID int64) (ref.Val, bool) {
+ val, found := s.values[exprID]
+ return val, found
+}
+
+// SetValue is an implementation of the EvalState interface method.
+func (s *evalState) SetValue(exprID int64, val ref.Val) {
+ s.values[exprID] = val
+}
+
+// Reset is an implementation of the EvalState interface method. It discards
+// the backing map, releasing all previously recorded values.
+func (s *evalState) Reset() {
+ s.values = map[int64]ref.Val{}
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go
new file mode 100644
index 00000000000..4ca706e96cd
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/functions/functions.go
@@ -0,0 +1,58 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the
+// interpreter and as declared within the checker#StandardDeclarations.
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+ // Operator name as written in an expression or defined within
+ // operators.go.
+ Operator string
+
+ // Operand trait used to dispatch the call. The zero-value indicates a
+ // global function overload or that one of the Unary / Binary / Function
+ // definitions should be used to execute the call.
+ OperandTrait int
+
+ // Unary defines the overload with a UnaryOp implementation. May be nil.
+ Unary UnaryOp
+
+ // Binary defines the overload with a BinaryOp implementation. May be nil.
+ Binary BinaryOp
+
+ // Function defines the overload with a FunctionOp implementation. May be
+ // nil.
+ Function FunctionOp
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or error as a ref.Val result.
+type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/vendor/github.com/google/cel-go/interpreter/functions/standard.go b/vendor/github.com/google/cel-go/interpreter/functions/standard.go
new file mode 100644
index 00000000000..be225364c4a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/functions/standard.go
@@ -0,0 +1,260 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package functions
+
+import (
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// StandardOverloads returns the definitions of the built-in overloads.
+// Comparison overloads rely on the traits.Comparer convention of returning
+// types.IntNegOne, types.IntZero, or types.IntOne; any other value (e.g. an
+// error) is passed through unchanged.
+func StandardOverloads() []*Overload {
+ return []*Overload{
+ // Logical not (!a)
+ {
+ Operator: operators.LogicalNot,
+ OperandTrait: traits.NegatorType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Negater).Negate()
+ }},
+ // Not strictly false: IsBool(a) ? a : true
+ {
+ Operator: operators.NotStrictlyFalse,
+ Unary: notStrictlyFalse},
+ // Deprecated: not strictly false, may be overridden in the environment.
+ {
+ Operator: operators.OldNotStrictlyFalse,
+ Unary: notStrictlyFalse},
+
+ // Less than operator
+ {Operator: operators.Less,
+ OperandTrait: traits.ComparerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne {
+ return types.True
+ }
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }},
+
+ // Less than or equal operator
+ {Operator: operators.LessEquals,
+ OperandTrait: traits.ComparerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntOne {
+ return types.False
+ }
+ return cmp
+ }},
+
+ // Greater than operator
+ {Operator: operators.Greater,
+ OperandTrait: traits.ComparerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne {
+ return types.True
+ }
+ if cmp == types.IntNegOne || cmp == types.IntZero {
+ return types.False
+ }
+ return cmp
+ }},
+
+ // Greater than or equal operator
+ {Operator: operators.GreaterEquals,
+ OperandTrait: traits.ComparerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ cmp := lhs.(traits.Comparer).Compare(rhs)
+ if cmp == types.IntOne || cmp == types.IntZero {
+ return types.True
+ }
+ if cmp == types.IntNegOne {
+ return types.False
+ }
+ return cmp
+ }},
+
+ // TODO: Verify overflow, NaN, underflow cases for numeric values.
+
+ // Add operator
+ {Operator: operators.Add,
+ OperandTrait: traits.AdderType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Adder).Add(rhs)
+ }},
+
+ // Subtract operator
+ {Operator: operators.Subtract,
+ OperandTrait: traits.SubtractorType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Subtractor).Subtract(rhs)
+ }},
+
+ // Multiply operator
+ {Operator: operators.Multiply,
+ OperandTrait: traits.MultiplierType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Multiplier).Multiply(rhs)
+ }},
+
+ // Divide operator
+ {Operator: operators.Divide,
+ OperandTrait: traits.DividerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Divider).Divide(rhs)
+ }},
+
+ // Modulo operator
+ {Operator: operators.Modulo,
+ OperandTrait: traits.ModderType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Modder).Modulo(rhs)
+ }},
+
+ // Negate operator
+ {Operator: operators.Negate,
+ OperandTrait: traits.NegatorType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Negater).Negate()
+ }},
+
+ // Index operator
+ {Operator: operators.Index,
+ OperandTrait: traits.IndexerType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Indexer).Get(rhs)
+ }},
+
+ // Size function
+ {Operator: overloads.Size,
+ OperandTrait: traits.SizerType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Sizer).Size()
+ }},
+
+ // In operator
+ {Operator: operators.In, Binary: inAggregate},
+ // Deprecated: in operator, may be overridden in the environment.
+ {Operator: operators.OldIn, Binary: inAggregate},
+
+ // Matches function
+ {Operator: overloads.Matches,
+ OperandTrait: traits.MatcherType,
+ Binary: func(lhs ref.Val, rhs ref.Val) ref.Val {
+ return lhs.(traits.Matcher).Match(rhs)
+ }},
+
+ // Type conversion functions
+ // TODO: verify type conversion safety of numeric values.
+
+ // Int conversions.
+ {Operator: overloads.TypeConvertInt,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.IntType)
+ }},
+
+ // Uint conversions.
+ {Operator: overloads.TypeConvertUint,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.UintType)
+ }},
+
+ // Double conversions.
+ {Operator: overloads.TypeConvertDouble,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.DoubleType)
+ }},
+
+ // Bool conversions.
+ {Operator: overloads.TypeConvertBool,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.BoolType)
+ }},
+
+ // Bytes conversions.
+ {Operator: overloads.TypeConvertBytes,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.BytesType)
+ }},
+
+ // String conversions.
+ {Operator: overloads.TypeConvertString,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.StringType)
+ }},
+
+ // Timestamp conversions.
+ {Operator: overloads.TypeConvertTimestamp,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.TimestampType)
+ }},
+
+ // Duration conversions.
+ {Operator: overloads.TypeConvertDuration,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.DurationType)
+ }},
+
+ // Type operations.
+ {Operator: overloads.TypeConvertType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.ConvertToType(types.TypeType)
+ }},
+
+ // Iterator support used by comprehension evaluation.
+ {Operator: overloads.Iterator,
+ OperandTrait: traits.IterableType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Iterable).Iterator()
+ }},
+
+ {Operator: overloads.HasNext,
+ OperandTrait: traits.IteratorType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Iterator).HasNext()
+ }},
+
+ {Operator: overloads.Next,
+ OperandTrait: traits.IteratorType,
+ Unary: func(value ref.Val) ref.Val {
+ return value.(traits.Iterator).Next()
+ }},
+ }
+
+}
+
+// notStrictlyFalse returns the value unchanged when it is a bool, and types.True
+// otherwise, i.e. non-bool values (such as errors) are treated as "not strictly
+// false".
+func notStrictlyFalse(value ref.Val) ref.Val {
+ if types.IsBool(value) {
+ return value
+ }
+ return types.True
+}
+
+// inAggregate implements the 'in' operator by delegating to the right-hand
+// side's Container trait; a non-container rhs yields a no-such-overload error.
+func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val {
+ if rhs.Type().HasTrait(traits.ContainerType) {
+ return rhs.(traits.Container).Contains(lhs)
+ }
+ return types.ValOrErr(rhs, "no such overload")
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go
new file mode 100644
index 00000000000..3ae550d782f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package interpreter provides functions to evaluate parsed expressions with
+// the option to augment the evaluation with inputs and functions supplied at
+// evaluation time.
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/packages"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/interpreter/functions"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Interpretable can accept a given Activation and produce a value along with
+// an accompanying EvalState which can be used to inspect whether additional
+// data might be necessary to complete the evaluation.
+type Interpretable interface {
+ // ID value corresponding to the expression node.
+ ID() int64
+
+ // Eval an Activation to produce an output.
+ Eval(activation Activation) ref.Val
+}
+
+// InterpretableDecorator is a functional interface for decorating or replacing
+// Interpretable expression nodes at construction time.
+type InterpretableDecorator func(Interpretable) (Interpretable, error)
+
+// Interpreter generates a new Interpretable from a checked or unchecked expression.
+type Interpreter interface {
+ // NewInterpretable creates an Interpretable from a checked expression and an
+ // optional list of InterpretableDecorator values.
+ NewInterpretable(checked *exprpb.CheckedExpr,
+ decorators ...InterpretableDecorator) (Interpretable, error)
+
+ // NewUncheckedInterpretable returns an Interpretable from a parsed expression
+ // and an optional list of InterpretableDecorator values.
+ NewUncheckedInterpretable(expr *exprpb.Expr,
+ decorators ...InterpretableDecorator) (Interpretable, error)
+}
+
+// TrackState decorates each expression node with an observer which records the value
+// associated with the given expression id. EvalState must be provided to the decorator.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+func TrackState(state EvalState) InterpretableDecorator {
+ observer := func(id int64, val ref.Val) {
+ state.SetValue(id, val)
+ }
+ return decObserveEval(observer)
+}
+
+// ExhaustiveEval replaces operations that short-circuit with versions that evaluate
+// expressions and couples this behavior with the TrackState() decorator to provide
+// insight into the evaluation state of the entire expression. EvalState must be
+// provided to the decorator. This decorator is not thread-safe, and the EvalState
+// must be reset between Eval() calls.
+func ExhaustiveEval(state EvalState) InterpretableDecorator {
+ ex := decDisableShortcircuits()
+ obs := TrackState(state)
+ // Compose the two decorators: disable short-circuits first, then observe.
+ return func(i Interpretable) (Interpretable, error) {
+ var err error
+ i, err = ex(i)
+ if err != nil {
+ return nil, err
+ }
+ return obs(i)
+ }
+}
+
+// FoldConstants will pre-compute list and map literals comprised entirely of constant entries.
+// This optimization will increase the set of constant fold operations over time.
+func FoldConstants() InterpretableDecorator {
+ return decFoldConstants()
+}
+
+// exprInterpreter is the default Interpreter implementation, bundling the
+// function dispatcher, package resolver, and type provider/adapter used by
+// the planner.
+type exprInterpreter struct {
+ dispatcher Dispatcher
+ packager packages.Packager
+ provider ref.TypeProvider
+ adapter ref.TypeAdapter
+}
+
+// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
+// throughout the Eval of all Interpretable instances generated from it.
+func NewInterpreter(dispatcher Dispatcher, packager packages.Packager,
+ provider ref.TypeProvider,
+ adapter ref.TypeAdapter) Interpreter {
+ return &exprInterpreter{
+ dispatcher: dispatcher,
+ packager: packager,
+ provider: provider,
+ adapter: adapter}
+}
+
+// NewStandardInterpreter builds a Dispatcher and TypeProvider with support for all of the CEL
+// builtins defined in the language definition.
+func NewStandardInterpreter(packager packages.Packager, provider ref.TypeProvider,
+ adapter ref.TypeAdapter) Interpreter {
+ dispatcher := NewDispatcher()
+ // NOTE(review): the error from Add is intentionally dropped here; the
+ // standard overloads are presumed collision-free on an empty dispatcher —
+ // confirm against functions.StandardOverloads.
+ dispatcher.Add(functions.StandardOverloads()...)
+ return NewInterpreter(dispatcher, packager, provider, adapter)
+}
+
+// NewInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewInterpretable(
+ checked *exprpb.CheckedExpr,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.packager,
+ checked,
+ decorators...)
+ return p.Plan(checked.GetExpr())
+}
+
+// NewUncheckedInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewUncheckedInterpretable(
+ expr *exprpb.Expr,
+ decorators ...InterpretableDecorator) (Interpretable, error) {
+ p := newUncheckedPlanner(
+ i.dispatcher,
+ i.provider,
+ i.adapter,
+ i.packager,
+ decorators...)
+ return p.Plan(expr)
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go
new file mode 100644
index 00000000000..e92288f309b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -0,0 +1,1106 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/packages"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter/functions"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value.
+type interpretablePlanner interface {
+ // Plan generates an Interpretable value (or error) from the input proto Expr.
+ Plan(expr *exprpb.Expr) (Interpretable, error)
+}
+
+// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, Packager, and CheckedExpr value. These pieces of data are used to resolve
+// functions, types, and namespaced identifiers at plan time rather than at runtime since
+// it only needs to be done once and may be semi-expensive to compute.
+func newPlanner(disp Dispatcher,
+ provider ref.TypeProvider,
+ adapter ref.TypeAdapter,
+ pkg packages.Packager,
+ checked *exprpb.CheckedExpr,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ pkg: pkg,
+ identMap: make(map[string]Interpretable),
+ refMap: checked.GetReferenceMap(),
+ typeMap: checked.GetTypeMap(),
+ decorators: decorators,
+ }
+}
+
+// newUncheckedPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider,
+// TypeAdapter, and Packager to resolve functions and types at plan time. Namespaces present in
+// Select expressions are resolved lazily at evaluation time.
+// The empty ref/type maps mean no checker-resolved references are available.
+func newUncheckedPlanner(disp Dispatcher,
+ provider ref.TypeProvider,
+ adapter ref.TypeAdapter,
+ pkg packages.Packager,
+ decorators ...InterpretableDecorator) interpretablePlanner {
+ return &planner{
+ disp: disp,
+ provider: provider,
+ adapter: adapter,
+ pkg: pkg,
+ identMap: make(map[string]Interpretable),
+ refMap: make(map[int64]*exprpb.Reference),
+ typeMap: make(map[int64]*exprpb.Type),
+ decorators: decorators,
+ }
+}
+
+// planner is an implementation of the interpretablePlanner interface.
+type planner struct {
+ disp Dispatcher
+ provider ref.TypeProvider
+ adapter ref.TypeAdapter
+ pkg packages.Packager
+ identMap map[string]Interpretable
+ refMap map[int64]*exprpb.Reference
+ typeMap map[int64]*exprpb.Type
+ decorators []InterpretableDecorator
+}
+
+// Plan implements the interpretablePlanner interface. This implementation of the Plan method also
+// applies decorators to each Interpretable generated as part of the overall plan. Decorators are
+// useful for layering functionality into the evaluation that is not natively understood by CEL,
+// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of
+// repeated expressions.
+func (p *planner) Plan(expr *exprpb.Expr) (Interpretable, error) {
+ switch expr.ExprKind.(type) {
+ case *exprpb.Expr_CallExpr:
+ return p.decorate(p.planCall(expr))
+ case *exprpb.Expr_IdentExpr:
+ return p.decorate(p.planIdent(expr))
+ case *exprpb.Expr_SelectExpr:
+ return p.decorate(p.planSelect(expr))
+ case *exprpb.Expr_ListExpr:
+ return p.decorate(p.planCreateList(expr))
+ case *exprpb.Expr_StructExpr:
+ return p.decorate(p.planCreateStruct(expr))
+ case *exprpb.Expr_ComprehensionExpr:
+ return p.decorate(p.planComprehension(expr))
+ case *exprpb.Expr_ConstExpr:
+ return p.decorate(p.planConst(expr))
+ }
+ return nil, fmt.Errorf("unsupported expr: %v", expr)
+}
+
+// decorate applies the InterpretableDecorator functions to the given Interpretable.
+// Both the Interpretable and error generated by a Plan step are accepted as arguments
+// for convenience. Decorators are applied in registration order.
+func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) {
+ if err != nil {
+ return nil, err
+ }
+ for _, dec := range p.decorators {
+ i, err = dec(i)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return i, nil
+}
+
+// planIdent creates an Interpretable that resolves an identifier from an Activation.
+// The resulting evalIdent is cached by name in identMap, so the same instance is
+// shared across all occurrences of the identifier in the plan.
+func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) {
+ ident := expr.GetIdentExpr()
+ idName := ident.Name
+ i, found := p.identMap[idName]
+ if found {
+ return i, nil
+ }
+ i = &evalIdent{
+ id: expr.Id,
+ name: idName,
+ provider: p.provider,
+ }
+ p.identMap[idName] = i
+ return i, nil
+}
+
+// planSelect creates an Interpretable with either:
+// a) selects a field from a map or proto.
+// b) creates a field presence test for a select within a has() macro.
+// c) resolves the select expression to a namespaced identifier.
+func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
+ sel := expr.GetSelectExpr()
+ // If the Select was marked TestOnly, this is a presence test.
+ //
+ // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json)
+ // as follows:
+ // - True if the object field has a non-default value, e.g. obj.str != ""
+ // - True if the dynamic value has the field defined, e.g. key in map
+ //
+ // However, presence tests are not defined for qualified identifier names with primitive types.
+ // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
+ // it is not clear whether has should error or follow the convention defined for structured
+ // values.
+ if sel.TestOnly {
+ op, err := p.Plan(sel.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ return &evalTestOnly{
+ id: expr.Id,
+ field: types.String(sel.Field),
+ op: op,
+ }, nil
+ }
+
+ // If the Select id appears in the reference map from the CheckedExpr proto then it is either
+ // a namespaced identifier or enum value.
+ idRef, found := p.refMap[expr.Id]
+ if found {
+ idName := idRef.Name
+ // If the reference has a value, this id represents an enum.
+ if idRef.Value != nil {
+ // Re-plan the enum value as a constant expression.
+ return p.Plan(&exprpb.Expr{Id: expr.Id,
+ ExprKind: &exprpb.Expr_ConstExpr{
+ ConstExpr: idRef.Value,
+ }})
+ }
+ // If the identifier has already been encountered before, return the previous Iterable.
+ i, found := p.identMap[idName]
+ if found {
+ return i, nil
+ }
+ // Otherwise, generate an evalIdent Interpretable.
+ // NOTE(review): unlike planIdent, the provider field is left unset here —
+ // presumably resolution relies on the checker-qualified name; confirm
+ // against evalIdent.Eval.
+ i = &evalIdent{
+ id: expr.Id,
+ name: idName,
+ }
+ p.identMap[idName] = i
+ return i, nil
+ }
+
+ // Lastly, create a field selection Interpretable.
+ op, err := p.Plan(sel.GetOperand())
+ if err != nil {
+ return nil, err
+ }
+ return &evalSelect{
+ id: expr.Id,
+ field: types.String(sel.Field),
+ op: op,
+ resolveID: p.idResolver(sel),
+ }, nil
+}
+
+// planCall creates a callable Interpretable while specializing for common functions and invocation
+// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
+// optimized Interpretable values.
+func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
+ call := expr.GetCallExpr()
+ fnName := call.Function
+ fnDef, _ := p.disp.FindOverload(fnName)
+ argCount := len(call.GetArgs())
+ var offset int
+ // A receiver-style call (target.fn(...)) contributes the target as the
+ // implicit first argument.
+ if call.Target != nil {
+ argCount++
+ offset++
+ }
+ args := make([]Interpretable, argCount, argCount)
+ if call.Target != nil {
+ arg, err := p.Plan(call.Target)
+ if err != nil {
+ return nil, err
+ }
+ args[0] = arg
+ }
+ for i, argExpr := range call.GetArgs() {
+ arg, err := p.Plan(argExpr)
+ if err != nil {
+ return nil, err
+ }
+ args[i+offset] = arg
+ }
+ // Record the overload id iff the checker resolved the call to exactly one overload.
+ var oName string
+ if oRef, found := p.refMap[expr.Id]; found &&
+ len(oRef.GetOverloadId()) == 1 {
+ oName = oRef.GetOverloadId()[0]
+ }
+
+ // Generate specialized Interpretable operators by function name if possible.
+ // These short-circuiting/equality forms ignore fnDef and oName.
+ switch fnName {
+ case operators.LogicalAnd:
+ return p.planCallLogicalAnd(expr, args)
+ case operators.LogicalOr:
+ return p.planCallLogicalOr(expr, args)
+ case operators.Conditional:
+ return p.planCallConditional(expr, args)
+ case operators.Equals:
+ return p.planCallEqual(expr, args)
+ case operators.NotEquals:
+ return p.planCallNotEqual(expr, args)
+ }
+
+ // Otherwise, generate Interpretable calls specialized by argument count.
+ switch argCount {
+ case 0:
+ return p.planCallZero(expr, fnName, oName, fnDef)
+ case 1:
+ return p.planCallUnary(expr, fnName, oName, fnDef, args)
+ case 2:
+ return p.planCallBinary(expr, fnName, oName, fnDef, args)
+ default:
+ return p.planCallVarArgs(expr, fnName, oName, fnDef, args)
+ }
+}
+
+// planCallZero generates a zero-arity callable Interpretable.
+// A function implementation must exist at plan time since there are no
+// operands to drive runtime dispatch.
+func (p *planner) planCallZero(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload) (Interpretable, error) {
+ if impl == nil || impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s()", function)
+ }
+ return &evalZeroArity{
+ id: expr.Id,
+ impl: impl.Function,
+ }, nil
+}
+
+// planCallUnary generates a unary callable Interpretable.
+// When impl is nil the evalUnary is built with a nil fn; presumably dispatch
+// then happens at runtime via the operand's traits — confirm against evalUnary.
+func (p *planner) planCallUnary(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.UnaryOp
+ var trait int
+ if impl != nil {
+ if impl.Unary == nil {
+ return nil, fmt.Errorf("no such overload: %s(arg)", function)
+ }
+ fn = impl.Unary
+ trait = impl.OperandTrait
+ }
+ return &evalUnary{
+ id: expr.Id,
+ function: function,
+ overload: overload,
+ arg: args[0],
+ trait: trait,
+ impl: fn,
+ }, nil
+}
+
+// planCallBinary generates a binary callable Interpretable.
+// Mirrors planCallUnary: a known overload must supply a Binary implementation.
+func (p *planner) planCallBinary(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.BinaryOp
+ var trait int
+ if impl != nil {
+ if impl.Binary == nil {
+ return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function)
+ }
+ fn = impl.Binary
+ trait = impl.OperandTrait
+ }
+ return &evalBinary{
+ id: expr.Id,
+ function: function,
+ overload: overload,
+ lhs: args[0],
+ rhs: args[1],
+ trait: trait,
+ impl: fn,
+ }, nil
+}
+
+// planCallVarArgs generates a variable argument callable Interpretable.
+// Mirrors planCallUnary/planCallBinary for arities of three or more.
+func (p *planner) planCallVarArgs(expr *exprpb.Expr,
+ function string,
+ overload string,
+ impl *functions.Overload,
+ args []Interpretable) (Interpretable, error) {
+ var fn functions.FunctionOp
+ var trait int
+ if impl != nil {
+ if impl.Function == nil {
+ return nil, fmt.Errorf("no such overload: %s(...)", function)
+ }
+ fn = impl.Function
+ trait = impl.OperandTrait
+ }
+ return &evalVarArgs{
+ id: expr.Id,
+ function: function,
+ overload: overload,
+ args: args,
+ trait: trait,
+ impl: fn,
+ }, nil
+}
+
+// planCallEqual generates an equals (==) Interpretable.
+func (p *planner) planCallEqual(expr *exprpb.Expr,
+ args []Interpretable) (Interpretable, error) {
+ return &evalEq{
+ id: expr.Id,
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallNotEqual generates a not equals (!=) Interpretable.
+func (p *planner) planCallNotEqual(expr *exprpb.Expr,
+ args []Interpretable) (Interpretable, error) {
+ return &evalNe{
+ id: expr.Id,
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallLogicalAnd generates a logical and (&&) Interpretable.
+func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
+ args []Interpretable) (Interpretable, error) {
+ return &evalAnd{
+ id: expr.Id,
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallLogicalOr generates a logical or (||) Interpretable.
+func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
+ args []Interpretable) (Interpretable, error) {
+ return &evalOr{
+ id: expr.Id,
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+}
+
+// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
+func (p *planner) planCallConditional(expr *exprpb.Expr,
+ args []Interpretable) (Interpretable, error) {
+ return &evalConditional{
+ id: expr.Id,
+ expr: args[0],
+ truthy: args[1],
+ falsy: args[2],
+ }, nil
+}
+
+// planCreateList generates a list construction Interpretable.
+func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
+ list := expr.GetListExpr()
+ elems := make([]Interpretable, len(list.GetElements()), len(list.GetElements()))
+ for i, elem := range list.GetElements() {
+ elemVal, err := p.Plan(elem)
+ if err != nil {
+ return nil, err
+ }
+ elems[i] = elemVal
+ }
+ return &evalList{
+ id: expr.Id,
+ elems: elems,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateStruct generates a map or object construction Interpretable.
+func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
+ str := expr.GetStructExpr()
+ if len(str.MessageName) != 0 {
+ return p.planCreateObj(expr)
+ }
+ entries := str.GetEntries()
+ keys := make([]Interpretable, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, entry := range entries {
+ keyVal, err := p.Plan(entry.GetMapKey())
+ if err != nil {
+ return nil, err
+ }
+ keys[i] = keyVal
+
+ valVal, err := p.Plan(entry.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = valVal
+ }
+ return &evalMap{
+ id: expr.Id,
+ keys: keys,
+ vals: vals,
+ adapter: p.adapter,
+ }, nil
+}
+
+// planCreateObj generates an object construction Interpretable.
+func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
+ obj := expr.GetStructExpr()
+ typeName := obj.MessageName
+ var defined bool
+ for _, qualifiedTypeName := range p.pkg.ResolveCandidateNames(typeName) {
+ if _, found := p.provider.FindType(qualifiedTypeName); found {
+ typeName = qualifiedTypeName
+ defined = true
+ break
+ }
+ }
+ if !defined {
+ return nil, fmt.Errorf("unknown type: %s", typeName)
+ }
+ entries := obj.GetEntries()
+ fields := make([]string, len(entries))
+ vals := make([]Interpretable, len(entries))
+ for i, entry := range entries {
+ fields[i] = entry.GetFieldKey()
+ val, err := p.Plan(entry.GetValue())
+ if err != nil {
+ return nil, err
+ }
+ vals[i] = val
+ }
+ return &evalObj{
+ id: expr.Id,
+ typeName: typeName,
+ fields: fields,
+ vals: vals,
+ provider: p.provider,
+ }, nil
+}
+
+// planComprehension generates an Interpretable fold operation.
+func (p *planner) planComprehension(expr *exprpb.Expr) (Interpretable, error) {
+ fold := expr.GetComprehensionExpr()
+ accu, err := p.Plan(fold.GetAccuInit())
+ if err != nil {
+ return nil, err
+ }
+ iterRange, err := p.Plan(fold.GetIterRange())
+ if err != nil {
+ return nil, err
+ }
+ cond, err := p.Plan(fold.GetLoopCondition())
+ if err != nil {
+ return nil, err
+ }
+ step, err := p.Plan(fold.GetLoopStep())
+ if err != nil {
+ return nil, err
+ }
+ result, err := p.Plan(fold.GetResult())
+ if err != nil {
+ return nil, err
+ }
+ return &evalFold{
+ id: expr.Id,
+ accuVar: fold.AccuVar,
+ accu: accu,
+ iterVar: fold.IterVar,
+ iterRange: iterRange,
+ cond: cond,
+ step: step,
+ result: result,
+ }, nil
+}
+
+// planConst generates a constant valued Interpretable.
+func (p *planner) planConst(expr *exprpb.Expr) (Interpretable, error) {
+ val, err := p.constValue(expr.GetConstExpr())
+ if err != nil {
+ return nil, err
+ }
+ return &evalConst{
+ id: expr.Id,
+ val: val,
+ }, nil
+}
+
+// constValue converts a proto Constant value to a ref.Val.
+func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) {
+ switch c.ConstantKind.(type) {
+ case *exprpb.Constant_BoolValue:
+ return types.Bool(c.GetBoolValue()), nil
+ case *exprpb.Constant_BytesValue:
+ return types.Bytes(c.GetBytesValue()), nil
+ case *exprpb.Constant_DoubleValue:
+ return types.Double(c.GetDoubleValue()), nil
+ case *exprpb.Constant_Int64Value:
+ return types.Int(c.GetInt64Value()), nil
+ case *exprpb.Constant_NullValue:
+ return types.Null(c.GetNullValue()), nil
+ case *exprpb.Constant_StringValue:
+ return types.String(c.GetStringValue()), nil
+ case *exprpb.Constant_Uint64Value:
+ return types.Uint(c.GetUint64Value()), nil
+ }
+ return nil, fmt.Errorf("unknown constant type: %v", c)
+}
+
+// idResolver returns a function that resolves a Select expression to an identifier or field
+// selection based on the operand being of Unknown type.
+func (p *planner) idResolver(sel *exprpb.Expr_Select) func(Activation) (ref.Val, bool) {
+ // TODO: ensure id resolution prefers the most specific identifier rather than the least
+ // specific to be consistent with the check id resolution.
+ validIdent := true
+ resolvedIdent := false
+ ident := sel.Field
+ op := sel.Operand
+ for validIdent && !resolvedIdent {
+ switch op.ExprKind.(type) {
+ case *exprpb.Expr_IdentExpr:
+ ident = op.GetIdentExpr().Name + "." + ident
+ resolvedIdent = true
+ case *exprpb.Expr_SelectExpr:
+ nested := op.GetSelectExpr()
+ ident = nested.GetField() + "." + ident
+ op = nested.Operand
+ default:
+ validIdent = false
+ }
+ }
+ return func(ctx Activation) (ref.Val, bool) {
+ for _, id := range p.pkg.ResolveCandidateNames(ident) {
+ if object, found := ctx.ResolveName(id); found {
+ return object, found
+ }
+ if typeIdent, found := p.provider.FindIdent(id); found {
+ return typeIdent, found
+ }
+ }
+ return nil, false
+ }
+}
+
+type evalIdent struct {
+ id int64
+ name string
+ provider ref.TypeProvider
+}
+
+// ID implements the Interpretable interface method.
+func (id *evalIdent) ID() int64 {
+ return id.id
+}
+
+// Eval implements the Interpretable interface method.
+func (id *evalIdent) Eval(ctx Activation) ref.Val {
+ val, found := ctx.ResolveName(id.name)
+ if found {
+ return val
+ }
+ typeVal, found := id.provider.FindIdent(id.name)
+ if found {
+ return typeVal
+ }
+ return types.Unknown{id.id}
+
+}
+
+type evalSelect struct {
+ id int64
+ op Interpretable
+ field types.String
+ resolveID func(Activation) (ref.Val, bool)
+}
+
+// ID implements the Interpretable interface method.
+func (sel *evalSelect) ID() int64 {
+ return sel.id
+}
+
+// Eval implements the Interpretable interface method.
+func (sel *evalSelect) Eval(ctx Activation) ref.Val {
+ // If the select is actually a qualified identifier return.
+ if resolve, found := sel.resolveID(ctx); found {
+ return resolve
+ }
+ // Otherwise, evaluate the operand and select the field.
+ obj := sel.op.Eval(ctx)
+ indexer, ok := obj.(traits.Indexer)
+ if !ok {
+ return types.ValOrErr(obj, "invalid type for field selection.")
+ }
+ return indexer.Get(sel.field)
+}
+
+type evalTestOnly struct {
+ id int64
+ op Interpretable
+ field types.String
+}
+
+// ID implements the Interpretable interface method.
+func (test *evalTestOnly) ID() int64 {
+ return test.id
+}
+
+// Eval implements the Interpretable interface method.
+func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
+ obj := test.op.Eval(ctx)
+ tester, ok := obj.(traits.FieldTester)
+ if ok {
+ return tester.IsSet(test.field)
+ }
+ container, ok := obj.(traits.Container)
+ if ok {
+ return container.Contains(test.field)
+ }
+ return types.ValOrErr(obj, "invalid type for field selection.")
+
+}
+
+type evalConst struct {
+ id int64
+ val ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (cons *evalConst) ID() int64 {
+ return cons.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cons *evalConst) Eval(ctx Activation) ref.Val {
+ return cons.val
+}
+
+type evalOr struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalOr) ID() int64 {
+ return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalOr) Eval(ctx Activation) ref.Val {
+ // short-circuit lhs.
+ lVal := or.lhs.Eval(ctx)
+ lBool, lok := lVal.(types.Bool)
+ if lok && lBool == types.True {
+ return types.True
+ }
+ // short-circuit on rhs.
+ rVal := or.rhs.Eval(ctx)
+ rBool, rok := rVal.(types.Bool)
+ if rok && rBool == types.True {
+ return types.True
+ }
+ // return if both sides are bool false.
+ if lok && rok {
+ return types.False
+ }
+ // TODO: return both values as a set if both are unknown or error.
+ // prefer left unknown to right unknown.
+ if types.IsUnknown(lVal) {
+ return lVal
+ }
+ if types.IsUnknown(rVal) {
+ return rVal
+ }
+ // if the left-hand side is non-boolean return it as the error.
+ return types.ValOrErr(lVal, "no such overload")
+}
+
+type evalAnd struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalAnd) ID() int64 {
+ return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalAnd) Eval(ctx Activation) ref.Val {
+ // short-circuit lhs.
+ lVal := and.lhs.Eval(ctx)
+ lBool, lok := lVal.(types.Bool)
+ if lok && lBool == types.False {
+ return types.False
+ }
+ // short-circuit on rhs.
+ rVal := and.rhs.Eval(ctx)
+ rBool, rok := rVal.(types.Bool)
+ if rok && rBool == types.False {
+ return types.False
+ }
+ // return if both sides are bool true.
+ if lok && rok {
+ return types.True
+ }
+ // TODO: return both values as a set if both are unknown or error.
+ // prefer left unknown to right unknown.
+ if types.IsUnknown(lVal) {
+ return lVal
+ }
+ if types.IsUnknown(rVal) {
+ return rVal
+ }
+ // if the left-hand side is non-boolean return it as the error.
+ return types.ValOrErr(lVal, "no such overload")
+}
+
+type evalConditional struct {
+ id int64
+ expr Interpretable
+ truthy Interpretable
+ falsy Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalConditional) ID() int64 {
+ return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalConditional) Eval(ctx Activation) ref.Val {
+ condVal := cond.expr.Eval(ctx)
+ condBool, ok := condVal.(types.Bool)
+ if !ok {
+ return types.ValOrErr(condVal, "no such overload")
+ }
+ if condBool {
+ return cond.truthy.Eval(ctx)
+ }
+ return cond.falsy.Eval(ctx)
+}
+
+type evalEq struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (eq *evalEq) ID() int64 {
+ return eq.id
+}
+
+// Eval implements the Interpretable interface method.
+func (eq *evalEq) Eval(ctx Activation) ref.Val {
+ lVal := eq.lhs.Eval(ctx)
+ rVal := eq.rhs.Eval(ctx)
+ return lVal.Equal(rVal)
+}
+
+type evalNe struct {
+ id int64
+ lhs Interpretable
+ rhs Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (ne *evalNe) ID() int64 {
+ return ne.id
+}
+
+// Eval implements the Interpretable interface method.
+func (ne *evalNe) Eval(ctx Activation) ref.Val {
+ lVal := ne.lhs.Eval(ctx)
+ rVal := ne.rhs.Eval(ctx)
+ eqVal := lVal.Equal(rVal)
+ eqBool, ok := eqVal.(types.Bool)
+ if !ok {
+ if types.IsUnknown(eqVal) {
+ return eqVal
+ }
+ return types.NewErr("no such overload: _!=_")
+ }
+ return !eqBool
+}
+
+type evalZeroArity struct {
+ id int64
+ impl functions.FunctionOp
+}
+
+// ID implements the Interpretable interface method.
+func (zero *evalZeroArity) ID() int64 {
+ return zero.id
+}
+
+// Eval implements the Interpretable interface method.
+func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
+ return zero.impl()
+}
+
+type evalUnary struct {
+ id int64
+ function string
+ overload string
+ arg Interpretable
+ trait int
+ impl functions.UnaryOp
+}
+
+// ID implements the Interpretable interface method.
+func (un *evalUnary) ID() int64 {
+ return un.id
+}
+
+// Eval implements the Interpretable interface method.
+func (un *evalUnary) Eval(ctx Activation) ref.Val {
+ argVal := un.arg.Eval(ctx)
+ // Early return if the argument to the function is unknown or error.
+ if types.IsUnknownOrError(argVal) {
+ return argVal
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if un.impl != nil && (un.trait == 0 || argVal.Type().HasTrait(un.trait)) {
+ return un.impl(argVal)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if argVal.Type().HasTrait(traits.ReceiverType) {
+ return argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})
+ }
+ return types.NewErr("no such overload: %s", un.function)
+}
+
+type evalBinary struct {
+ id int64
+ function string
+ overload string
+ lhs Interpretable
+ rhs Interpretable
+ trait int
+ impl functions.BinaryOp
+}
+
+// ID implements the Interpretable interface method.
+func (bin *evalBinary) ID() int64 {
+ return bin.id
+}
+
+// Eval implements the Interpretable interface method.
+func (bin *evalBinary) Eval(ctx Activation) ref.Val {
+ lVal := bin.lhs.Eval(ctx)
+ rVal := bin.rhs.Eval(ctx)
+ // Early return if any argument to the function is unknown or error.
+ if types.IsUnknownOrError(lVal) {
+ return lVal
+ }
+ if types.IsUnknownOrError(rVal) {
+ return rVal
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ if bin.impl != nil && (bin.trait == 0 || lVal.Type().HasTrait(bin.trait)) {
+ return bin.impl(lVal, rVal)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if lVal.Type().HasTrait(traits.ReceiverType) {
+ return lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})
+ }
+ return types.NewErr("no such overload: %s", bin.function)
+}
+
+type evalVarArgs struct {
+ id int64
+ function string
+ overload string
+ args []Interpretable
+ trait int
+ impl functions.FunctionOp
+}
+
+// ID implements the Interpretable interface method.
+func (fn *evalVarArgs) ID() int64 {
+ return fn.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
+ argVals := make([]ref.Val, len(fn.args), len(fn.args))
+ // Early return if any argument to the function is unknown or error.
+ for i, arg := range fn.args {
+ argVals[i] = arg.Eval(ctx)
+ if types.IsUnknownOrError(argVals[i]) {
+ return argVals[i]
+ }
+ }
+ // If the implementation is bound and the argument value has the right traits required to
+ // invoke it, then call the implementation.
+ arg0 := argVals[0]
+ if fn.impl != nil && (fn.trait == 0 || arg0.Type().HasTrait(fn.trait)) {
+ return fn.impl(argVals...)
+ }
+ // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+ // operand (arg0).
+ if arg0.Type().HasTrait(traits.ReceiverType) {
+ return arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])
+ }
+ return types.NewErr("no such overload: %s", fn.function)
+}
+
+type evalList struct {
+ id int64
+ elems []Interpretable
+ adapter ref.TypeAdapter
+}
+
+// ID implements the Interpretable interface method.
+func (l *evalList) ID() int64 {
+ return l.id
+}
+
+// Eval implements the Interpretable interface method.
+func (l *evalList) Eval(ctx Activation) ref.Val {
+ elemVals := make([]ref.Val, len(l.elems), len(l.elems))
+ // If any argument is unknown or error early terminate.
+ for i, elem := range l.elems {
+ elemVal := elem.Eval(ctx)
+ if types.IsUnknownOrError(elemVal) {
+ return elemVal
+ }
+ elemVals[i] = elemVal
+ }
+ return types.NewDynamicList(l.adapter, elemVals)
+}
+
+type evalMap struct {
+ id int64
+ keys []Interpretable
+ vals []Interpretable
+ adapter ref.TypeAdapter
+}
+
+// ID implements the Interpretable interface method.
+func (m *evalMap) ID() int64 {
+ return m.id
+}
+
+// Eval implements the Interpretable interface method.
+func (m *evalMap) Eval(ctx Activation) ref.Val {
+ entries := make(map[ref.Val]ref.Val)
+ // If any argument is unknown or error early terminate.
+ for i, key := range m.keys {
+ keyVal := key.Eval(ctx)
+ if types.IsUnknownOrError(keyVal) {
+ return keyVal
+ }
+ valVal := m.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(valVal) {
+ return valVal
+ }
+ entries[keyVal] = valVal
+ }
+ return types.NewDynamicMap(m.adapter, entries)
+}
+
+type evalObj struct {
+ id int64
+ typeName string
+ fields []string
+ vals []Interpretable
+ provider ref.TypeProvider
+}
+
+// ID implements the Interpretable interface method.
+func (o *evalObj) ID() int64 {
+ return o.id
+}
+
+// Eval implements the Interpretable interface method.
+func (o *evalObj) Eval(ctx Activation) ref.Val {
+ fieldVals := make(map[string]ref.Val)
+ // If any argument is unknown or error early terminate.
+ for i, field := range o.fields {
+ val := o.vals[i].Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
+ fieldVals[field] = val
+ }
+ return o.provider.NewValue(o.typeName, fieldVals)
+}
+
+type evalFold struct {
+ id int64
+ accuVar string
+ iterVar string
+ iterRange Interpretable
+ accu Interpretable
+ cond Interpretable
+ step Interpretable
+ result Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (fold *evalFold) ID() int64 {
+ return fold.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fold *evalFold) Eval(ctx Activation) ref.Val {
+ foldRange := fold.iterRange.Eval(ctx)
+ if !foldRange.Type().HasTrait(traits.IterableType) {
+ return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
+ }
+ // Configure the fold activation with the accumulator initial value.
+ accuCtx := varActivationPool.Get().(*varActivation)
+ accuCtx.parent = ctx
+ accuCtx.name = fold.accuVar
+ accuCtx.val = fold.accu.Eval(ctx)
+ iterCtx := varActivationPool.Get().(*varActivation)
+ iterCtx.parent = accuCtx
+ iterCtx.name = fold.iterVar
+ it := foldRange.(traits.Iterable).Iterator()
+ for it.HasNext() == types.True {
+ // Modify the iter var in the fold activation.
+ iterCtx.val = it.Next()
+
+ // Evaluate the condition, terminate the loop if false.
+ cond := fold.cond.Eval(iterCtx)
+ condBool, ok := cond.(types.Bool)
+ if !types.IsUnknown(cond) && ok && condBool != types.True {
+ break
+ }
+
+ // Evaluate the evaluation step into accu var.
+ accuCtx.val = fold.step.Eval(iterCtx)
+ }
+ // Compute the result.
+ res := fold.result.Eval(accuCtx)
+ varActivationPool.Put(iterCtx)
+ varActivationPool.Put(accuCtx)
+ return res
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
new file mode 100644
index 00000000000..8858dfa9bc2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -0,0 +1,256 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+type astPruner struct {
+ expr *exprpb.Expr
+ state EvalState
+}
+
+// TODO Consider having a separate walk of the AST that finds common
+// subexpressions. This can be called before or after constant folding to find
+// common subexpressions.
+
+// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
+// Given AST is copied on write and a new AST is returned.
+// Couple of typical use cases this interface would be:
+//
+// A)
+// 1) Evaluate expr with some unknowns,
+// 2) If result is unknown:
+// a) PruneAst
+// b) Goto 1
+// Functional call results which are known would be effectively cached across
+// iterations.
+//
+// B)
+// 1) Compile the expression (maybe via a service and maybe after checking a
+ // compiled expression does not exist in local cache)
+// 2) Prepare the environment and the interpreter. Activation might be empty.
+// 3) Eval the expression. This might return unknown or error or a concrete
+// value.
+// 4) PruneAst
+ // 5) Maybe cache the expression
+// This is effectively constant folding the expression. How the environment is
+// prepared in step 2 is flexible. For example, If the caller caches the
+// compiled and constant folded expressions, but is not willing to constant
+ // fold (and thus cache results of) some external calls, then they can prepare
+// the overloads accordingly.
+func PruneAst(expr *exprpb.Expr, state EvalState) *exprpb.Expr {
+ pruner := &astPruner{
+ expr: expr,
+ state: state}
+ newExpr, _ := pruner.prune(expr)
+ return newExpr
+}
+
+func (p *astPruner) createLiteral(node *exprpb.Expr, val *exprpb.Constant) *exprpb.Expr {
+ newExpr := *node
+ newExpr.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: val}
+ return &newExpr
+}
+
+func (p *astPruner) maybePruneAndOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if !p.existsWithUnknownValue(node.GetId()) {
+ return nil, false
+ }
+
+ call := node.GetCallExpr()
+
+ // We know result is unknown, so we have at least one unknown arg
+ // and if one side is a known value, we know we can ignore it.
+ if p.existsWithKnownValue(call.Args[0].GetId()) {
+ return call.Args[1], true
+ }
+
+ if p.existsWithKnownValue(call.Args[1].GetId()) {
+ return call.Args[0], true
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if !p.existsWithUnknownValue(node.GetId()) {
+ return nil, false
+ }
+
+ call := node.GetCallExpr()
+ condVal, condValueExists := p.value(call.Args[0].GetId())
+ if !condValueExists || types.IsUnknownOrError(condVal) {
+ return nil, false
+ }
+
+ if condVal.Value().(bool) {
+ return call.Args[1], true
+ }
+ return call.Args[2], true
+}
+
+func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ if call.Function == operators.LogicalOr || call.Function == operators.LogicalAnd {
+ return p.maybePruneAndOr(node)
+ }
+ if call.Function == operators.Conditional {
+ return p.maybePruneConditional(node)
+ }
+
+ return nil, false
+}
+
+func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if node == nil {
+ return node, false
+ }
+ if val, valueExists := p.value(node.GetId()); valueExists && !types.IsUnknownOrError(val) {
+
+ // TODO if we have a list or struct, create a list/struct
+ // expression. This is useful especially if these expressions
+ // are result of a function call.
+
+ switch val.Type() {
+ case types.BoolType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: val.Value().(bool)}}), true
+ case types.IntType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: val.Value().(int64)}}), true
+ case types.UintType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: val.Value().(uint64)}}), true
+ case types.StringType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: val.Value().(string)}}), true
+ case types.DoubleType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: val.Value().(float64)}}), true
+ case types.BytesType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: val.Value().([]byte)}}), true
+ case types.NullType:
+ return p.createLiteral(node,
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: val.Value().(structpb.NullValue)}}), true
+ }
+ }
+
+ // We have either an unknown/error value, or something we don't want to
+ // transform, or expression was not evaluated. If possible, drill down
+ // more.
+
+ switch node.ExprKind.(type) {
+ case *exprpb.Expr_SelectExpr:
+ if operand, pruned := p.prune(node.GetSelectExpr().Operand); pruned {
+ newExpr := *node
+ newSelect := *newExpr.GetSelectExpr()
+ newSelect.Operand = operand
+ newExpr.GetExprKind().(*exprpb.Expr_SelectExpr).SelectExpr = &newSelect
+ return &newExpr, true
+ }
+ case *exprpb.Expr_CallExpr:
+ if newExpr, pruned := p.maybePruneFunction(node); pruned {
+ newExpr, _ = p.prune(newExpr)
+ return newExpr, true
+ }
+ newCall := *node.GetCallExpr()
+ var prunedCall bool
+ var prunedArg bool
+ for i, arg := range node.GetCallExpr().Args {
+ if newCall.Args[i], prunedArg = p.prune(arg); prunedArg {
+ prunedCall = true
+ }
+ }
+ if newTarget, prunedTarget := p.prune(node.GetCallExpr().Target); prunedTarget {
+ prunedCall = true
+ newCall.Target = newTarget
+ }
+ if prunedCall {
+ newExpr := *node
+ newExpr.GetExprKind().(*exprpb.Expr_CallExpr).CallExpr = &newCall
+ return &newExpr, true
+ }
+ case *exprpb.Expr_ListExpr:
+ newList := *node.GetListExpr()
+ var prunedList bool
+ var prunedElem bool
+ for i, elem := range node.GetListExpr().Elements {
+ if newList.Elements[i], prunedElem = p.prune(elem); prunedElem {
+ prunedList = true
+ }
+ }
+ if prunedList {
+ newExpr := *node
+ newExpr.GetExprKind().(*exprpb.Expr_ListExpr).ListExpr = &newList
+ return &newExpr, true
+ }
+ case *exprpb.Expr_StructExpr:
+ newStruct := *node.GetStructExpr()
+ var prunedStruct bool
+ var prunedEntry bool
+ for i, entry := range node.GetStructExpr().Entries {
+ newEntry := *entry
+ if newKey, pruned := p.prune(entry.GetMapKey()); pruned {
+ prunedEntry = true
+ newEntry.GetKeyKind().(*exprpb.Expr_CreateStruct_Entry_MapKey).MapKey = newKey
+ }
+ if newValue, pruned := p.prune(entry.Value); pruned {
+ prunedEntry = true
+ newEntry.Value = newValue
+ }
+ if prunedEntry {
+ prunedStruct = true
+ newStruct.Entries[i] = &newEntry
+ }
+ }
+ if prunedStruct {
+ newExpr := *node
+ newExpr.GetExprKind().(*exprpb.Expr_StructExpr).StructExpr = &newStruct
+ return &newExpr, true
+ }
+ case *exprpb.Expr_ComprehensionExpr:
+ if newIterRange, pruned := p.prune(node.GetComprehensionExpr().IterRange); pruned {
+ newExpr := *node
+ newCompre := *newExpr.GetComprehensionExpr()
+ newCompre.IterRange = newIterRange
+ newExpr.GetExprKind().(*exprpb.Expr_ComprehensionExpr).ComprehensionExpr = &newCompre
+ return &newExpr, true
+ }
+ }
+ return node, false
+}
+
+func (p *astPruner) value(id int64) (ref.Val, bool) {
+ val, found := p.state.Value(id)
+ return val, (found && val != nil)
+}
+
+func (p *astPruner) existsWithUnknownValue(id int64) bool {
+ val, valueExists := p.value(id)
+ return valueExists && types.IsUnknown(val)
+}
+
+func (p *astPruner) existsWithKnownValue(id int64) bool {
+ val, valueExists := p.value(id)
+ return valueExists && !types.IsUnknown(val)
+}
diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go
new file mode 100644
index 00000000000..140beb9532e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/errors.go
@@ -0,0 +1,42 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+
+ "github.com/google/cel-go/common"
+)
+
+// parseErrors is a specialization of Errors.
+type parseErrors struct {
+ *common.Errors
+}
+
+func (e *parseErrors) syntaxError(l common.Location, message string) {
+ e.ReportError(l, fmt.Sprintf("Syntax error: %s", message))
+}
+
+func (e *parseErrors) invalidHasArgument(l common.Location) {
+ e.ReportError(l, "Argument to the function 'has' must be a field selection")
+}
+
+func (e *parseErrors) argumentIsNotIdent(l common.Location) {
+ e.ReportError(l, "Argument must be a simple name")
+}
+
+func (e *parseErrors) notAQualifiedName(l common.Location) {
+ e.ReportError(l, "Expected a qualified name")
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
new file mode 100644
index 00000000000..9f27b266fc7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -0,0 +1,195 @@
+// Generated from /Users/tswadell/lace/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr"
+
+// BaseCELListener is a complete listener for a parse tree produced by CELParser.
+type BaseCELListener struct{}
+
+var _ CELListener = &BaseCELListener{}
+
+// VisitTerminal is called when a terminal node is visited.
+func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {}
+
+// VisitErrorNode is called when an error node is visited.
+func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+// EnterEveryRule is called when any rule is entered.
+func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}
+
+// ExitEveryRule is called when any rule is exited.
+func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}
+
+// EnterStart is called when production start is entered.
+func (s *BaseCELListener) EnterStart(ctx *StartContext) {}
+
+// ExitStart is called when production start is exited.
+func (s *BaseCELListener) ExitStart(ctx *StartContext) {}
+
+// EnterExpr is called when production expr is entered.
+func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {}
+
+// ExitExpr is called when production expr is exited.
+func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {}
+
+// EnterConditionalOr is called when production conditionalOr is entered.
+func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {}
+
+// ExitConditionalOr is called when production conditionalOr is exited.
+func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {}
+
+// EnterConditionalAnd is called when production conditionalAnd is entered.
+func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {}
+
+// ExitConditionalAnd is called when production conditionalAnd is exited.
+func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {}
+
+// EnterRelation is called when production relation is entered.
+func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {}
+
+// ExitRelation is called when production relation is exited.
+func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {}
+
+// EnterCalc is called when production calc is entered.
+func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {}
+
+// ExitCalc is called when production calc is exited.
+func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {}
+
+// EnterMemberExpr is called when production MemberExpr is entered.
+func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {}
+
+// ExitMemberExpr is called when production MemberExpr is exited.
+func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {}
+
+// EnterLogicalNot is called when production LogicalNot is entered.
+func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {}
+
+// ExitLogicalNot is called when production LogicalNot is exited.
+func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {}
+
+// EnterNegate is called when production Negate is entered.
+func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
+
+// ExitNegate is called when production Negate is exited.
+func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
+
+// EnterSelectOrCall is called when production SelectOrCall is entered.
+func (s *BaseCELListener) EnterSelectOrCall(ctx *SelectOrCallContext) {}
+
+// ExitSelectOrCall is called when production SelectOrCall is exited.
+func (s *BaseCELListener) ExitSelectOrCall(ctx *SelectOrCallContext) {}
+
+// EnterPrimaryExpr is called when production PrimaryExpr is entered.
+func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// ExitPrimaryExpr is called when production PrimaryExpr is exited.
+func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {}
+
+// EnterIndex is called when production Index is entered.
+func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
+
+// ExitIndex is called when production Index is exited.
+func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
+
+// EnterCreateMessage is called when production CreateMessage is entered.
+func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
+
+// ExitCreateMessage is called when production CreateMessage is exited.
+func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
+
+// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
+func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited.
+func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
+
+// EnterNested is called when production Nested is entered.
+func (s *BaseCELListener) EnterNested(ctx *NestedContext) {}
+
+// ExitNested is called when production Nested is exited.
+func (s *BaseCELListener) ExitNested(ctx *NestedContext) {}
+
+// EnterCreateList is called when production CreateList is entered.
+func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {}
+
+// ExitCreateList is called when production CreateList is exited.
+func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {}
+
+// EnterCreateStruct is called when production CreateStruct is entered.
+func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
+
+// ExitCreateStruct is called when production CreateStruct is exited.
+func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
+
+// EnterConstantLiteral is called when production ConstantLiteral is entered.
+func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// ExitConstantLiteral is called when production ConstantLiteral is exited.
+func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {}
+
+// EnterExprList is called when production exprList is entered.
+func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
+
+// ExitExprList is called when production exprList is exited.
+func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
+
+// EnterFieldInitializerList is called when production fieldInitializerList is entered.
+func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// ExitFieldInitializerList is called when production fieldInitializerList is exited.
+func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
+
+// EnterMapInitializerList is called when production mapInitializerList is entered.
+func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
+
+// ExitMapInitializerList is called when production mapInitializerList is exited.
+func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
+
+// EnterInt is called when production Int is entered.
+func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
+
+// ExitInt is called when production Int is exited.
+func (s *BaseCELListener) ExitInt(ctx *IntContext) {}
+
+// EnterUint is called when production Uint is entered.
+func (s *BaseCELListener) EnterUint(ctx *UintContext) {}
+
+// ExitUint is called when production Uint is exited.
+func (s *BaseCELListener) ExitUint(ctx *UintContext) {}
+
+// EnterDouble is called when production Double is entered.
+func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {}
+
+// ExitDouble is called when production Double is exited.
+func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {}
+
+// EnterString is called when production String is entered.
+func (s *BaseCELListener) EnterString(ctx *StringContext) {}
+
+// ExitString is called when production String is exited.
+func (s *BaseCELListener) ExitString(ctx *StringContext) {}
+
+// EnterBytes is called when production Bytes is entered.
+func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {}
+
+// ExitBytes is called when production Bytes is exited.
+func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {}
+
+// EnterBoolTrue is called when production BoolTrue is entered.
+func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {}
+
+// ExitBoolTrue is called when production BoolTrue is exited.
+func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {}
+
+// EnterBoolFalse is called when production BoolFalse is entered.
+func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {}
+
+// ExitBoolFalse is called when production BoolFalse is exited.
+func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {}
+
+// EnterNull is called when production Null is entered.
+func (s *BaseCELListener) EnterNull(ctx *NullContext) {}
+
+// ExitNull is called when production Null is exited.
+func (s *BaseCELListener) ExitNull(ctx *NullContext) {}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
new file mode 100644
index 00000000000..e4f32e619b3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -0,0 +1,124 @@
+// Generated from /Users/tswadell/lace/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr"
+
+type BaseCELVisitor struct {
+ *antlr.BaseParseTreeVisitor
+}
+
+func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitSelectOrCall(ctx *SelectOrCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolTrue(ctx *BoolTrueContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} {
+ return v.VisitChildren(ctx)
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
new file mode 100644
index 00000000000..e474a5b9136
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -0,0 +1,319 @@
+// Generated from /Users/tswadell/lace/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7.
+
+package gen
+
+import (
+ "fmt"
+ "unicode"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = unicode.IsLetter
+
+var serializedLexerAtn = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 38, 425,
+ 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7,
+ 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12,
+ 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4,
+ 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23,
+ 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9,
+ 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33,
+ 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4,
+ 39, 9, 39, 4, 40, 9, 40, 4, 41, 9, 41, 4, 42, 9, 42, 4, 43, 9, 43, 4, 44,
+ 9, 44, 4, 45, 9, 45, 4, 46, 9, 46, 4, 47, 9, 47, 4, 48, 9, 48, 3, 2, 3,
+ 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3,
+ 6, 3, 7, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10,
+ 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3,
+ 16, 3, 16, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21,
+ 3, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3,
+ 26, 3, 27, 3, 27, 3, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 28, 3, 28,
+ 3, 28, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3,
+ 32, 3, 32, 3, 33, 3, 33, 5, 33, 179, 10, 33, 3, 33, 6, 33, 182, 10, 33,
+ 13, 33, 14, 33, 183, 3, 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 36, 3,
+ 36, 5, 36, 194, 10, 36, 3, 37, 3, 37, 3, 37, 3, 38, 3, 38, 3, 38, 3, 38,
+ 3, 38, 3, 39, 3, 39, 3, 39, 3, 39, 3, 39, 3, 40, 3, 40, 3, 40, 3, 40, 3,
+ 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40, 3, 40,
+ 3, 40, 3, 40, 3, 40, 5, 40, 227, 10, 40, 3, 41, 6, 41, 230, 10, 41, 13,
+ 41, 14, 41, 231, 3, 41, 3, 41, 3, 42, 3, 42, 3, 42, 3, 42, 7, 42, 240,
+ 10, 42, 12, 42, 14, 42, 243, 11, 42, 3, 42, 3, 42, 3, 43, 6, 43, 248, 10,
+ 43, 13, 43, 14, 43, 249, 3, 43, 3, 43, 6, 43, 254, 10, 43, 13, 43, 14,
+ 43, 255, 3, 43, 5, 43, 259, 10, 43, 3, 43, 6, 43, 262, 10, 43, 13, 43,
+ 14, 43, 263, 3, 43, 3, 43, 3, 43, 3, 43, 6, 43, 270, 10, 43, 13, 43, 14,
+ 43, 271, 3, 43, 5, 43, 275, 10, 43, 5, 43, 277, 10, 43, 3, 44, 6, 44, 280,
+ 10, 44, 13, 44, 14, 44, 281, 3, 44, 3, 44, 3, 44, 3, 44, 6, 44, 288, 10,
+ 44, 13, 44, 14, 44, 289, 5, 44, 292, 10, 44, 3, 45, 6, 45, 295, 10, 45,
+ 13, 45, 14, 45, 296, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 3, 45, 6, 45, 305,
+ 10, 45, 13, 45, 14, 45, 306, 3, 45, 3, 45, 5, 45, 311, 10, 45, 3, 46, 3,
+ 46, 3, 46, 7, 46, 316, 10, 46, 12, 46, 14, 46, 319, 11, 46, 3, 46, 3, 46,
+ 3, 46, 3, 46, 7, 46, 325, 10, 46, 12, 46, 14, 46, 328, 11, 46, 3, 46, 3,
+ 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 337, 10, 46, 12, 46, 14,
+ 46, 340, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46,
+ 3, 46, 7, 46, 351, 10, 46, 12, 46, 14, 46, 354, 11, 46, 3, 46, 3, 46, 3,
+ 46, 3, 46, 3, 46, 3, 46, 7, 46, 362, 10, 46, 12, 46, 14, 46, 365, 11, 46,
+ 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 372, 10, 46, 12, 46, 14, 46,
+ 375, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7,
+ 46, 385, 10, 46, 12, 46, 14, 46, 388, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46,
+ 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 3, 46, 7, 46, 400, 10, 46, 12, 46, 14,
+ 46, 403, 11, 46, 3, 46, 3, 46, 3, 46, 3, 46, 5, 46, 409, 10, 46, 3, 47,
+ 3, 47, 3, 47, 3, 48, 3, 48, 5, 48, 416, 10, 48, 3, 48, 3, 48, 3, 48, 7,
+ 48, 421, 10, 48, 12, 48, 14, 48, 424, 11, 48, 6, 338, 352, 386, 401, 2,
+ 49, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12,
+ 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21,
+ 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30,
+ 59, 2, 61, 2, 63, 2, 65, 2, 67, 2, 69, 2, 71, 2, 73, 2, 75, 2, 77, 2, 79,
+ 2, 81, 31, 83, 32, 85, 33, 87, 34, 89, 35, 91, 36, 93, 37, 95, 38, 3, 2,
+ 18, 4, 2, 67, 92, 99, 124, 4, 2, 71, 71, 103, 103, 4, 2, 45, 45, 47, 47,
+ 5, 2, 50, 59, 67, 72, 99, 104, 4, 2, 84, 84, 116, 116, 12, 2, 36, 36, 41,
+ 41, 65, 65, 94, 94, 98, 100, 104, 104, 112, 112, 116, 116, 118, 118, 120,
+ 120, 4, 2, 90, 90, 122, 122, 5, 2, 11, 12, 14, 15, 34, 34, 3, 2, 12, 12,
+ 4, 2, 87, 87, 119, 119, 6, 2, 12, 12, 15, 15, 36, 36, 94, 94, 6, 2, 12,
+ 12, 15, 15, 41, 41, 94, 94, 3, 2, 94, 94, 5, 2, 12, 12, 15, 15, 36, 36,
+ 5, 2, 12, 12, 15, 15, 41, 41, 4, 2, 68, 68, 100, 100, 2, 458, 2, 3, 3,
+ 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3,
+ 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19,
+ 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2,
+ 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2,
+ 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2,
+ 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2,
+ 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3,
+ 2, 2, 2, 2, 81, 3, 2, 2, 2, 2, 83, 3, 2, 2, 2, 2, 85, 3, 2, 2, 2, 2, 87,
+ 3, 2, 2, 2, 2, 89, 3, 2, 2, 2, 2, 91, 3, 2, 2, 2, 2, 93, 3, 2, 2, 2, 2,
+ 95, 3, 2, 2, 2, 3, 97, 3, 2, 2, 2, 5, 100, 3, 2, 2, 2, 7, 103, 3, 2, 2,
+ 2, 9, 106, 3, 2, 2, 2, 11, 108, 3, 2, 2, 2, 13, 111, 3, 2, 2, 2, 15, 114,
+ 3, 2, 2, 2, 17, 116, 3, 2, 2, 2, 19, 119, 3, 2, 2, 2, 21, 122, 3, 2, 2,
+ 2, 23, 124, 3, 2, 2, 2, 25, 126, 3, 2, 2, 2, 27, 128, 3, 2, 2, 2, 29, 130,
+ 3, 2, 2, 2, 31, 132, 3, 2, 2, 2, 33, 134, 3, 2, 2, 2, 35, 136, 3, 2, 2,
+ 2, 37, 138, 3, 2, 2, 2, 39, 140, 3, 2, 2, 2, 41, 142, 3, 2, 2, 2, 43, 144,
+ 3, 2, 2, 2, 45, 146, 3, 2, 2, 2, 47, 148, 3, 2, 2, 2, 49, 150, 3, 2, 2,
+ 2, 51, 152, 3, 2, 2, 2, 53, 154, 3, 2, 2, 2, 55, 159, 3, 2, 2, 2, 57, 165,
+ 3, 2, 2, 2, 59, 170, 3, 2, 2, 2, 61, 172, 3, 2, 2, 2, 63, 174, 3, 2, 2,
+ 2, 65, 176, 3, 2, 2, 2, 67, 185, 3, 2, 2, 2, 69, 187, 3, 2, 2, 2, 71, 193,
+ 3, 2, 2, 2, 73, 195, 3, 2, 2, 2, 75, 198, 3, 2, 2, 2, 77, 203, 3, 2, 2,
+ 2, 79, 226, 3, 2, 2, 2, 81, 229, 3, 2, 2, 2, 83, 235, 3, 2, 2, 2, 85, 276,
+ 3, 2, 2, 2, 87, 291, 3, 2, 2, 2, 89, 310, 3, 2, 2, 2, 91, 408, 3, 2, 2,
+ 2, 93, 410, 3, 2, 2, 2, 95, 415, 3, 2, 2, 2, 97, 98, 7, 107, 2, 2, 98,
+ 99, 7, 112, 2, 2, 99, 4, 3, 2, 2, 2, 100, 101, 7, 63, 2, 2, 101, 102, 7,
+ 63, 2, 2, 102, 6, 3, 2, 2, 2, 103, 104, 7, 35, 2, 2, 104, 105, 7, 63, 2,
+ 2, 105, 8, 3, 2, 2, 2, 106, 107, 7, 62, 2, 2, 107, 10, 3, 2, 2, 2, 108,
+ 109, 7, 62, 2, 2, 109, 110, 7, 63, 2, 2, 110, 12, 3, 2, 2, 2, 111, 112,
+ 7, 64, 2, 2, 112, 113, 7, 63, 2, 2, 113, 14, 3, 2, 2, 2, 114, 115, 7, 64,
+ 2, 2, 115, 16, 3, 2, 2, 2, 116, 117, 7, 40, 2, 2, 117, 118, 7, 40, 2, 2,
+ 118, 18, 3, 2, 2, 2, 119, 120, 7, 126, 2, 2, 120, 121, 7, 126, 2, 2, 121,
+ 20, 3, 2, 2, 2, 122, 123, 7, 93, 2, 2, 123, 22, 3, 2, 2, 2, 124, 125, 7,
+ 95, 2, 2, 125, 24, 3, 2, 2, 2, 126, 127, 7, 125, 2, 2, 127, 26, 3, 2, 2,
+ 2, 128, 129, 7, 127, 2, 2, 129, 28, 3, 2, 2, 2, 130, 131, 7, 42, 2, 2,
+ 131, 30, 3, 2, 2, 2, 132, 133, 7, 43, 2, 2, 133, 32, 3, 2, 2, 2, 134, 135,
+ 7, 48, 2, 2, 135, 34, 3, 2, 2, 2, 136, 137, 7, 46, 2, 2, 137, 36, 3, 2,
+ 2, 2, 138, 139, 7, 47, 2, 2, 139, 38, 3, 2, 2, 2, 140, 141, 7, 35, 2, 2,
+ 141, 40, 3, 2, 2, 2, 142, 143, 7, 65, 2, 2, 143, 42, 3, 2, 2, 2, 144, 145,
+ 7, 60, 2, 2, 145, 44, 3, 2, 2, 2, 146, 147, 7, 45, 2, 2, 147, 46, 3, 2,
+ 2, 2, 148, 149, 7, 44, 2, 2, 149, 48, 3, 2, 2, 2, 150, 151, 7, 49, 2, 2,
+ 151, 50, 3, 2, 2, 2, 152, 153, 7, 39, 2, 2, 153, 52, 3, 2, 2, 2, 154, 155,
+ 7, 118, 2, 2, 155, 156, 7, 116, 2, 2, 156, 157, 7, 119, 2, 2, 157, 158,
+ 7, 103, 2, 2, 158, 54, 3, 2, 2, 2, 159, 160, 7, 104, 2, 2, 160, 161, 7,
+ 99, 2, 2, 161, 162, 7, 110, 2, 2, 162, 163, 7, 117, 2, 2, 163, 164, 7,
+ 103, 2, 2, 164, 56, 3, 2, 2, 2, 165, 166, 7, 112, 2, 2, 166, 167, 7, 119,
+ 2, 2, 167, 168, 7, 110, 2, 2, 168, 169, 7, 110, 2, 2, 169, 58, 3, 2, 2,
+ 2, 170, 171, 7, 94, 2, 2, 171, 60, 3, 2, 2, 2, 172, 173, 9, 2, 2, 2, 173,
+ 62, 3, 2, 2, 2, 174, 175, 4, 50, 59, 2, 175, 64, 3, 2, 2, 2, 176, 178,
+ 9, 3, 2, 2, 177, 179, 9, 4, 2, 2, 178, 177, 3, 2, 2, 2, 178, 179, 3, 2,
+ 2, 2, 179, 181, 3, 2, 2, 2, 180, 182, 5, 63, 32, 2, 181, 180, 3, 2, 2,
+ 2, 182, 183, 3, 2, 2, 2, 183, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184,
+ 66, 3, 2, 2, 2, 185, 186, 9, 5, 2, 2, 186, 68, 3, 2, 2, 2, 187, 188, 9,
+ 6, 2, 2, 188, 70, 3, 2, 2, 2, 189, 194, 5, 73, 37, 2, 190, 194, 5, 77,
+ 39, 2, 191, 194, 5, 79, 40, 2, 192, 194, 5, 75, 38, 2, 193, 189, 3, 2,
+ 2, 2, 193, 190, 3, 2, 2, 2, 193, 191, 3, 2, 2, 2, 193, 192, 3, 2, 2, 2,
+ 194, 72, 3, 2, 2, 2, 195, 196, 5, 59, 30, 2, 196, 197, 9, 7, 2, 2, 197,
+ 74, 3, 2, 2, 2, 198, 199, 5, 59, 30, 2, 199, 200, 4, 50, 53, 2, 200, 201,
+ 4, 50, 57, 2, 201, 202, 4, 50, 57, 2, 202, 76, 3, 2, 2, 2, 203, 204, 5,
+ 59, 30, 2, 204, 205, 9, 8, 2, 2, 205, 206, 5, 67, 34, 2, 206, 207, 5, 67,
+ 34, 2, 207, 78, 3, 2, 2, 2, 208, 209, 5, 59, 30, 2, 209, 210, 7, 119, 2,
+ 2, 210, 211, 5, 67, 34, 2, 211, 212, 5, 67, 34, 2, 212, 213, 5, 67, 34,
+ 2, 213, 214, 5, 67, 34, 2, 214, 227, 3, 2, 2, 2, 215, 216, 5, 59, 30, 2,
+ 216, 217, 7, 87, 2, 2, 217, 218, 5, 67, 34, 2, 218, 219, 5, 67, 34, 2,
+ 219, 220, 5, 67, 34, 2, 220, 221, 5, 67, 34, 2, 221, 222, 5, 67, 34, 2,
+ 222, 223, 5, 67, 34, 2, 223, 224, 5, 67, 34, 2, 224, 225, 5, 67, 34, 2,
+ 225, 227, 3, 2, 2, 2, 226, 208, 3, 2, 2, 2, 226, 215, 3, 2, 2, 2, 227,
+ 80, 3, 2, 2, 2, 228, 230, 9, 9, 2, 2, 229, 228, 3, 2, 2, 2, 230, 231, 3,
+ 2, 2, 2, 231, 229, 3, 2, 2, 2, 231, 232, 3, 2, 2, 2, 232, 233, 3, 2, 2,
+ 2, 233, 234, 8, 41, 2, 2, 234, 82, 3, 2, 2, 2, 235, 236, 7, 49, 2, 2, 236,
+ 237, 7, 49, 2, 2, 237, 241, 3, 2, 2, 2, 238, 240, 10, 10, 2, 2, 239, 238,
+ 3, 2, 2, 2, 240, 243, 3, 2, 2, 2, 241, 239, 3, 2, 2, 2, 241, 242, 3, 2,
+ 2, 2, 242, 244, 3, 2, 2, 2, 243, 241, 3, 2, 2, 2, 244, 245, 8, 42, 2, 2,
+ 245, 84, 3, 2, 2, 2, 246, 248, 5, 63, 32, 2, 247, 246, 3, 2, 2, 2, 248,
+ 249, 3, 2, 2, 2, 249, 247, 3, 2, 2, 2, 249, 250, 3, 2, 2, 2, 250, 251,
+ 3, 2, 2, 2, 251, 253, 7, 48, 2, 2, 252, 254, 5, 63, 32, 2, 253, 252, 3,
+ 2, 2, 2, 254, 255, 3, 2, 2, 2, 255, 253, 3, 2, 2, 2, 255, 256, 3, 2, 2,
+ 2, 256, 258, 3, 2, 2, 2, 257, 259, 5, 65, 33, 2, 258, 257, 3, 2, 2, 2,
+ 258, 259, 3, 2, 2, 2, 259, 277, 3, 2, 2, 2, 260, 262, 5, 63, 32, 2, 261,
+ 260, 3, 2, 2, 2, 262, 263, 3, 2, 2, 2, 263, 261, 3, 2, 2, 2, 263, 264,
+ 3, 2, 2, 2, 264, 265, 3, 2, 2, 2, 265, 266, 5, 65, 33, 2, 266, 277, 3,
+ 2, 2, 2, 267, 269, 7, 48, 2, 2, 268, 270, 5, 63, 32, 2, 269, 268, 3, 2,
+ 2, 2, 270, 271, 3, 2, 2, 2, 271, 269, 3, 2, 2, 2, 271, 272, 3, 2, 2, 2,
+ 272, 274, 3, 2, 2, 2, 273, 275, 5, 65, 33, 2, 274, 273, 3, 2, 2, 2, 274,
+ 275, 3, 2, 2, 2, 275, 277, 3, 2, 2, 2, 276, 247, 3, 2, 2, 2, 276, 261,
+ 3, 2, 2, 2, 276, 267, 3, 2, 2, 2, 277, 86, 3, 2, 2, 2, 278, 280, 5, 63,
+ 32, 2, 279, 278, 3, 2, 2, 2, 280, 281, 3, 2, 2, 2, 281, 279, 3, 2, 2, 2,
+ 281, 282, 3, 2, 2, 2, 282, 292, 3, 2, 2, 2, 283, 284, 7, 50, 2, 2, 284,
+ 285, 7, 122, 2, 2, 285, 287, 3, 2, 2, 2, 286, 288, 5, 67, 34, 2, 287, 286,
+ 3, 2, 2, 2, 288, 289, 3, 2, 2, 2, 289, 287, 3, 2, 2, 2, 289, 290, 3, 2,
+ 2, 2, 290, 292, 3, 2, 2, 2, 291, 279, 3, 2, 2, 2, 291, 283, 3, 2, 2, 2,
+ 292, 88, 3, 2, 2, 2, 293, 295, 5, 63, 32, 2, 294, 293, 3, 2, 2, 2, 295,
+ 296, 3, 2, 2, 2, 296, 294, 3, 2, 2, 2, 296, 297, 3, 2, 2, 2, 297, 298,
+ 3, 2, 2, 2, 298, 299, 9, 11, 2, 2, 299, 311, 3, 2, 2, 2, 300, 301, 7, 50,
+ 2, 2, 301, 302, 7, 122, 2, 2, 302, 304, 3, 2, 2, 2, 303, 305, 5, 67, 34,
+ 2, 304, 303, 3, 2, 2, 2, 305, 306, 3, 2, 2, 2, 306, 304, 3, 2, 2, 2, 306,
+ 307, 3, 2, 2, 2, 307, 308, 3, 2, 2, 2, 308, 309, 9, 11, 2, 2, 309, 311,
+ 3, 2, 2, 2, 310, 294, 3, 2, 2, 2, 310, 300, 3, 2, 2, 2, 311, 90, 3, 2,
+ 2, 2, 312, 317, 7, 36, 2, 2, 313, 316, 5, 71, 36, 2, 314, 316, 10, 12,
+ 2, 2, 315, 313, 3, 2, 2, 2, 315, 314, 3, 2, 2, 2, 316, 319, 3, 2, 2, 2,
+ 317, 315, 3, 2, 2, 2, 317, 318, 3, 2, 2, 2, 318, 320, 3, 2, 2, 2, 319,
+ 317, 3, 2, 2, 2, 320, 409, 7, 36, 2, 2, 321, 326, 7, 41, 2, 2, 322, 325,
+ 5, 71, 36, 2, 323, 325, 10, 13, 2, 2, 324, 322, 3, 2, 2, 2, 324, 323, 3,
+ 2, 2, 2, 325, 328, 3, 2, 2, 2, 326, 324, 3, 2, 2, 2, 326, 327, 3, 2, 2,
+ 2, 327, 329, 3, 2, 2, 2, 328, 326, 3, 2, 2, 2, 329, 409, 7, 41, 2, 2, 330,
+ 331, 7, 36, 2, 2, 331, 332, 7, 36, 2, 2, 332, 333, 7, 36, 2, 2, 333, 338,
+ 3, 2, 2, 2, 334, 337, 5, 71, 36, 2, 335, 337, 10, 14, 2, 2, 336, 334, 3,
+ 2, 2, 2, 336, 335, 3, 2, 2, 2, 337, 340, 3, 2, 2, 2, 338, 339, 3, 2, 2,
+ 2, 338, 336, 3, 2, 2, 2, 339, 341, 3, 2, 2, 2, 340, 338, 3, 2, 2, 2, 341,
+ 342, 7, 36, 2, 2, 342, 343, 7, 36, 2, 2, 343, 409, 7, 36, 2, 2, 344, 345,
+ 7, 41, 2, 2, 345, 346, 7, 41, 2, 2, 346, 347, 7, 41, 2, 2, 347, 352, 3,
+ 2, 2, 2, 348, 351, 5, 71, 36, 2, 349, 351, 10, 14, 2, 2, 350, 348, 3, 2,
+ 2, 2, 350, 349, 3, 2, 2, 2, 351, 354, 3, 2, 2, 2, 352, 353, 3, 2, 2, 2,
+ 352, 350, 3, 2, 2, 2, 353, 355, 3, 2, 2, 2, 354, 352, 3, 2, 2, 2, 355,
+ 356, 7, 41, 2, 2, 356, 357, 7, 41, 2, 2, 357, 409, 7, 41, 2, 2, 358, 359,
+ 5, 69, 35, 2, 359, 363, 7, 36, 2, 2, 360, 362, 10, 15, 2, 2, 361, 360,
+ 3, 2, 2, 2, 362, 365, 3, 2, 2, 2, 363, 361, 3, 2, 2, 2, 363, 364, 3, 2,
+ 2, 2, 364, 366, 3, 2, 2, 2, 365, 363, 3, 2, 2, 2, 366, 367, 7, 36, 2, 2,
+ 367, 409, 3, 2, 2, 2, 368, 369, 5, 69, 35, 2, 369, 373, 7, 41, 2, 2, 370,
+ 372, 10, 16, 2, 2, 371, 370, 3, 2, 2, 2, 372, 375, 3, 2, 2, 2, 373, 371,
+ 3, 2, 2, 2, 373, 374, 3, 2, 2, 2, 374, 376, 3, 2, 2, 2, 375, 373, 3, 2,
+ 2, 2, 376, 377, 7, 41, 2, 2, 377, 409, 3, 2, 2, 2, 378, 379, 5, 69, 35,
+ 2, 379, 380, 7, 36, 2, 2, 380, 381, 7, 36, 2, 2, 381, 382, 7, 36, 2, 2,
+ 382, 386, 3, 2, 2, 2, 383, 385, 11, 2, 2, 2, 384, 383, 3, 2, 2, 2, 385,
+ 388, 3, 2, 2, 2, 386, 387, 3, 2, 2, 2, 386, 384, 3, 2, 2, 2, 387, 389,
+ 3, 2, 2, 2, 388, 386, 3, 2, 2, 2, 389, 390, 7, 36, 2, 2, 390, 391, 7, 36,
+ 2, 2, 391, 392, 7, 36, 2, 2, 392, 409, 3, 2, 2, 2, 393, 394, 5, 69, 35,
+ 2, 394, 395, 7, 41, 2, 2, 395, 396, 7, 41, 2, 2, 396, 397, 7, 41, 2, 2,
+ 397, 401, 3, 2, 2, 2, 398, 400, 11, 2, 2, 2, 399, 398, 3, 2, 2, 2, 400,
+ 403, 3, 2, 2, 2, 401, 402, 3, 2, 2, 2, 401, 399, 3, 2, 2, 2, 402, 404,
+ 3, 2, 2, 2, 403, 401, 3, 2, 2, 2, 404, 405, 7, 41, 2, 2, 405, 406, 7, 41,
+ 2, 2, 406, 407, 7, 41, 2, 2, 407, 409, 3, 2, 2, 2, 408, 312, 3, 2, 2, 2,
+ 408, 321, 3, 2, 2, 2, 408, 330, 3, 2, 2, 2, 408, 344, 3, 2, 2, 2, 408,
+ 358, 3, 2, 2, 2, 408, 368, 3, 2, 2, 2, 408, 378, 3, 2, 2, 2, 408, 393,
+ 3, 2, 2, 2, 409, 92, 3, 2, 2, 2, 410, 411, 9, 17, 2, 2, 411, 412, 5, 91,
+ 46, 2, 412, 94, 3, 2, 2, 2, 413, 416, 5, 61, 31, 2, 414, 416, 7, 97, 2,
+ 2, 415, 413, 3, 2, 2, 2, 415, 414, 3, 2, 2, 2, 416, 422, 3, 2, 2, 2, 417,
+ 421, 5, 61, 31, 2, 418, 421, 5, 63, 32, 2, 419, 421, 7, 97, 2, 2, 420,
+ 417, 3, 2, 2, 2, 420, 418, 3, 2, 2, 2, 420, 419, 3, 2, 2, 2, 421, 424,
+ 3, 2, 2, 2, 422, 420, 3, 2, 2, 2, 422, 423, 3, 2, 2, 2, 423, 96, 3, 2,
+ 2, 2, 424, 422, 3, 2, 2, 2, 38, 2, 178, 183, 193, 226, 231, 241, 249, 255,
+ 258, 263, 271, 274, 276, 281, 289, 291, 296, 306, 310, 315, 317, 324, 326,
+ 336, 338, 350, 352, 363, 373, 386, 401, 408, 415, 420, 422, 3, 2, 3, 2,
+}
+
+var lexerChannelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+}
+
+var lexerModeNames = []string{
+ "DEFAULT_MODE",
+}
+
+var lexerLiteralNames = []string{
+ "", "'in'", "'=='", "'!='", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", "'?'",
+ "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+}
+
+var lexerSymbolicNames = []string{
+ "", "", "EQUALS", "NOT_EQUALS", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "WHITESPACE",
+ "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER",
+}
+
+var lexerRuleNames = []string{
+ "T__0", "EQUALS", "NOT_EQUALS", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "BACKSLASH",
+ "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", "ESC_SEQ", "ESC_CHAR_SEQ",
+ "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", "WHITESPACE", "COMMENT",
+ "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER",
+}
+
+type CELLexer struct {
+ *antlr.BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+func NewCELLexer(input antlr.CharStream) *CELLexer {
+
+ l := new(CELLexer)
+ lexerDeserializer := antlr.NewATNDeserializer(nil)
+ lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
+ lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState))
+ for index, ds := range lexerAtn.DecisionToState {
+ lexerDecisionToDFA[index] = antlr.NewDFA(ds, index)
+ }
+
+ l.BaseLexer = antlr.NewBaseLexer(input)
+ l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache())
+
+ l.channelNames = lexerChannelNames
+ l.modeNames = lexerModeNames
+ l.RuleNames = lexerRuleNames
+ l.LiteralNames = lexerLiteralNames
+ l.SymbolicNames = lexerSymbolicNames
+ l.GrammarFileName = "CEL.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// CELLexer tokens.
+const (
+ CELLexerT__0 = 1
+ CELLexerEQUALS = 2
+ CELLexerNOT_EQUALS = 3
+ CELLexerLESS = 4
+ CELLexerLESS_EQUALS = 5
+ CELLexerGREATER_EQUALS = 6
+ CELLexerGREATER = 7
+ CELLexerLOGICAL_AND = 8
+ CELLexerLOGICAL_OR = 9
+ CELLexerLBRACKET = 10
+ CELLexerRPRACKET = 11
+ CELLexerLBRACE = 12
+ CELLexerRBRACE = 13
+ CELLexerLPAREN = 14
+ CELLexerRPAREN = 15
+ CELLexerDOT = 16
+ CELLexerCOMMA = 17
+ CELLexerMINUS = 18
+ CELLexerEXCLAM = 19
+ CELLexerQUESTIONMARK = 20
+ CELLexerCOLON = 21
+ CELLexerPLUS = 22
+ CELLexerSTAR = 23
+ CELLexerSLASH = 24
+ CELLexerPERCENT = 25
+ CELLexerTRUE = 26
+ CELLexerFALSE = 27
+ CELLexerNULL = 28
+ CELLexerWHITESPACE = 29
+ CELLexerCOMMENT = 30
+ CELLexerNUM_FLOAT = 31
+ CELLexerNUM_INT = 32
+ CELLexerNUM_UINT = 33
+ CELLexerSTRING = 34
+ CELLexerBYTES = 35
+ CELLexerIDENTIFIER = 36
+)
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
new file mode 100644
index 00000000000..7eaba726454
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -0,0 +1,183 @@
+// Generated from /Users/tswadell/lace/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7.
+
+package gen // CEL
+import "github.com/antlr/antlr4/runtime/Go/antlr"
+
+// CELListener is a complete listener for a parse tree produced by CELParser.
+type CELListener interface {
+ antlr.ParseTreeListener
+
+ // EnterStart is called when entering the start production.
+ EnterStart(c *StartContext)
+
+ // EnterExpr is called when entering the expr production.
+ EnterExpr(c *ExprContext)
+
+ // EnterConditionalOr is called when entering the conditionalOr production.
+ EnterConditionalOr(c *ConditionalOrContext)
+
+ // EnterConditionalAnd is called when entering the conditionalAnd production.
+ EnterConditionalAnd(c *ConditionalAndContext)
+
+ // EnterRelation is called when entering the relation production.
+ EnterRelation(c *RelationContext)
+
+ // EnterCalc is called when entering the calc production.
+ EnterCalc(c *CalcContext)
+
+ // EnterMemberExpr is called when entering the MemberExpr production.
+ EnterMemberExpr(c *MemberExprContext)
+
+ // EnterLogicalNot is called when entering the LogicalNot production.
+ EnterLogicalNot(c *LogicalNotContext)
+
+ // EnterNegate is called when entering the Negate production.
+ EnterNegate(c *NegateContext)
+
+ // EnterSelectOrCall is called when entering the SelectOrCall production.
+ EnterSelectOrCall(c *SelectOrCallContext)
+
+ // EnterPrimaryExpr is called when entering the PrimaryExpr production.
+ EnterPrimaryExpr(c *PrimaryExprContext)
+
+ // EnterIndex is called when entering the Index production.
+ EnterIndex(c *IndexContext)
+
+ // EnterCreateMessage is called when entering the CreateMessage production.
+ EnterCreateMessage(c *CreateMessageContext)
+
+ // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
+ EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // EnterNested is called when entering the Nested production.
+ EnterNested(c *NestedContext)
+
+ // EnterCreateList is called when entering the CreateList production.
+ EnterCreateList(c *CreateListContext)
+
+ // EnterCreateStruct is called when entering the CreateStruct production.
+ EnterCreateStruct(c *CreateStructContext)
+
+ // EnterConstantLiteral is called when entering the ConstantLiteral production.
+ EnterConstantLiteral(c *ConstantLiteralContext)
+
+ // EnterExprList is called when entering the exprList production.
+ EnterExprList(c *ExprListContext)
+
+ // EnterFieldInitializerList is called when entering the fieldInitializerList production.
+ EnterFieldInitializerList(c *FieldInitializerListContext)
+
+ // EnterMapInitializerList is called when entering the mapInitializerList production.
+ EnterMapInitializerList(c *MapInitializerListContext)
+
+ // EnterInt is called when entering the Int production.
+ EnterInt(c *IntContext)
+
+ // EnterUint is called when entering the Uint production.
+ EnterUint(c *UintContext)
+
+ // EnterDouble is called when entering the Double production.
+ EnterDouble(c *DoubleContext)
+
+ // EnterString is called when entering the String production.
+ EnterString(c *StringContext)
+
+ // EnterBytes is called when entering the Bytes production.
+ EnterBytes(c *BytesContext)
+
+ // EnterBoolTrue is called when entering the BoolTrue production.
+ EnterBoolTrue(c *BoolTrueContext)
+
+ // EnterBoolFalse is called when entering the BoolFalse production.
+ EnterBoolFalse(c *BoolFalseContext)
+
+ // EnterNull is called when entering the Null production.
+ EnterNull(c *NullContext)
+
+ // ExitStart is called when exiting the start production.
+ ExitStart(c *StartContext)
+
+ // ExitExpr is called when exiting the expr production.
+ ExitExpr(c *ExprContext)
+
+ // ExitConditionalOr is called when exiting the conditionalOr production.
+ ExitConditionalOr(c *ConditionalOrContext)
+
+ // ExitConditionalAnd is called when exiting the conditionalAnd production.
+ ExitConditionalAnd(c *ConditionalAndContext)
+
+ // ExitRelation is called when exiting the relation production.
+ ExitRelation(c *RelationContext)
+
+ // ExitCalc is called when exiting the calc production.
+ ExitCalc(c *CalcContext)
+
+ // ExitMemberExpr is called when exiting the MemberExpr production.
+ ExitMemberExpr(c *MemberExprContext)
+
+ // ExitLogicalNot is called when exiting the LogicalNot production.
+ ExitLogicalNot(c *LogicalNotContext)
+
+ // ExitNegate is called when exiting the Negate production.
+ ExitNegate(c *NegateContext)
+
+ // ExitSelectOrCall is called when exiting the SelectOrCall production.
+ ExitSelectOrCall(c *SelectOrCallContext)
+
+ // ExitPrimaryExpr is called when exiting the PrimaryExpr production.
+ ExitPrimaryExpr(c *PrimaryExprContext)
+
+ // ExitIndex is called when exiting the Index production.
+ ExitIndex(c *IndexContext)
+
+ // ExitCreateMessage is called when exiting the CreateMessage production.
+ ExitCreateMessage(c *CreateMessageContext)
+
+ // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
+ ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
+
+ // ExitNested is called when exiting the Nested production.
+ ExitNested(c *NestedContext)
+
+ // ExitCreateList is called when exiting the CreateList production.
+ ExitCreateList(c *CreateListContext)
+
+ // ExitCreateStruct is called when exiting the CreateStruct production.
+ ExitCreateStruct(c *CreateStructContext)
+
+ // ExitConstantLiteral is called when exiting the ConstantLiteral production.
+ ExitConstantLiteral(c *ConstantLiteralContext)
+
+ // ExitExprList is called when exiting the exprList production.
+ ExitExprList(c *ExprListContext)
+
+ // ExitFieldInitializerList is called when exiting the fieldInitializerList production.
+ ExitFieldInitializerList(c *FieldInitializerListContext)
+
+ // ExitMapInitializerList is called when exiting the mapInitializerList production.
+ ExitMapInitializerList(c *MapInitializerListContext)
+
+ // ExitInt is called when exiting the Int production.
+ ExitInt(c *IntContext)
+
+ // ExitUint is called when exiting the Uint production.
+ ExitUint(c *UintContext)
+
+ // ExitDouble is called when exiting the Double production.
+ ExitDouble(c *DoubleContext)
+
+ // ExitString is called when exiting the String production.
+ ExitString(c *StringContext)
+
+ // ExitBytes is called when exiting the Bytes production.
+ ExitBytes(c *BytesContext)
+
+ // ExitBoolTrue is called when exiting the BoolTrue production.
+ ExitBoolTrue(c *BoolTrueContext)
+
+ // ExitBoolFalse is called when exiting the BoolFalse production.
+ ExitBoolFalse(c *BoolFalseContext)
+
+ // ExitNull is called when exiting the Null production.
+ ExitNull(c *NullContext)
+}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
new file mode 100644
index 00000000000..a7d65397a21
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -0,0 +1,4065 @@
+// Generated from /Users/tswadell/lace/go/src/github.com/google/cel-go/bin/../parser/gen/CEL.g4 by ANTLR 4.7.
+
+package gen // CEL
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import errors
+var _ = fmt.Printf
+var _ = reflect.Copy
+var _ = strconv.Itoa
+
+var parserATN = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 38, 205,
+ 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7,
+ 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13,
+ 9, 13, 4, 14, 9, 14, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 5, 3, 38, 10, 3, 3, 4, 3, 4, 3, 4, 7, 4, 43, 10, 4, 12, 4, 14, 4, 46,
+ 11, 4, 3, 5, 3, 5, 3, 5, 7, 5, 51, 10, 5, 12, 5, 14, 5, 54, 11, 5, 3, 6,
+ 3, 6, 3, 6, 3, 6, 3, 6, 3, 6, 7, 6, 62, 10, 6, 12, 6, 14, 6, 65, 11, 6,
+ 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 3, 7, 7, 7, 76, 10, 7,
+ 12, 7, 14, 7, 79, 11, 7, 3, 8, 3, 8, 6, 8, 83, 10, 8, 13, 8, 14, 8, 84,
+ 3, 8, 3, 8, 6, 8, 89, 10, 8, 13, 8, 14, 8, 90, 3, 8, 5, 8, 94, 10, 8, 3,
+ 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 104, 10, 9, 3, 9, 5,
+ 9, 107, 10, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 3, 9, 5, 9, 117,
+ 10, 9, 3, 9, 7, 9, 120, 10, 9, 12, 9, 14, 9, 123, 11, 9, 3, 10, 5, 10,
+ 126, 10, 10, 3, 10, 3, 10, 3, 10, 5, 10, 131, 10, 10, 3, 10, 5, 10, 134,
+ 10, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 3, 10, 5, 10, 142, 10, 10, 3,
+ 10, 5, 10, 145, 10, 10, 3, 10, 3, 10, 3, 10, 5, 10, 150, 10, 10, 3, 10,
+ 3, 10, 5, 10, 154, 10, 10, 3, 11, 3, 11, 3, 11, 7, 11, 159, 10, 11, 12,
+ 11, 14, 11, 162, 11, 11, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12, 3, 12,
+ 7, 12, 171, 10, 12, 12, 12, 14, 12, 174, 11, 12, 3, 13, 3, 13, 3, 13, 3,
+ 13, 3, 13, 3, 13, 3, 13, 3, 13, 7, 13, 184, 10, 13, 12, 13, 14, 13, 187,
+ 11, 13, 3, 14, 5, 14, 190, 10, 14, 3, 14, 3, 14, 3, 14, 5, 14, 195, 10,
+ 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 3, 14, 5, 14, 203, 10, 14, 3, 14,
+ 2, 5, 10, 12, 16, 15, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 2,
+ 5, 3, 2, 3, 9, 3, 2, 25, 27, 4, 2, 20, 20, 24, 24, 2, 229, 2, 28, 3, 2,
+ 2, 2, 4, 31, 3, 2, 2, 2, 6, 39, 3, 2, 2, 2, 8, 47, 3, 2, 2, 2, 10, 55,
+ 3, 2, 2, 2, 12, 66, 3, 2, 2, 2, 14, 93, 3, 2, 2, 2, 16, 95, 3, 2, 2, 2,
+ 18, 153, 3, 2, 2, 2, 20, 155, 3, 2, 2, 2, 22, 163, 3, 2, 2, 2, 24, 175,
+ 3, 2, 2, 2, 26, 202, 3, 2, 2, 2, 28, 29, 5, 4, 3, 2, 29, 30, 7, 2, 2, 3,
+ 30, 3, 3, 2, 2, 2, 31, 37, 5, 6, 4, 2, 32, 33, 7, 22, 2, 2, 33, 34, 5,
+ 6, 4, 2, 34, 35, 7, 23, 2, 2, 35, 36, 5, 4, 3, 2, 36, 38, 3, 2, 2, 2, 37,
+ 32, 3, 2, 2, 2, 37, 38, 3, 2, 2, 2, 38, 5, 3, 2, 2, 2, 39, 44, 5, 8, 5,
+ 2, 40, 41, 7, 11, 2, 2, 41, 43, 5, 8, 5, 2, 42, 40, 3, 2, 2, 2, 43, 46,
+ 3, 2, 2, 2, 44, 42, 3, 2, 2, 2, 44, 45, 3, 2, 2, 2, 45, 7, 3, 2, 2, 2,
+ 46, 44, 3, 2, 2, 2, 47, 52, 5, 10, 6, 2, 48, 49, 7, 10, 2, 2, 49, 51, 5,
+ 10, 6, 2, 50, 48, 3, 2, 2, 2, 51, 54, 3, 2, 2, 2, 52, 50, 3, 2, 2, 2, 52,
+ 53, 3, 2, 2, 2, 53, 9, 3, 2, 2, 2, 54, 52, 3, 2, 2, 2, 55, 56, 8, 6, 1,
+ 2, 56, 57, 5, 12, 7, 2, 57, 63, 3, 2, 2, 2, 58, 59, 12, 3, 2, 2, 59, 60,
+ 9, 2, 2, 2, 60, 62, 5, 10, 6, 4, 61, 58, 3, 2, 2, 2, 62, 65, 3, 2, 2, 2,
+ 63, 61, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 11, 3, 2, 2, 2, 65, 63, 3,
+ 2, 2, 2, 66, 67, 8, 7, 1, 2, 67, 68, 5, 14, 8, 2, 68, 77, 3, 2, 2, 2, 69,
+ 70, 12, 4, 2, 2, 70, 71, 9, 3, 2, 2, 71, 76, 5, 12, 7, 5, 72, 73, 12, 3,
+ 2, 2, 73, 74, 9, 4, 2, 2, 74, 76, 5, 12, 7, 4, 75, 69, 3, 2, 2, 2, 75,
+ 72, 3, 2, 2, 2, 76, 79, 3, 2, 2, 2, 77, 75, 3, 2, 2, 2, 77, 78, 3, 2, 2,
+ 2, 78, 13, 3, 2, 2, 2, 79, 77, 3, 2, 2, 2, 80, 94, 5, 16, 9, 2, 81, 83,
+ 7, 21, 2, 2, 82, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 82, 3, 2, 2, 2,
+ 84, 85, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 94, 5, 16, 9, 2, 87, 89, 7,
+ 20, 2, 2, 88, 87, 3, 2, 2, 2, 89, 90, 3, 2, 2, 2, 90, 88, 3, 2, 2, 2, 90,
+ 91, 3, 2, 2, 2, 91, 92, 3, 2, 2, 2, 92, 94, 5, 16, 9, 2, 93, 80, 3, 2,
+ 2, 2, 93, 82, 3, 2, 2, 2, 93, 88, 3, 2, 2, 2, 94, 15, 3, 2, 2, 2, 95, 96,
+ 8, 9, 1, 2, 96, 97, 5, 18, 10, 2, 97, 121, 3, 2, 2, 2, 98, 99, 12, 5, 2,
+ 2, 99, 100, 7, 18, 2, 2, 100, 106, 7, 38, 2, 2, 101, 103, 7, 16, 2, 2,
+ 102, 104, 5, 20, 11, 2, 103, 102, 3, 2, 2, 2, 103, 104, 3, 2, 2, 2, 104,
+ 105, 3, 2, 2, 2, 105, 107, 7, 17, 2, 2, 106, 101, 3, 2, 2, 2, 106, 107,
+ 3, 2, 2, 2, 107, 120, 3, 2, 2, 2, 108, 109, 12, 4, 2, 2, 109, 110, 7, 12,
+ 2, 2, 110, 111, 5, 4, 3, 2, 111, 112, 7, 13, 2, 2, 112, 120, 3, 2, 2, 2,
+ 113, 114, 12, 3, 2, 2, 114, 116, 7, 14, 2, 2, 115, 117, 5, 22, 12, 2, 116,
+ 115, 3, 2, 2, 2, 116, 117, 3, 2, 2, 2, 117, 118, 3, 2, 2, 2, 118, 120,
+ 7, 15, 2, 2, 119, 98, 3, 2, 2, 2, 119, 108, 3, 2, 2, 2, 119, 113, 3, 2,
+ 2, 2, 120, 123, 3, 2, 2, 2, 121, 119, 3, 2, 2, 2, 121, 122, 3, 2, 2, 2,
+ 122, 17, 3, 2, 2, 2, 123, 121, 3, 2, 2, 2, 124, 126, 7, 18, 2, 2, 125,
+ 124, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126, 127, 3, 2, 2, 2, 127, 133,
+ 7, 38, 2, 2, 128, 130, 7, 16, 2, 2, 129, 131, 5, 20, 11, 2, 130, 129, 3,
+ 2, 2, 2, 130, 131, 3, 2, 2, 2, 131, 132, 3, 2, 2, 2, 132, 134, 7, 17, 2,
+ 2, 133, 128, 3, 2, 2, 2, 133, 134, 3, 2, 2, 2, 134, 154, 3, 2, 2, 2, 135,
+ 136, 7, 16, 2, 2, 136, 137, 5, 4, 3, 2, 137, 138, 7, 17, 2, 2, 138, 154,
+ 3, 2, 2, 2, 139, 141, 7, 12, 2, 2, 140, 142, 5, 20, 11, 2, 141, 140, 3,
+ 2, 2, 2, 141, 142, 3, 2, 2, 2, 142, 144, 3, 2, 2, 2, 143, 145, 7, 19, 2,
+ 2, 144, 143, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 146, 3, 2, 2, 2, 146,
+ 154, 7, 13, 2, 2, 147, 149, 7, 14, 2, 2, 148, 150, 5, 24, 13, 2, 149, 148,
+ 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 151, 3, 2, 2, 2, 151, 154, 7, 15,
+ 2, 2, 152, 154, 5, 26, 14, 2, 153, 125, 3, 2, 2, 2, 153, 135, 3, 2, 2,
+ 2, 153, 139, 3, 2, 2, 2, 153, 147, 3, 2, 2, 2, 153, 152, 3, 2, 2, 2, 154,
+ 19, 3, 2, 2, 2, 155, 160, 5, 4, 3, 2, 156, 157, 7, 19, 2, 2, 157, 159,
+ 5, 4, 3, 2, 158, 156, 3, 2, 2, 2, 159, 162, 3, 2, 2, 2, 160, 158, 3, 2,
+ 2, 2, 160, 161, 3, 2, 2, 2, 161, 21, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2,
+ 163, 164, 7, 38, 2, 2, 164, 165, 7, 23, 2, 2, 165, 172, 5, 4, 3, 2, 166,
+ 167, 7, 19, 2, 2, 167, 168, 7, 38, 2, 2, 168, 169, 7, 23, 2, 2, 169, 171,
+ 5, 4, 3, 2, 170, 166, 3, 2, 2, 2, 171, 174, 3, 2, 2, 2, 172, 170, 3, 2,
+ 2, 2, 172, 173, 3, 2, 2, 2, 173, 23, 3, 2, 2, 2, 174, 172, 3, 2, 2, 2,
+ 175, 176, 5, 4, 3, 2, 176, 177, 7, 23, 2, 2, 177, 185, 5, 4, 3, 2, 178,
+ 179, 7, 19, 2, 2, 179, 180, 5, 4, 3, 2, 180, 181, 7, 23, 2, 2, 181, 182,
+ 5, 4, 3, 2, 182, 184, 3, 2, 2, 2, 183, 178, 3, 2, 2, 2, 184, 187, 3, 2,
+ 2, 2, 185, 183, 3, 2, 2, 2, 185, 186, 3, 2, 2, 2, 186, 25, 3, 2, 2, 2,
+ 187, 185, 3, 2, 2, 2, 188, 190, 7, 20, 2, 2, 189, 188, 3, 2, 2, 2, 189,
+ 190, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 203, 7, 34, 2, 2, 192, 203,
+ 7, 35, 2, 2, 193, 195, 7, 20, 2, 2, 194, 193, 3, 2, 2, 2, 194, 195, 3,
+ 2, 2, 2, 195, 196, 3, 2, 2, 2, 196, 203, 7, 33, 2, 2, 197, 203, 7, 36,
+ 2, 2, 198, 203, 7, 37, 2, 2, 199, 203, 7, 28, 2, 2, 200, 203, 7, 29, 2,
+ 2, 201, 203, 7, 30, 2, 2, 202, 189, 3, 2, 2, 2, 202, 192, 3, 2, 2, 2, 202,
+ 194, 3, 2, 2, 2, 202, 197, 3, 2, 2, 2, 202, 198, 3, 2, 2, 2, 202, 199,
+ 3, 2, 2, 2, 202, 200, 3, 2, 2, 2, 202, 201, 3, 2, 2, 2, 203, 27, 3, 2,
+ 2, 2, 29, 37, 44, 52, 63, 75, 77, 84, 90, 93, 103, 106, 116, 119, 121,
+ 125, 130, 133, 141, 144, 149, 153, 160, 172, 185, 189, 194, 202,
+}
+
+var literalNames = []string{
+ "", "'in'", "'=='", "'!='", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'",
+ "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", "'?'",
+ "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'",
+}
+var symbolicNames = []string{
+ "", "", "EQUALS", "NOT_EQUALS", "LESS", "LESS_EQUALS", "GREATER_EQUALS",
+ "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE",
+ "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK",
+ "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "TRUE", "FALSE", "NULL", "WHITESPACE",
+ "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", "BYTES", "IDENTIFIER",
+}
+
+var ruleNames = []string{
+ "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
+ "unary", "member", "primary", "exprList", "fieldInitializerList", "mapInitializerList",
+ "literal",
+}
+
+type CELParser struct {
+ *antlr.BaseParser
+}
+
+func NewCELParser(input antlr.TokenStream) *CELParser {
+ this := new(CELParser)
+ deserializer := antlr.NewATNDeserializer(nil)
+ deserializedATN := deserializer.DeserializeFromUInt16(parserATN)
+ decisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState))
+ for index, ds := range deserializedATN.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(ds, index)
+ }
+ this.BaseParser = antlr.NewBaseParser(input)
+
+ this.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache())
+ this.RuleNames = ruleNames
+ this.LiteralNames = literalNames
+ this.SymbolicNames = symbolicNames
+ this.GrammarFileName = "CEL.g4"
+
+ return this
+}
+
+// CELParser tokens.
+const (
+ CELParserEOF = antlr.TokenEOF
+ CELParserT__0 = 1
+ CELParserEQUALS = 2
+ CELParserNOT_EQUALS = 3
+ CELParserLESS = 4
+ CELParserLESS_EQUALS = 5
+ CELParserGREATER_EQUALS = 6
+ CELParserGREATER = 7
+ CELParserLOGICAL_AND = 8
+ CELParserLOGICAL_OR = 9
+ CELParserLBRACKET = 10
+ CELParserRPRACKET = 11
+ CELParserLBRACE = 12
+ CELParserRBRACE = 13
+ CELParserLPAREN = 14
+ CELParserRPAREN = 15
+ CELParserDOT = 16
+ CELParserCOMMA = 17
+ CELParserMINUS = 18
+ CELParserEXCLAM = 19
+ CELParserQUESTIONMARK = 20
+ CELParserCOLON = 21
+ CELParserPLUS = 22
+ CELParserSTAR = 23
+ CELParserSLASH = 24
+ CELParserPERCENT = 25
+ CELParserTRUE = 26
+ CELParserFALSE = 27
+ CELParserNULL = 28
+ CELParserWHITESPACE = 29
+ CELParserCOMMENT = 30
+ CELParserNUM_FLOAT = 31
+ CELParserNUM_INT = 32
+ CELParserNUM_UINT = 33
+ CELParserSTRING = 34
+ CELParserBYTES = 35
+ CELParserIDENTIFIER = 36
+)
+
+// CELParser rules.
+const (
+ CELParserRULE_start = 0
+ CELParserRULE_expr = 1
+ CELParserRULE_conditionalOr = 2
+ CELParserRULE_conditionalAnd = 3
+ CELParserRULE_relation = 4
+ CELParserRULE_calc = 5
+ CELParserRULE_unary = 6
+ CELParserRULE_member = 7
+ CELParserRULE_primary = 8
+ CELParserRULE_exprList = 9
+ CELParserRULE_fieldInitializerList = 10
+ CELParserRULE_mapInitializerList = 11
+ CELParserRULE_literal = 12
+)
+
+// IStartContext is an interface to support dynamic dispatch.
+type IStartContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetE returns the e rule contexts.
+ GetE() IExprContext
+
+ // SetE sets the e rule contexts.
+ SetE(IExprContext)
+
+ // IsStartContext differentiates from other interfaces.
+ IsStartContext()
+}
+
+type StartContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IExprContext
+}
+
+func NewEmptyStartContext() *StartContext {
+ var p = new(StartContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_start
+ return p
+}
+
+func (*StartContext) IsStartContext() {}
+
+func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext {
+ var p = new(StartContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_start
+
+ return p
+}
+
+func (s *StartContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *StartContext) GetE() IExprContext { return s.e }
+
+func (s *StartContext) SetE(v IExprContext) { s.e = v }
+
+func (s *StartContext) EOF() antlr.TerminalNode {
+ return s.GetToken(CELParserEOF, 0)
+}
+
+func (s *StartContext) Expr() IExprContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExprContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *StartContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterStart(s)
+ }
+}
+
+func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitStart(s)
+ }
+}
+
+func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitStart(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Start() (localctx IStartContext) {
+ localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 0, CELParserRULE_start)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(26)
+
+ var _x = p.Expr()
+
+ localctx.(*StartContext).e = _x
+ }
+ {
+ p.SetState(27)
+ p.Match(CELParserEOF)
+ }
+
+ return localctx
+}
+
+// IExprContext is an interface to support dynamic dispatch.
+type IExprContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+ // GetE returns the e rule contexts.
+ GetE() IConditionalOrContext
+
+ // GetE1 returns the e1 rule contexts.
+ GetE1() IConditionalOrContext
+
+ // GetE2 returns the e2 rule contexts.
+ GetE2() IExprContext
+
+ // SetE sets the e rule contexts.
+ SetE(IConditionalOrContext)
+
+ // SetE1 sets the e1 rule contexts.
+ SetE1(IConditionalOrContext)
+
+ // SetE2 sets the e2 rule contexts.
+ SetE2(IExprContext)
+
+ // IsExprContext differentiates from other interfaces.
+ IsExprContext()
+}
+
+type ExprContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalOrContext
+ op antlr.Token
+ e1 IConditionalOrContext
+ e2 IExprContext
+}
+
+func NewEmptyExprContext() *ExprContext {
+ var p = new(ExprContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_expr
+ return p
+}
+
+func (*ExprContext) IsExprContext() {}
+
+func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext {
+ var p = new(ExprContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_expr
+
+ return p
+}
+
+func (s *ExprContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExprContext) GetOp() antlr.Token { return s.op }
+
+func (s *ExprContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *ExprContext) GetE() IConditionalOrContext { return s.e }
+
+func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 }
+
+func (s *ExprContext) GetE2() IExprContext { return s.e2 }
+
+func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v }
+
+func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v }
+
+func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v }
+
+func (s *ExprContext) AllConditionalOr() []IConditionalOrContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IConditionalOrContext)(nil)).Elem())
+ var tst = make([]IConditionalOrContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IConditionalOrContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IConditionalOrContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalOrContext)
+}
+
+func (s *ExprContext) Expr() IExprContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExprContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExprContext)
+}
+
+func (s *ExprContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterExpr(s)
+ }
+}
+
+func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitExpr(s)
+ }
+}
+
+func (s *ExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitExpr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Expr() (localctx IExprContext) {
+ localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 2, CELParserRULE_expr)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(29)
+
+ var _x = p.ConditionalOr()
+
+ localctx.(*ExprContext).e = _x
+ }
+ p.SetState(35)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ if _la == CELParserQUESTIONMARK {
+ {
+ p.SetState(30)
+
+ var _m = p.Match(CELParserQUESTIONMARK)
+
+ localctx.(*ExprContext).op = _m
+ }
+ {
+ p.SetState(31)
+
+ var _x = p.ConditionalOr()
+
+ localctx.(*ExprContext).e1 = _x
+ }
+ {
+ p.SetState(32)
+ p.Match(CELParserCOLON)
+ }
+ {
+ p.SetState(33)
+
+ var _x = p.Expr()
+
+ localctx.(*ExprContext).e2 = _x
+ }
+
+ }
+
+ return localctx
+}
+
+// IConditionalOrContext is an interface to support dynamic dispatch.
+type IConditionalOrContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS9 returns the s9 token.
+ GetS9() antlr.Token
+
+ // SetS9 sets the s9 token.
+ SetS9(antlr.Token)
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+ // GetE returns the e rule contexts.
+ GetE() IConditionalAndContext
+
+ // Get_conditionalAnd returns the _conditionalAnd rule contexts.
+ Get_conditionalAnd() IConditionalAndContext
+
+ // SetE sets the e rule contexts.
+ SetE(IConditionalAndContext)
+
+ // Set_conditionalAnd sets the _conditionalAnd rule contexts.
+ Set_conditionalAnd(IConditionalAndContext)
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IConditionalAndContext
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IConditionalAndContext)
+
+ // IsConditionalOrContext differentiates from other interfaces.
+ IsConditionalOrContext()
+}
+
+type ConditionalOrContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IConditionalAndContext
+ s9 antlr.Token
+ ops []antlr.Token
+ _conditionalAnd IConditionalAndContext
+ e1 []IConditionalAndContext
+}
+
+func NewEmptyConditionalOrContext() *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalOr
+ return p
+}
+
+func (*ConditionalOrContext) IsConditionalOrContext() {}
+
+func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext {
+ var p = new(ConditionalOrContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalOr
+
+ return p
+}
+
+func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 }
+
+func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v }
+
+func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e }
+
+func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd }
+
+func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v }
+
+func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v }
+
+func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 }
+
+func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v }
+
+func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IConditionalAndContext)(nil)).Elem())
+ var tst = make([]IConditionalAndContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IConditionalAndContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IConditionalAndContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IConditionalAndContext)
+}
+
+func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalOr(s)
+ }
+}
+
+func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalOr(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
+ localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 4, CELParserRULE_conditionalOr)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(37)
+
+ var _x = p.ConditionalAnd()
+
+ localctx.(*ConditionalOrContext).e = _x
+ }
+ p.SetState(42)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserLOGICAL_OR {
+ {
+ p.SetState(38)
+
+ var _m = p.Match(CELParserLOGICAL_OR)
+
+ localctx.(*ConditionalOrContext).s9 = _m
+ }
+ localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
+ {
+ p.SetState(39)
+
+ var _x = p.ConditionalAnd()
+
+ localctx.(*ConditionalOrContext)._conditionalAnd = _x
+ }
+ localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
+
+ p.SetState(44)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+
+ return localctx
+}
+
+// IConditionalAndContext is an interface to support dynamic dispatch.
+type IConditionalAndContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetS8 returns the s8 token.
+ GetS8() antlr.Token
+
+ // SetS8 sets the s8 token.
+ SetS8(antlr.Token)
+
+ // GetOps returns the ops token list.
+ GetOps() []antlr.Token
+
+ // SetOps sets the ops token list.
+ SetOps([]antlr.Token)
+
+ // GetE returns the e rule contexts.
+ GetE() IRelationContext
+
+ // Get_relation returns the _relation rule contexts.
+ Get_relation() IRelationContext
+
+ // SetE sets the e rule contexts.
+ SetE(IRelationContext)
+
+ // Set_relation sets the _relation rule contexts.
+ Set_relation(IRelationContext)
+
+ // GetE1 returns the e1 rule context list.
+ GetE1() []IRelationContext
+
+ // SetE1 sets the e1 rule context list.
+ SetE1([]IRelationContext)
+
+ // IsConditionalAndContext differentiates from other interfaces.
+ IsConditionalAndContext()
+}
+
+type ConditionalAndContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ e IRelationContext
+ s8 antlr.Token
+ ops []antlr.Token
+ _relation IRelationContext
+ e1 []IRelationContext
+}
+
+func NewEmptyConditionalAndContext() *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_conditionalAnd
+ return p
+}
+
+func (*ConditionalAndContext) IsConditionalAndContext() {}
+
+func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext {
+ var p = new(ConditionalAndContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_conditionalAnd
+
+ return p
+}
+
+func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 }
+
+func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v }
+
+func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops }
+
+func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v }
+
+func (s *ConditionalAndContext) GetE() IRelationContext { return s.e }
+
+func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation }
+
+func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v }
+
+func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v }
+
+func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 }
+
+func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v }
+
+func (s *ConditionalAndContext) AllRelation() []IRelationContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IRelationContext)(nil)).Elem())
+ var tst = make([]IRelationContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IRelationContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *ConditionalAndContext) Relation(i int) IRelationContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IRelationContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitConditionalAnd(s)
+ }
+}
+
+func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitConditionalAnd(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
+ localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd)
+ var _la int
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(45)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext).e = _x
+ }
+ p.SetState(50)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+
+ for _la == CELParserLOGICAL_AND {
+ {
+ p.SetState(46)
+
+ var _m = p.Match(CELParserLOGICAL_AND)
+
+ localctx.(*ConditionalAndContext).s8 = _m
+ }
+ localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
+ {
+ p.SetState(47)
+
+ var _x = p.relation(0)
+
+ localctx.(*ConditionalAndContext)._relation = _x
+ }
+ localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
+
+ p.SetState(52)
+ p.GetErrorHandler().Sync(p)
+ _la = p.GetTokenStream().LA(1)
+ }
+
+ return localctx
+}
+
+// IRelationContext is an interface to support dynamic dispatch.
+type IRelationContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // GetOp returns the op token.
+ GetOp() antlr.Token
+
+ // SetOp sets the op token.
+ SetOp(antlr.Token)
+
+ // IsRelationContext differentiates from other interfaces.
+ IsRelationContext()
+}
+
+type RelationContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+ op antlr.Token
+}
+
+func NewEmptyRelationContext() *RelationContext {
+ var p = new(RelationContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CELParserRULE_relation
+ return p
+}
+
+func (*RelationContext) IsRelationContext() {}
+
+func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext {
+ var p = new(RelationContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CELParserRULE_relation
+
+ return p
+}
+
+func (s *RelationContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *RelationContext) GetOp() antlr.Token { return s.op }
+
+func (s *RelationContext) SetOp(v antlr.Token) { s.op = v }
+
+func (s *RelationContext) Calc() ICalcContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*ICalcContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ICalcContext)
+}
+
+func (s *RelationContext) AllRelation() []IRelationContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IRelationContext)(nil)).Elem())
+ var tst = make([]IRelationContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IRelationContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *RelationContext) Relation(i int) IRelationContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IRelationContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IRelationContext)
+}
+
+func (s *RelationContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.EnterRelation(s)
+ }
+}
+
+func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) {
+ if listenerT, ok := listener.(CELListener); ok {
+ listenerT.ExitRelation(s)
+ }
+}
+
+func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CELVisitor:
+ return t.VisitRelation(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CELParser) Relation() (localctx IRelationContext) {
+ return p.relation(0)
+}
+
+func (p *CELParser) relation(_p int) (localctx IRelationContext) {
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IRelationContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 8
+ p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(54)
+ p.calc(0)
+ }
+
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(61)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ localctx = NewRelationContext(p, _parentctx, _parentState)
+ p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
+ p.SetState(56)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 1)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
+ }
+ p.SetState(57)
+
+ var _lt = p.GetTokenStream().LT(1)
+
+ localctx.(*RelationContext).op = _lt
+
+ _la = p.GetTokenStream().LA(1)
+
+ if !(((_la)&-(0x1f+1)) == 0 && ((1<::`.
+ //
+	// When the macro is a var-arg style macro, the `arg-count` value is represented as a `*`.
+ MacroKey() string
+
+ // Expander returns the MacroExpander to apply when the macro key matches the parsed call
+ // signature.
+ Expander() MacroExpander
+}
+
+// Macro type which declares the function name and arg count expected for the
+// macro, as well as a macro expansion function.
+type macro struct {
+ function string
+ receiverStyle bool
+ varArgStyle bool
+ argCount int
+ expander MacroExpander
+}
+
+// Function returns the macro's function name (i.e. the function whose syntax it mimics).
+func (m *macro) Function() string {
+ return m.function
+}
+
+// ArgCount returns the number of arguments the macro expects.
+func (m *macro) ArgCount() int {
+ return m.argCount
+}
+
+// IsReceiverStyle returns whether the macro is receiver style.
+func (m *macro) IsReceiverStyle() bool {
+ return m.receiverStyle
+}
+
+// Expander implements the Macro interface method.
+func (m *macro) Expander() MacroExpander {
+ return m.expander
+}
+
+// MacroKey implements the Macro interface method.
+func (m *macro) MacroKey() string {
+ if m.varArgStyle {
+ return makeVarArgMacroKey(m.function, m.receiverStyle)
+ }
+ return makeMacroKey(m.function, m.argCount, m.receiverStyle)
+}
+
+func makeMacroKey(name string, args int, receiverStyle bool) string {
+ return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
+}
+
+func makeVarArgMacroKey(name string, receiverStyle bool) string {
+ return fmt.Sprintf("%s:*:%v", name, receiverStyle)
+}
+
+// MacroExpander converts the target and args of a function call that matches a Macro.
+//
+// Note: when the Macros.IsReceiverStyle() is true, the target argument will be nil.
+type MacroExpander func(eh ExprHelper,
+ target *exprpb.Expr,
+ args []*exprpb.Expr) (*exprpb.Expr, *common.Error)
+
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type ExprHelper interface {
+ // LiteralBool creates an Expr value for a bool literal.
+ LiteralBool(value bool) *exprpb.Expr
+
+ // LiteralBytes creates an Expr value for a byte literal.
+ LiteralBytes(value []byte) *exprpb.Expr
+
+ // LiteralDouble creates an Expr value for double literal.
+ LiteralDouble(value float64) *exprpb.Expr
+
+ // LiteralInt creates an Expr value for an int literal.
+ LiteralInt(value int64) *exprpb.Expr
+
+	// LiteralString creates an Expr value for a string literal.
+ LiteralString(value string) *exprpb.Expr
+
+ // LiteralUint creates an Expr value for a uint literal.
+ LiteralUint(value uint64) *exprpb.Expr
+
+ // NewList creates a CreateList instruction where the list is comprised of the optional set
+ // of elements provided as arguments.
+ NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key *exprpb.Expr, val *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry
+
+ // NewObject creates a CreateStruct instruction for an object with a given type name and
+ // optional set of field initializers.
+ NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+ // NewObjectFieldInit creates a new Object field initializer from the field name and value.
+ NewObjectFieldInit(field string, init *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry
+
+ // Fold creates a fold comprehension instruction.
+ //
+ // - iterVar is the iteration variable name.
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+	//   keys (respectively) will be iterated over.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+	// - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ Fold(iterVar string,
+ iterRange *exprpb.Expr,
+ accuVar string,
+ accuInit *exprpb.Expr,
+ condition *exprpb.Expr,
+ step *exprpb.Expr,
+ result *exprpb.Expr) *exprpb.Expr
+
+ // Ident creates an identifier Expr value.
+ Ident(name string) *exprpb.Expr
+
+ // GlobalCall creates a function call Expr value for a global (free) function.
+ GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+ // ReceiverCall creates a function call Expr value for a receiver-style function.
+ ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+ // PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // Select create a field traversal Expr value.
+ Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+}
+
+var (
+ // AllMacros includes the list of all spec-supported macros.
+ AllMacros = []Macro{
+ // The macro "has(m.f)" which tests the presence of a field, avoiding the need to specify
+ // the field as a string.
+ NewGlobalMacro(operators.Has, 1, makeHas),
+
+ // The macro "range.all(var, predicate)", which is true if for all elements in range the
+ // predicate holds.
+ NewReceiverMacro(operators.All, 2, makeAll),
+
+ // The macro "range.exists(var, predicate)", which is true if for at least one element in
+ // range the predicate holds.
+ NewReceiverMacro(operators.Exists, 2, makeExists),
+
+ // The macro "range.exists_one(var, predicate)", which is true if for exactly one element
+ // in range the predicate holds.
+ NewReceiverMacro(operators.ExistsOne, 2, makeExistsOne),
+
+ // The macro "range.map(var, function)", applies the function to the vars in the range.
+ NewReceiverMacro(operators.Map, 2, makeMap),
+
+ // The macro "range.map(var, predicate, function)", applies the function to the vars in
+ // the range for which the predicate holds true. The other variables are filtered out.
+ NewReceiverMacro(operators.Map, 3, makeMap),
+
+ // The macro "range.filter(var, predicate)", filters out the variables for which the
+ // predicate is false.
+ NewReceiverMacro(operators.Filter, 2, makeFilter),
+ }
+
+ // NoMacros list.
+ NoMacros = []Macro{}
+)
+
+// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
+const AccumulatorName = "__result__"
+
+type quantifierKind int
+
+const (
+ quantifierAll quantifierKind = iota
+ quantifierExists
+ quantifierExistsOne
+)
+
+func makeAll(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierAll, eh, target, args)
+}
+
+func makeExists(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierExists, eh, target, args)
+}
+
+func makeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ return makeQuantifier(quantifierExistsOne, eh, target, args)
+}
+
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ location := eh.OffsetLocation(args[0].Id)
+ return nil, &common.Error{
+ Message: "argument must be a simple name",
+ Location: location}
+ }
+ accuIdent := func() *exprpb.Expr {
+ return eh.Ident(AccumulatorName)
+ }
+
+ var init *exprpb.Expr
+ var condition *exprpb.Expr
+ var step *exprpb.Expr
+ var result *exprpb.Expr
+ switch kind {
+ case quantifierAll:
+ init = eh.LiteralBool(true)
+ condition = eh.GlobalCall(operators.NotStrictlyFalse, accuIdent())
+ step = eh.GlobalCall(operators.LogicalAnd, accuIdent(), args[1])
+ result = accuIdent()
+ case quantifierExists:
+ init = eh.LiteralBool(false)
+ condition = eh.GlobalCall(
+ operators.NotStrictlyFalse,
+ eh.GlobalCall(operators.LogicalNot, accuIdent()))
+ step = eh.GlobalCall(operators.LogicalOr, accuIdent(), args[1])
+ result = accuIdent()
+ case quantifierExistsOne:
+ // TODO: make consistent with the CEL semantics.
+ zeroExpr := eh.LiteralInt(0)
+ oneExpr := eh.LiteralInt(1)
+ init = zeroExpr
+ condition = eh.GlobalCall(operators.LessEquals, accuIdent(), oneExpr)
+ step = eh.GlobalCall(operators.Conditional, args[1],
+ eh.GlobalCall(operators.Add, accuIdent(), oneExpr), accuIdent())
+ result = eh.GlobalCall(operators.Equals, accuIdent(), oneExpr)
+ default:
+ return nil, &common.Error{Message: fmt.Sprintf("unrecognized quantifier '%v'", kind)}
+ }
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil
+}
+
+func makeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, &common.Error{Message: "argument is not an identifier"}
+ }
+
+ var fn *exprpb.Expr
+ var filter *exprpb.Expr
+
+ if len(args) == 3 {
+ filter = args[1]
+ fn = args[2]
+ } else {
+ filter = nil
+ fn = args[1]
+ }
+
+ accuExpr := eh.Ident(AccumulatorName)
+ init := eh.NewList()
+ condition := eh.LiteralBool(true)
+ // TODO: use compiler internal method for faster, stateful add.
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(fn))
+
+ if filter != nil {
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
+ }
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
+}
+
+func makeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, &common.Error{Message: "argument is not an identifier"}
+ }
+
+ filter := args[1]
+ accuExpr := eh.Ident(AccumulatorName)
+ init := eh.NewList()
+ condition := eh.LiteralBool(true)
+ // TODO: use compiler internal method for faster, stateful add.
+ step := eh.GlobalCall(operators.Add, accuExpr, eh.NewList(args[0]))
+ step = eh.GlobalCall(operators.Conditional, filter, step, accuExpr)
+ return eh.Fold(v, target, AccumulatorName, init, condition, step, accuExpr), nil
+}
+
+func extractIdent(e *exprpb.Expr) (string, bool) {
+ switch e.ExprKind.(type) {
+ case *exprpb.Expr_IdentExpr:
+ return e.GetIdentExpr().Name, true
+ }
+ return "", false
+}
+
+func makeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok {
+ return eh.PresenceTest(s.SelectExpr.Operand, s.SelectExpr.Field), nil
+ }
+ return nil, &common.Error{Message: "invalid argument to has() macro"}
+}
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
new file mode 100644
index 00000000000..3deda8a2332
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -0,0 +1,593 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser declares an expression parser with support for macro
+// expansion.
+package parser
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/parser/gen"
+
+ structpb "github.com/golang/protobuf/ptypes/struct"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Parse converts a source input to a parsed expression.
+// This function calls ParseWithMacros with AllMacros.
+func Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) {
+ return ParseWithMacros(source, AllMacros)
+}
+
+// ParseWithMacros converts a source input and macros set to a parsed expression.
+func ParseWithMacros(source common.Source, macros []Macro) (*exprpb.ParsedExpr, *common.Errors) {
+ macroMap := make(map[string]Macro)
+ for _, m := range macros {
+ macroMap[m.MacroKey()] = m
+ }
+ p := parser{
+ errors: &parseErrors{common.NewErrors(source)},
+ helper: newParserHelper(source),
+ macros: macroMap,
+ }
+ e := p.parse(source.Content())
+ return &exprpb.ParsedExpr{
+ Expr: e,
+ SourceInfo: p.helper.getSourceInfo(),
+ }, p.errors.Errors
+}
+
+type parser struct {
+ gen.BaseCELVisitor
+ errors *parseErrors
+ helper *parserHelper
+ macros map[string]Macro
+}
+
+var _ gen.CELVisitor = (*parser)(nil)
+
+func (p *parser) parse(expression string) *exprpb.Expr {
+ stream := antlr.NewInputStream(expression)
+ lexer := gen.NewCELLexer(stream)
+ prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0))
+
+ lexer.RemoveErrorListeners()
+ prsr.RemoveErrorListeners()
+ lexer.AddErrorListener(p)
+ prsr.AddErrorListener(p)
+
+ return p.Visit(prsr.Start()).(*exprpb.Expr)
+}
+
+// Visitor implementations.
+func (p *parser) Visit(tree antlr.ParseTree) interface{} {
+ switch tree.(type) {
+ case *gen.StartContext:
+ return p.VisitStart(tree.(*gen.StartContext))
+ case *gen.ExprContext:
+ return p.VisitExpr(tree.(*gen.ExprContext))
+ case *gen.ConditionalAndContext:
+ return p.VisitConditionalAnd(tree.(*gen.ConditionalAndContext))
+ case *gen.ConditionalOrContext:
+ return p.VisitConditionalOr(tree.(*gen.ConditionalOrContext))
+ case *gen.RelationContext:
+ return p.VisitRelation(tree.(*gen.RelationContext))
+ case *gen.CalcContext:
+ return p.VisitCalc(tree.(*gen.CalcContext))
+ case *gen.LogicalNotContext:
+ return p.VisitLogicalNot(tree.(*gen.LogicalNotContext))
+ case *gen.MemberExprContext:
+ return p.VisitMemberExpr(tree.(*gen.MemberExprContext))
+ case *gen.PrimaryExprContext:
+ return p.VisitPrimaryExpr(tree.(*gen.PrimaryExprContext))
+ case *gen.SelectOrCallContext:
+ return p.VisitSelectOrCall(tree.(*gen.SelectOrCallContext))
+ case *gen.MapInitializerListContext:
+ return p.VisitMapInitializerList(tree.(*gen.MapInitializerListContext))
+ case *gen.NegateContext:
+ return p.VisitNegate(tree.(*gen.NegateContext))
+ case *gen.IndexContext:
+ return p.VisitIndex(tree.(*gen.IndexContext))
+ case *gen.UnaryContext:
+ return p.VisitUnary(tree.(*gen.UnaryContext))
+ case *gen.CreateListContext:
+ return p.VisitCreateList(tree.(*gen.CreateListContext))
+ case *gen.CreateMessageContext:
+ return p.VisitCreateMessage(tree.(*gen.CreateMessageContext))
+ case *gen.CreateStructContext:
+ return p.VisitCreateStruct(tree.(*gen.CreateStructContext))
+ }
+
+ text := "<>"
+ if tree != nil {
+ text = tree.GetText()
+ }
+ panic(fmt.Sprintf("unknown parsetree type: '%+v': %+v [%s]", reflect.TypeOf(tree), tree, text))
+}
+
+// Visit a parse tree produced by CELParser#start.
+func (p *parser) VisitStart(ctx *gen.StartContext) interface{} {
+ return p.Visit(ctx.Expr())
+}
+
+// Visit a parse tree produced by CELParser#expr.
+func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ if ctx.GetOp() == nil {
+ return result
+ }
+ opID := p.helper.id(ctx.GetOp())
+ ifTrue := p.Visit(ctx.GetE1()).(*exprpb.Expr)
+ ifFalse := p.Visit(ctx.GetE2()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
+}
+
+// Visit a parse tree produced by CELParser#conditionalOr.
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ if ctx.GetOps() == nil {
+ return result
+ }
+ b := newBalancer(p.helper, operators.LogicalOr, result)
+ for i, op := range ctx.GetOps() {
+ next := p.Visit(ctx.GetE1()[i]).(*exprpb.Expr)
+ opID := p.helper.id(op)
+ b.addTerm(opID, next)
+ }
+ return b.balance()
+}
+
+// Visit a parse tree produced by CELParser#conditionalAnd.
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{} {
+ result := p.Visit(ctx.GetE()).(*exprpb.Expr)
+ if ctx.GetOps() == nil {
+ return result
+ }
+ b := newBalancer(p.helper, operators.LogicalAnd, result)
+ for i, op := range ctx.GetOps() {
+ next := p.Visit(ctx.GetE1()[i]).(*exprpb.Expr)
+ opID := p.helper.id(op)
+ b.addTerm(opID, next)
+ }
+ return b.balance()
+}
+
+// Visit a parse tree produced by CELParser#relation.
+func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
+ if ctx.Calc() != nil {
+ return p.Visit(ctx.Calc())
+ }
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Relation(0)).(*exprpb.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Relation(1)).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+// Visit a parse tree produced by CELParser#calc.
+func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
+ if ctx.Unary() != nil {
+ return p.Visit(ctx.Unary())
+ }
+ opText := ""
+ if ctx.GetOp() != nil {
+ opText = ctx.GetOp().GetText()
+ }
+ if op, found := operators.Find(opText); found {
+ lhs := p.Visit(ctx.Calc(0)).(*exprpb.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ rhs := p.Visit(ctx.Calc(1)).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, op, lhs, rhs)
+ }
+ return p.reportError(ctx, "operator not found")
+}
+
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) interface{} {
+ return p.helper.newLiteralString(ctx, "<>")
+}
+
+// Visit a parse tree produced by CELParser#MemberExpr.
+func (p *parser) VisitMemberExpr(ctx *gen.MemberExprContext) interface{} {
+ switch ctx.Member().(type) {
+ case *gen.PrimaryExprContext:
+ return p.VisitPrimaryExpr(ctx.Member().(*gen.PrimaryExprContext))
+ case *gen.SelectOrCallContext:
+ return p.VisitSelectOrCall(ctx.Member().(*gen.SelectOrCallContext))
+ case *gen.IndexContext:
+ return p.VisitIndex(ctx.Member().(*gen.IndexContext))
+ case *gen.CreateMessageContext:
+ return p.VisitCreateMessage(ctx.Member().(*gen.CreateMessageContext))
+ }
+ return p.reportError(ctx, "unsupported simple expression")
+}
+
+// Visit a parse tree produced by CELParser#LogicalNot.
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.LogicalNot, target)
+}
+
+func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
+ if len(ctx.GetOps())%2 == 0 {
+ return p.Visit(ctx.Member())
+ }
+ opID := p.helper.id(ctx.GetOps()[0])
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.Negate, target)
+}
+
+// Visit a parse tree produced by CELParser#SelectOrCall.
+func (p *parser) VisitSelectOrCall(ctx *gen.SelectOrCallContext) interface{} {
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ if ctx.GetOpen() != nil {
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitList(ctx.GetArgs())...)
+ }
+ return p.helper.newSelect(ctx.GetOp(), operand, id)
+}
+
+// Visit a parse tree produced by CELParser#PrimaryExpr.
+func (p *parser) VisitPrimaryExpr(ctx *gen.PrimaryExprContext) interface{} {
+ switch ctx.Primary().(type) {
+ case *gen.NestedContext:
+ return p.VisitNested(ctx.Primary().(*gen.NestedContext))
+ case *gen.IdentOrGlobalCallContext:
+ return p.VisitIdentOrGlobalCall(ctx.Primary().(*gen.IdentOrGlobalCallContext))
+ case *gen.CreateListContext:
+ return p.VisitCreateList(ctx.Primary().(*gen.CreateListContext))
+ case *gen.CreateStructContext:
+ return p.VisitCreateStruct(ctx.Primary().(*gen.CreateStructContext))
+ case *gen.ConstantLiteralContext:
+ return p.VisitConstantLiteral(ctx.Primary().(*gen.ConstantLiteralContext))
+ }
+
+ return p.reportError(ctx, "invalid primary expression")
+}
+
+// Visit a parse tree produced by CELParser#Index.
+func (p *parser) VisitIndex(ctx *gen.IndexContext) interface{} {
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ opID := p.helper.id(ctx.GetOp())
+ index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
+ return p.globalCallOrMacro(opID, operators.Index, target, index)
+}
+
+// Visit a parse tree produced by CELParser#CreateMessage.
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) interface{} {
+ target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ objID := p.helper.id(ctx.GetOp())
+ if messageName, found := p.extractQualifiedName(target); found {
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ return p.helper.newObject(objID, messageName, entries...)
+ }
+ return p.helper.newExpr(objID)
+}
+
+// Visit a parse tree of field initializers.
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) interface{} {
+ if ctx == nil || ctx.GetFields() == nil {
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+
+ result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetFields()))
+ for i, f := range ctx.GetFields() {
+ initID := p.helper.id(ctx.GetCols()[i])
+ value := p.Visit(ctx.GetValues()[i]).(*exprpb.Expr)
+ field := p.helper.newObjectField(initID, f.GetText(), value)
+ result[i] = field
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) interface{} {
+ identName := ""
+ if ctx.GetLeadingDot() != nil {
+ identName = "."
+ }
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ identName += ctx.GetId().GetText()
+ if ctx.GetOp() != nil {
+ opID := p.helper.id(ctx.GetOp())
+ return p.globalCallOrMacro(opID, identName, p.visitList(ctx.GetArgs())...)
+ }
+ return p.helper.newIdent(ctx.GetId(), identName)
+}
+
+// Visit a parse tree produced by CELParser#Nested.
+func (p *parser) VisitNested(ctx *gen.NestedContext) interface{} {
+ return p.Visit(ctx.GetE())
+}
+
+// Visit a parse tree produced by CELParser#CreateList.
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) interface{} {
+ listID := p.helper.id(ctx.GetOp())
+ return p.helper.newList(listID, p.visitList(ctx.GetElems())...)
+}
+
+// Visit a parse tree produced by CELParser#CreateStruct.
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
+ structID := p.helper.id(ctx.GetOp())
+ entries := []*exprpb.Expr_CreateStruct_Entry{}
+ if ctx.GetEntries() != nil {
+ entries = p.Visit(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ }
+ return p.helper.newMap(structID, entries...)
+}
+
+// Visit a parse tree produced by CELParser#ConstantLiteral.
+//
+// Dispatches to the literal-specific visitor; anything else is reported as
+// an invalid literal.
+func (p *parser) VisitConstantLiteral(ctx *gen.ConstantLiteralContext) interface{} {
+	switch literal := ctx.Literal().(type) {
+	case *gen.IntContext:
+		return p.VisitInt(literal)
+	case *gen.UintContext:
+		return p.VisitUint(literal)
+	case *gen.DoubleContext:
+		return p.VisitDouble(literal)
+	case *gen.StringContext:
+		return p.VisitString(literal)
+	case *gen.BytesContext:
+		return p.VisitBytes(literal)
+	case *gen.BoolFalseContext:
+		return p.VisitBoolFalse(literal)
+	case *gen.BoolTrueContext:
+		return p.VisitBoolTrue(literal)
+	case *gen.NullContext:
+		return p.VisitNull(literal)
+	default:
+		return p.reportError(ctx, "invalid literal")
+	}
+}
+
+// Visit a parse tree produced by CELParser#exprList.
+//
+// Collects the visited expressions; a nil context yields an empty slice.
+func (p *parser) VisitExprList(ctx *gen.ExprListContext) interface{} {
+	if ctx == nil {
+		return []*exprpb.Expr{}
+	}
+	return p.visitSlice(ctx.GetE())
+}
+
+// Visit a parse tree produced by CELParser#mapInitializerList.
+//
+// Produces one CreateStruct entry per key/value pair. Keys, values, and
+// column tokens are index-aligned by the grammar.
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) interface{} {
+	if ctx == nil || ctx.GetKeys() == nil {
+		return []*exprpb.Expr_CreateStruct_Entry{}
+	}
+
+	result := make([]*exprpb.Expr_CreateStruct_Entry, len(ctx.GetCols()))
+	for i, col := range ctx.GetCols() {
+		// Each entry is tracked under the ID of its column token.
+		colID := p.helper.id(col)
+		key := p.Visit(ctx.GetKeys()[i]).(*exprpb.Expr)
+		value := p.Visit(ctx.GetValues()[i]).(*exprpb.Expr)
+		entry := p.helper.newMapEntry(colID, key, value)
+		result[i] = entry
+	}
+	return result
+}
+
+// Visit a parse tree produced by CELParser#Int.
+//
+// Parses a decimal int64 literal, applying any unary sign token.
+func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
+	literal := ctx.GetTok().GetText()
+	if sign := ctx.GetSign(); sign != nil {
+		literal = sign.GetText() + literal
+	}
+	parsed, err := strconv.ParseInt(literal, 10, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid int literal")
+	}
+	return p.helper.newLiteralInt(ctx, parsed)
+}
+
+// Visit a parse tree produced by CELParser#Uint.
+//
+// Parses a decimal uint64 literal after stripping the trailing 'u'
+// designator.
+func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
+	text := ctx.GetTok().GetText()
+	// trim the 'u' designator included in the uint literal.
+	text = text[:len(text)-1]
+	i, err := strconv.ParseUint(text, 10, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid uint literal")
+	}
+	return p.helper.newLiteralUint(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Double.
+//
+// Parses a float64 literal, applying any unary sign token.
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
+	literal := ctx.GetTok().GetText()
+	if sign := ctx.GetSign(); sign != nil {
+		literal = sign.GetText() + literal
+	}
+	parsed, err := strconv.ParseFloat(literal, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid double literal")
+	}
+	return p.helper.newLiteralDouble(ctx, parsed)
+}
+
+// Visit a parse tree produced by CELParser#String.
+//
+// Unquotes/unescapes the raw token text; unquote reports any error itself.
+func (p *parser) VisitString(ctx *gen.StringContext) interface{} {
+	s := p.unquote(ctx, ctx.GetText())
+	return p.helper.newLiteralString(ctx, s)
+}
+
+// Visit a parse tree produced by CELParser#Bytes.
+//
+// The leading byte of the token text is sliced off before unquoting —
+// presumably the 'b'/'B' bytes designator; confirm against the lexer rule.
+func (p *parser) VisitBytes(ctx *gen.BytesContext) interface{} {
+	// TODO(ozben): Not sure if this is the right encoding.
+	b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:]))
+	return p.helper.newLiteralBytes(ctx, b)
+}
+
+// Visit a parse tree produced by CELParser#BoolTrue.
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) interface{} {
+	return p.helper.newLiteralBool(ctx, true)
+}
+
+// Visit a parse tree produced by CELParser#BoolFalse.
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) interface{} {
+	return p.helper.newLiteralBool(ctx, false)
+}
+
+// Visit a parse tree produced by CELParser#Null.
+//
+// Emits a constant holding the protobuf NullValue sentinel.
+func (p *parser) VisitNull(ctx *gen.NullContext) interface{} {
+	return p.helper.newLiteral(ctx,
+		&exprpb.Constant{
+			ConstantKind: &exprpb.Constant_NullValue{
+				NullValue: structpb.NullValue_NULL_VALUE}})
+}
+
+// visitList visits the expressions of an expression-list context; a nil
+// context yields an empty (non-nil) slice.
+func (p *parser) visitList(ctx gen.IExprListContext) []*exprpb.Expr {
+	if ctx == nil {
+		return []*exprpb.Expr{}
+	}
+	return p.visitSlice(ctx.GetE())
+}
+
+// visitSlice visits each expression context in order and collects the
+// resulting expressions; a nil input yields an empty (non-nil) slice.
+func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
+	out := make([]*exprpb.Expr, 0, len(expressions))
+	for _, e := range expressions {
+		out = append(out, p.Visit(e).(*exprpb.Expr))
+	}
+	return out
+}
+
+// extractQualifiedName flattens a chain of select expressions rooted at an
+// identifier into a dotted name (e.g. "a.b.c"), reporting whether the
+// expression had that shape. Any other shape reports a parse error and
+// returns false.
+func (p *parser) extractQualifiedName(e *exprpb.Expr) (string, bool) {
+	if e == nil {
+		return "", false
+	}
+	switch e.ExprKind.(type) {
+	case *exprpb.Expr_IdentExpr:
+		return e.GetIdentExpr().Name, true
+	case *exprpb.Expr_SelectExpr:
+		s := e.GetSelectExpr()
+		if prefix, found := p.extractQualifiedName(s.Operand); found {
+			return prefix + "." + s.Field, true
+		}
+	}
+	// TODO: Add a method to Source to get location from character offset.
+	location := p.helper.getLocation(e.Id)
+	p.reportError(location, "expected a qualified name")
+	return "", false
+}
+
+// unquote unescapes a quoted literal. On failure it reports the error at
+// ctx and returns the input unchanged so parsing can continue.
+func (p *parser) unquote(ctx interface{}, value string) string {
+	text, err := unescape(value)
+	if err != nil {
+		p.reportError(ctx, err.Error())
+		return value
+	}
+	return text
+}
+
+// reportError records a parse error at the location described by ctx and
+// returns an error placeholder expression callers can use as their result.
+//
+// ctx may be a common.Location, an antlr.Token, or an antlr.ParserRuleContext;
+// for the latter two the location is derived from the placeholder's tracked
+// expression ID. Fix: the original invoked p.helper.newExpr(ctx) a second
+// time after the switch, allocating two tracked expression IDs per reported
+// error on the token/rule-context path; the placeholder is now created once.
+func (p *parser) reportError(ctx interface{}, format string, args ...interface{}) *exprpb.Expr {
+	var location common.Location
+	err := p.helper.newExpr(ctx)
+	switch c := ctx.(type) {
+	case common.Location:
+		location = c
+	case antlr.Token, antlr.ParserRuleContext:
+		location = p.helper.getLocation(err.Id)
+	}
+	// Provide arguments to the report error.
+	p.errors.ReportError(location, format, args...)
+	return err
+}
+
+// ANTLR Parse listener implementations
+
+// SyntaxError records an ANTLR-reported lexer/parser syntax error at the
+// offending line/column in the parser's error list.
+func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
+	// TODO: Snippet
+	l := common.NewLocation(line, column)
+	p.errors.syntaxError(l, msg)
+}
+
+// ReportAmbiguity is an intentional no-op; ANTLR ambiguity diagnostics are
+// ignored.
+func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
+	// Intentional
+}
+
+// ReportAttemptingFullContext is an intentional no-op; ANTLR full-context
+// diagnostics are ignored.
+func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
+	// Intentional
+}
+
+// ReportContextSensitivity is an intentional no-op; ANTLR context-sensitivity
+// diagnostics are ignored.
+func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs antlr.ATNConfigSet) {
+	// Intentional
+}
+
+// globalCallOrMacro expands the invocation as a macro when one is registered
+// for the function; otherwise it emits a plain global call expression.
+func (p *parser) globalCallOrMacro(exprID int64, function string, args ...*exprpb.Expr) *exprpb.Expr {
+	expr, isMacro := p.expandMacro(exprID, function, nil, args...)
+	if isMacro {
+		return expr
+	}
+	return p.helper.newGlobalCall(exprID, function, args...)
+}
+
+// receiverCallOrMacro expands the invocation as a macro when one is
+// registered for the function/target pair; otherwise it emits a plain
+// receiver-style call expression.
+func (p *parser) receiverCallOrMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+	expr, isMacro := p.expandMacro(exprID, function, target, args...)
+	if isMacro {
+		return expr
+	}
+	return p.helper.newReceiverCall(exprID, function, target, args...)
+}
+
+// expandMacro attempts to expand the function invocation as a macro,
+// reporting whether a macro was found. Exact-arity macros take precedence
+// over variadic ones. Expansion errors are reported and replaced with an
+// error expression while still returning true, so callers do not also emit
+// a plain call.
+func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr, args ...*exprpb.Expr) (*exprpb.Expr, bool) {
+	macro, found := p.macros[makeMacroKey(function, len(args), target != nil)]
+	if !found {
+		macro, found = p.macros[makeVarArgMacroKey(function, target != nil)]
+		if !found {
+			return nil, false
+		}
+	}
+	// Expander helpers are pooled to avoid a per-expansion allocation; the
+	// helper is reinitialized before use and returned on exit.
+	eh := exprHelperPool.Get().(*exprHelper)
+	defer exprHelperPool.Put(eh)
+	eh.parserHelper = p.helper
+	eh.id = exprID
+	expr, err := macro.Expander()(eh, target, args)
+	if err != nil {
+		if err.Location != nil {
+			return p.reportError(err.Location, err.Message), true
+		}
+		return p.reportError(p.helper.getLocation(exprID), err.Message), true
+	}
+	return expr, true
+}
diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go
new file mode 100644
index 00000000000..bca155a727c
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unescape.go
@@ -0,0 +1,225 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// Unescape takes a quoted string, unquotes, and unescapes it.
+//
+// This function performs escaping compatible with GoogleSQL.
+// Supported forms: optional r/R raw prefix, single- or double-quoted
+// strings, and triple-quoted ('''…''' / """…""") strings; all newline
+// styles are normalized to \n first.
+func unescape(value string) (string, error) {
+	// All strings normalize newlines to the \n representation.
+	value = newlineNormalizer.Replace(value)
+	n := len(value)
+
+	// Nothing to unescape / decode.
+	if n < 2 {
+		return value, fmt.Errorf("unable to unescape string")
+	}
+
+	// Raw string preceded by the 'r|R' prefix.
+	isRawLiteral := false
+	if value[0] == 'r' || value[0] == 'R' {
+		value = value[1:]
+		n = len(value)
+		isRawLiteral = true
+	}
+
+	// Quoted string of some form, must have same first and last char.
+	if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
+		return value, fmt.Errorf("unable to unescape string")
+	}
+
+	// Normalize the multi-line CEL string representation to a standard
+	// Go quoted string.
+	if n >= 6 {
+		if strings.HasPrefix(value, "'''") {
+			if !strings.HasSuffix(value, "'''") {
+				return value, fmt.Errorf("unable to unescape string")
+			}
+			value = "\"" + value[3:n-3] + "\""
+		} else if strings.HasPrefix(value, `"""`) {
+			if !strings.HasSuffix(value, `"""`) {
+				return value, fmt.Errorf("unable to unescape string")
+			}
+			value = "\"" + value[3:n-3] + "\""
+		}
+		n = len(value)
+	}
+	// Strip the outer quotes.
+	value = value[1 : n-1]
+	// If there is nothing to escape, then return.
+	if isRawLiteral || !strings.ContainsRune(value, '\\') {
+		return value, nil
+	}
+
+	// Otherwise the string contains escape characters.
+	// The following logic is adapted from `strconv/quote.go`
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*n/2)
+	for len(value) > 0 {
+		c, multibyte, rest, err := unescapeChar(value)
+		if err != nil {
+			return "", err
+		}
+		value = rest
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+	}
+	return string(buf), nil
+}
+
+// unescapeChar takes a string input and returns the following info:
+//
+//   value - the escaped unicode rune at the front of the string.
+//   multibyte - whether the rune value might require multiple bytes to represent.
+//   tail - the remainder of the input string.
+//   err - error value, if the character could not be unescaped.
+//
+// When multibyte is true the return value may still fit within a single byte,
+// but a multibyte conversion is attempted which is more expensive than when the
+// value is known to fit within one byte.
+func unescapeChar(s string) (value rune, multibyte bool, tail string, err error) {
+	// 1. Character is not an escape sequence.
+	switch c := s[0]; {
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// 2. Last character is the start of an escape sequence.
+	if len(s) <= 1 {
+		err = fmt.Errorf("unable to unescape string, found '\\' as last character")
+		return
+	}
+
+	c := s[1]
+	s = s[2:]
+	// 3. Common escape sequences shared with Google SQL
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case '\\':
+		value = '\\'
+	case '\'':
+		value = '\''
+	case '"':
+		value = '"'
+	case '`':
+		value = '`'
+	case '?':
+		value = '?'
+
+	// 4. Unicode escape sequences, reproduced from `strconv/quote.go`
+	case 'x', 'X', 'u', 'U':
+		// Number of hex digits expected for each escape form.
+		n := 0
+		switch c {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = fmt.Errorf("unable to unescape string")
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = fmt.Errorf("unable to unescape string")
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if v > utf8.MaxRune {
+			err = fmt.Errorf("unable to unescape string")
+			return
+		}
+		value = v
+		multibyte = true
+
+	// 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+	case '0', '1', '2', '3':
+		if len(s) < 2 {
+			err = fmt.Errorf("unable to unescape octal sequence in string")
+			return
+		}
+		// First digit came from c; the remaining two follow in s.
+		v := rune(c - '0')
+		for j := 0; j < 2; j++ {
+			x := s[j]
+			if x < '0' || x > '7' {
+				err = fmt.Errorf("unable to unescape octal sequence in string")
+				return
+			}
+			v = v*8 + rune(x-'0')
+		}
+		if v > utf8.MaxRune {
+			err = fmt.Errorf("unable to unescape string")
+			return
+		}
+		value = v
+		s = s[2:]
+		multibyte = true
+
+	// Unknown escape sequence.
+	default:
+		err = fmt.Errorf("unable to unescape string")
+	}
+
+	tail = s
+	return
+}
+
+// unhex converts an ASCII hex digit to its numeric value, reporting whether
+// b was a valid hex digit.
+func unhex(b byte) (rune, bool) {
+	r := rune(b)
+	switch {
+	case r >= '0' && r <= '9':
+		return r - '0', true
+	case r >= 'a' && r <= 'f':
+		return r - 'a' + 10, true
+	case r >= 'A' && r <= 'F':
+		return r - 'A' + 10, true
+	default:
+		return 0, false
+	}
+}
+
+var (
+	// newlineNormalizer rewrites Windows (\r\n) and bare-\r line endings to
+	// \n before unescaping.
+	newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go
new file mode 100644
index 00000000000..092277e1f64
--- /dev/null
+++ b/vendor/golang.org/x/text/width/gen.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates the trie for width operations. The generated table
+// includes width category information as well as the normalization mappings.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/triegen"
+)
+
+// See gen_common.go for flags.
+
+// main generates the width trie tables and test data, then repackages the
+// generator-shared sources into the width package.
+func main() {
+	gen.Init()
+	genTables()
+	genTests()
+	gen.Repackage("gen_trieval.go", "trieval.go", "width")
+	gen.Repackage("gen_common.go", "common_test.go", "width")
+}
+
+// genTables builds the width trie and the inverseData mapping table and
+// writes them to the configured output file.
+func genTables() {
+	t := triegen.NewTrie("width")
+	// fold and inverse mappings. See mapComment for a description of the format
+	// of each entry. Add dummy value to make an index of 0 mean no mapping.
+	inverse := [][4]byte{{}}
+	mapping := map[[4]byte]int{[4]byte{}: 0}
+
+	getWidthData(func(r rune, tag elem, alt rune) {
+		idx := 0
+		if alt != 0 {
+			// Encode the alternate rune as UTF-8 with its length in byte 0,
+			// xor-ing the last byte with the original rune's last byte (see
+			// the inverseData comment), and deduplicate entries.
+			var buf [4]byte
+			buf[0] = byte(utf8.EncodeRune(buf[1:], alt))
+			s := string(r)
+			buf[buf[0]] ^= s[len(s)-1]
+			var ok bool
+			if idx, ok = mapping[buf]; !ok {
+				idx = len(mapping)
+				if idx > math.MaxUint8 {
+					// The elem low byte stores the index, so it must fit in 8 bits.
+					log.Fatalf("Index %d does not fit in a byte.", idx)
+				}
+				mapping[buf] = idx
+				inverse = append(inverse, buf)
+			}
+		}
+		t.Insert(r, uint64(tag|elem(idx)))
+	})
+
+	w := &bytes.Buffer{}
+	gen.WriteUnicodeVersion(w)
+
+	sz, err := t.Gen(w)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	sz += writeMappings(w, inverse)
+
+	fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024)
+
+	gen.WriteVersionedGoFile(*outputFile, "width", w.Bytes())
+}
+
+// inverseDataComment documents the layout of the generated inverseData table.
+// NOTE(review): the entry-format line below had its angle-bracketed tokens
+// stripped during extraction; restored from the upstream x/text generator.
+const inverseDataComment = `
+// inverseData contains 4-byte entries of the following format:
+//   <length> <modified UTF-8-encoded rune> <0 padding up to 4 bytes>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+// A -> A (U+FF21 -> U+0041)
+// B -> B (U+FF22 -> U+0042)
+// ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+// { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+// E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+// E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.`
+
+// writeMappings emits the inverseData table declaration to w and returns the
+// number of table bytes it represents (4 per entry).
+func writeMappings(w io.Writer, data [][4]byte) int {
+	fmt.Fprintln(w, inverseDataComment)
+	fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data))
+	for _, entry := range data {
+		fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", entry[0], entry[1], entry[2], entry[3])
+	}
+	fmt.Fprintln(w, "}")
+	return 4 * len(data)
+}
+
+// genTests emits runes_test.go containing the rune-to-alternate mapping used
+// by the width package tests.
+func genTests() {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n")
+	getWidthData(func(r rune, tag elem, alt rune) {
+		// Only runes with an alternate form participate in the mapping.
+		if alt != 0 {
+			fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag)
+		}
+	})
+	fmt.Fprintln(w, "}")
+	gen.WriteGoFile("runes_test.go", "width", w.Bytes())
+}
diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go
new file mode 100644
index 00000000000..601e7526843
--- /dev/null
+++ b/vendor/golang.org/x/text/width/gen_common.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// This code is shared between the main code generator and the test code.
+
+import (
+ "flag"
+ "log"
+ "strconv"
+ "strings"
+
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/internal/ucd"
+)
+
+var (
+	// outputFile is the destination file for the generated width tables.
+	outputFile = flag.String("out", "tables.go", "output file")
+)
+
+// typeMap maps East_Asian_Width property values (as they appear in
+// EastAsianWidth.txt) to their elem tag.
+var typeMap = map[string]elem{
+	"A":  tagAmbiguous,
+	"N":  tagNeutral,
+	"Na": tagNarrow,
+	"W":  tagWide,
+	"F":  tagFullwidth,
+	"H":  tagHalfwidth,
+}
+
+// getWidthData calls f for every entry for which it is defined.
+//
+// f may be called multiple times for the same rune. The last call to f is the
+// correct value. f is not called for all runes. The default tag type is
+// Neutral.
+//
+// Fix: the decomposition-tag filter map had both keys garbled to "" (a
+// duplicate-key compile error); restored to the upstream "<wide>"/"<narrow>"
+// UnicodeData.txt decomposition tags, along with the stripped
+// "<rune range>;<type>" format comment.
+func getWidthData(f func(r rune, tag elem, alt rune)) {
+	// Set the default values for Unified Ideographs. In line with Annex 11,
+	// we encode full ranges instead of the defined runes in Unified_Ideograph.
+	for _, b := range []struct{ lo, hi rune }{
+		{0x4E00, 0x9FFF},   // the CJK Unified Ideographs block,
+		{0x3400, 0x4DBF},   // the CJK Unified Ideographs Extension A block,
+		{0xF900, 0xFAFF},   // the CJK Compatibility Ideographs block,
+		{0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane,
+		{0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane,
+	} {
+		for r := b.lo; r <= b.hi; r++ {
+			f(r, tagWide, 0)
+		}
+	}
+
+	inverse := map[rune]rune{}
+	maps := map[string]bool{
+		"<wide>":   true,
+		"<narrow>": true,
+	}
+
+	// We cannot reuse package norm's decomposition, as we need an unexpanded
+	// decomposition. We make use of the opportunity to verify that the
+	// decomposition type is as expected.
+	ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
+		r := p.Rune(0)
+		s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2)
+		if !maps[s[0]] {
+			return
+		}
+		x, err := strconv.ParseUint(s[1], 16, 32)
+		if err != nil {
+			log.Fatalf("Error parsing rune %q", s[1])
+		}
+		if inverse[r] != 0 || inverse[rune(x)] != 0 {
+			log.Fatalf("Circular dependency in mapping between %U and %U", r, x)
+		}
+		inverse[r] = rune(x)
+		inverse[rune(x)] = r
+	})
+
+	// <rune range>;<type>
+	ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) {
+		tag, ok := typeMap[p.String(1)]
+		if !ok {
+			log.Fatalf("Unknown width type %q", p.String(1))
+		}
+		r := p.Rune(0)
+		alt, ok := inverse[r]
+		// Fullwidth runes, and halfwidth runes other than the Won sign,
+		// fold to an alternate form and must have a decomposition.
+		if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign {
+			tag |= tagNeedsFold
+			if !ok {
+				log.Fatalf("Narrow or wide rune %U has no decomposition", r)
+			}
+		}
+		f(r, tag, alt)
+	})
+}
diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go
new file mode 100644
index 00000000000..c17334aa618
--- /dev/null
+++ b/vendor/golang.org/x/text/width/gen_trieval.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+// elem is an entry of the width trie. The high byte is used to encode the type
+// of the rune. The low byte is used to store the index to a mapping entry in
+// the inverseData array.
+type elem uint16
+
+const (
+	// Each subsequent constant repeats the iota<<typeShift expression, so
+	// the tags occupy the top numTypeBits bits: 0<<13, 1<<13, 2<<13, ...
+	tagNeutral elem = iota << typeShift
+	tagAmbiguous
+	tagWide
+	tagNarrow
+	tagFullwidth
+	tagHalfwidth
+)
+
+const (
+	numTypeBits = 3
+	typeShift   = 16 - numTypeBits
+
+	// tagNeedsFold is true for all fullwidth and halfwidth runes except for
+	// the Won sign U+20A9.
+	tagNeedsFold = 0x1000
+
+	// The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide
+	// variant.
+	wonSign rune = 0x20A9
+)
diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go
new file mode 100644
index 00000000000..49bfbf72683
--- /dev/null
+++ b/vendor/golang.org/x/text/width/kind_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=Kind"; DO NOT EDIT.
+
+package width
+
+import "fmt"
+
+// _Kind_name concatenates all Kind value names; _Kind_index holds each
+// name's start offset within it (stringer-generated pair).
+const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth"
+
+var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89}
+
+// String returns the name of the Kind, or "Kind(n)" for out-of-range values.
+func (i Kind) String() string {
+	if i < 0 || i >= Kind(len(_Kind_index)-1) {
+		return fmt.Sprintf("Kind(%d)", i)
+	}
+	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go
new file mode 100644
index 00000000000..f4988626731
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables10.0.0.go
@@ -0,0 +1,1318 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.10
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "10.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+//
+// Generated accessor: each multi-byte case validates one continuation byte
+// and walks one widthIndex level before the final lookupValue.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+//
+// Unlike lookup, no length or continuation-byte validation is performed.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+//
+// String counterpart of lookup; the bodies are generated from the same
+// template.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+//
+// String counterpart of lookupUnsafe; performs no validation.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a.
+type widthTrie struct{}
+
+// newWidthTrie returns the (stateless) width trie; the argument is unused —
+// presumably retained to match the generator's constructor signature.
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+//
+// Generated switch with only a default arm: every block in this table is
+// indexed directly into widthValues.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
+
+// widthValues: 101 blocks, 6464 entries, 12928 bytes
+// The third block is the zero block.
+var widthValues = [6464]uint16{
+ // Block 0x0, offset 0x0
+ 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002,
+ 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002,
+ 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002,
+ 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002,
+ 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002,
+ 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002,
+ // Block 0x1, offset 0x40
+ 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003,
+ 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003,
+ 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003,
+ 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003,
+ 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003,
+ 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004,
+ 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004,
+ 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004,
+ 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004,
+ 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004,
+ 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005,
+ 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000,
+ 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008,
+ 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000,
+ 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000,
+ 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000,
+ // Block 0x4, offset 0x100
+ 0x106: 0x2000,
+ 0x110: 0x2000,
+ 0x117: 0x2000,
+ 0x118: 0x2000,
+ 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000,
+ 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000,
+ 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000,
+ 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000,
+ 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000,
+ 0x13c: 0x2000, 0x13e: 0x2000,
+ // Block 0x5, offset 0x140
+ 0x141: 0x2000,
+ 0x151: 0x2000,
+ 0x153: 0x2000,
+ 0x15b: 0x2000,
+ 0x166: 0x2000, 0x167: 0x2000,
+ 0x16b: 0x2000,
+ 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000,
+ 0x178: 0x2000,
+ 0x17f: 0x2000,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000,
+ 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000,
+ 0x18d: 0x2000,
+ 0x192: 0x2000, 0x193: 0x2000,
+ 0x1a6: 0x2000, 0x1a7: 0x2000,
+ 0x1ab: 0x2000,
+ // Block 0x7, offset 0x1c0
+ 0x1ce: 0x2000, 0x1d0: 0x2000,
+ 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000,
+ 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000,
+ // Block 0x8, offset 0x200
+ 0x211: 0x2000,
+ 0x221: 0x2000,
+ // Block 0x9, offset 0x240
+ 0x244: 0x2000,
+ 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000,
+ 0x24d: 0x2000, 0x250: 0x2000,
+ 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000,
+ 0x25f: 0x2000,
+ // Block 0xa, offset 0x280
+ 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000,
+ 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000,
+ 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000,
+ 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000,
+ 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000,
+ 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000,
+ 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000,
+ 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000,
+ 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000,
+ 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000,
+ 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000,
+ 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000,
+ 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000,
+ 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000,
+ 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000,
+ 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000,
+ 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000,
+ 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000,
+ // Block 0xc, offset 0x300
+ 0x311: 0x2000,
+ 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000,
+ 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000,
+ 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000,
+ 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000,
+ 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000,
+ 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000,
+ 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000,
+ // Block 0xd, offset 0x340
+ 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000,
+ 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000,
+ // Block 0xe, offset 0x380
+ 0x381: 0x2000,
+ 0x390: 0x2000, 0x391: 0x2000,
+ 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000,
+ 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000,
+ 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000,
+ 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000,
+ 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000,
+ 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000,
+ 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000,
+ 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000,
+ 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000,
+ 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000,
+ // Block 0x10, offset 0x400
+ 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000,
+ 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000,
+ 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000,
+ 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000,
+ 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000,
+ 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000,
+ 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000,
+ 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000,
+ 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000,
+ 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000,
+ 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000,
+ // Block 0x11, offset 0x440
+ 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000,
+ 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000,
+ 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000,
+ 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000,
+ 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000,
+ 0x45e: 0x4000, 0x45f: 0x4000,
+ // Block 0x12, offset 0x480
+ 0x490: 0x2000,
+ 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000,
+ 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000,
+ 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000,
+ 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000,
+ 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000,
+ 0x4bb: 0x2000,
+ 0x4be: 0x2000,
+ // Block 0x13, offset 0x4c0
+ 0x4f4: 0x2000,
+ 0x4ff: 0x2000,
+ // Block 0x14, offset 0x500
+ 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000,
+ 0x529: 0xa009,
+ 0x52c: 0x2000,
+ // Block 0x15, offset 0x540
+ 0x543: 0x2000, 0x545: 0x2000,
+ 0x549: 0x2000,
+ 0x553: 0x2000, 0x556: 0x2000,
+ 0x561: 0x2000, 0x562: 0x2000,
+ 0x566: 0x2000,
+ 0x56b: 0x2000,
+ // Block 0x16, offset 0x580
+ 0x593: 0x2000, 0x594: 0x2000,
+ 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000,
+ 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000,
+ 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000,
+ 0x5aa: 0x2000, 0x5ab: 0x2000,
+ 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000,
+ 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000,
+ // Block 0x17, offset 0x5c0
+ 0x5c9: 0x2000,
+ 0x5d0: 0x200a, 0x5d1: 0x200b,
+ 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000,
+ 0x5d8: 0x2000, 0x5d9: 0x2000,
+ 0x5f8: 0x2000, 0x5f9: 0x2000,
+ // Block 0x18, offset 0x600
+ 0x612: 0x2000, 0x614: 0x2000,
+ 0x627: 0x2000,
+ // Block 0x19, offset 0x640
+ 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000,
+ 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000,
+ 0x64f: 0x2000, 0x651: 0x2000,
+ 0x655: 0x2000,
+ 0x65a: 0x2000, 0x65d: 0x2000,
+ 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000,
+ 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000,
+ 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000,
+ 0x674: 0x2000, 0x675: 0x2000,
+ 0x676: 0x2000, 0x677: 0x2000,
+ 0x67c: 0x2000, 0x67d: 0x2000,
+ // Block 0x1a, offset 0x680
+ 0x688: 0x2000,
+ 0x68c: 0x2000,
+ 0x692: 0x2000,
+ 0x6a0: 0x2000, 0x6a1: 0x2000,
+ 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000,
+ 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000,
+ // Block 0x1b, offset 0x6c0
+ 0x6c2: 0x2000, 0x6c3: 0x2000,
+ 0x6c6: 0x2000, 0x6c7: 0x2000,
+ 0x6d5: 0x2000,
+ 0x6d9: 0x2000,
+ 0x6e5: 0x2000,
+ 0x6ff: 0x2000,
+ // Block 0x1c, offset 0x700
+ 0x712: 0x2000,
+ 0x71a: 0x4000, 0x71b: 0x4000,
+ 0x729: 0x4000,
+ 0x72a: 0x4000,
+ // Block 0x1d, offset 0x740
+ 0x769: 0x4000,
+ 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000,
+ 0x770: 0x4000, 0x773: 0x4000,
+ // Block 0x1e, offset 0x780
+ 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000,
+ 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000,
+ 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000,
+ 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000,
+ 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000,
+ 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000,
+ 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000,
+ 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000,
+ 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000,
+ 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000,
+ 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000,
+ 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000,
+ 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000,
+ 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000,
+ 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000,
+ 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000,
+ // Block 0x20, offset 0x800
+ 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000,
+ 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000,
+ 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000,
+ 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000,
+ 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000,
+ 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000,
+ 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000,
+ 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000,
+ 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000,
+ 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000,
+ 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000,
+ // Block 0x21, offset 0x840
+ 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000,
+ 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000,
+ 0x850: 0x2000, 0x851: 0x2000,
+ 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000,
+ 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000,
+ 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000,
+ 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000,
+ 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000,
+ 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000,
+ // Block 0x22, offset 0x880
+ 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000,
+ 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000,
+ 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000,
+ 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000,
+ 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000,
+ 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000,
+ 0x8b2: 0x2000, 0x8b3: 0x2000,
+ 0x8b6: 0x2000, 0x8b7: 0x2000,
+ 0x8bc: 0x2000, 0x8bd: 0x2000,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x2000, 0x8c1: 0x2000,
+ 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f,
+ 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000,
+ 0x8e2: 0x2000, 0x8e3: 0x2000,
+ 0x8e4: 0x2000, 0x8e5: 0x2000,
+ 0x8ef: 0x2000,
+ 0x8fd: 0x4000, 0x8fe: 0x4000,
+ // Block 0x24, offset 0x900
+ 0x905: 0x2000,
+ 0x906: 0x2000, 0x909: 0x2000,
+ 0x90e: 0x2000, 0x90f: 0x2000,
+ 0x914: 0x4000, 0x915: 0x4000,
+ 0x91c: 0x2000,
+ 0x91e: 0x2000,
+ // Block 0x25, offset 0x940
+ 0x940: 0x2000, 0x942: 0x2000,
+ 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000,
+ 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000,
+ 0x952: 0x4000, 0x953: 0x4000,
+ 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000,
+ 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000,
+ 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000,
+ 0x97f: 0x4000,
+ // Block 0x26, offset 0x980
+ 0x993: 0x4000,
+ 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000,
+ 0x9aa: 0x4000, 0x9ab: 0x4000,
+ 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000,
+ // Block 0x27, offset 0x9c0
+ 0x9c4: 0x4000, 0x9c5: 0x4000,
+ 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000,
+ 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000,
+ 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000,
+ 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000,
+ 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000,
+ 0x9e8: 0x2000, 0x9e9: 0x2000,
+ 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000,
+ 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000,
+ 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000,
+ 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000,
+ // Block 0x28, offset 0xa00
+ 0xa05: 0x4000,
+ 0xa0a: 0x4000, 0xa0b: 0x4000,
+ 0xa28: 0x4000,
+ 0xa3d: 0x2000,
+ // Block 0x29, offset 0xa40
+ 0xa4c: 0x4000, 0xa4e: 0x4000,
+ 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000,
+ 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000,
+ 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000,
+ // Block 0x2a, offset 0xa80
+ 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000,
+ 0xab0: 0x4000,
+ 0xabf: 0x4000,
+ // Block 0x2b, offset 0xac0
+ 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000,
+ 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000,
+ // Block 0x2c, offset 0xb00
+ 0xb05: 0x6010,
+ 0xb06: 0x6011,
+ // Block 0x2d, offset 0xb40
+ 0xb5b: 0x4000, 0xb5c: 0x4000,
+ // Block 0x2e, offset 0xb80
+ 0xb90: 0x4000,
+ 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000,
+ 0xb98: 0x2000, 0xb99: 0x2000,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000,
+ 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000,
+ 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000,
+ 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000,
+ 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000,
+ 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000,
+ 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000,
+ 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000,
+ 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000,
+ 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000,
+ 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000,
+ 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000,
+ 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000,
+ 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000,
+ 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000,
+ 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000,
+ 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000,
+ 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000,
+ 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000,
+ 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000,
+ 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000,
+ 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000,
+ 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000,
+ 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000,
+ 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000,
+ 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000,
+ 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000,
+ 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000,
+ 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000,
+ 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000,
+ 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000,
+ 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000,
+ 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000,
+ 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000,
+ // Block 0x33, offset 0xcc0
+ 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000,
+ 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000,
+ 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000,
+ 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000,
+ 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000,
+ 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000,
+ 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000,
+ 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000,
+ 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000,
+ 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000,
+ 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000,
+ 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000,
+ 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000,
+ 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000,
+ 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000,
+ 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a,
+ 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020,
+ 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023,
+ 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026,
+ 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028,
+ 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000,
+ 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f,
+ 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000,
+ 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000,
+ 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000,
+ 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036,
+ 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038,
+ 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035,
+ 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000,
+ 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d,
+ 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000,
+ // Block 0x36, offset 0xd80
+ 0xd85: 0x4000,
+ 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000,
+ 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000,
+ 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000,
+ 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000,
+ 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000,
+ 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000,
+ 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000,
+ 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e,
+ 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e,
+ 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037,
+ 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037,
+ 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040,
+ 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044,
+ 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045,
+ 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c,
+ 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000,
+ 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000,
+ 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000,
+ 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000,
+ 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000,
+ 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000,
+ 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000,
+ 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000,
+ 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000,
+ 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000,
+ 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000,
+ 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000,
+ 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000,
+ 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000,
+ 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000,
+ 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000,
+ 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000,
+ 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000,
+ 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000,
+ 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000,
+ 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000,
+ 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000,
+ 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000,
+ 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000,
+ 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000,
+ 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000,
+ 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000,
+ 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000,
+ 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000,
+ 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000,
+ 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000,
+ 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000,
+ 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000,
+ 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000,
+ 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000,
+ 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000,
+ 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000,
+ 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000,
+ 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000,
+ 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000,
+ 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000,
+ 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000,
+ 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000,
+ 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000,
+ 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000,
+ 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000,
+ 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000,
+ 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000,
+ 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000,
+ 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000,
+ 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000,
+ 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000,
+ 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000,
+ 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000,
+ 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000,
+ 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000,
+ 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000,
+ 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000,
+ 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000,
+ 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000,
+ 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000,
+ 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000,
+ 0xf86: 0x4000,
+ // Block 0x3f, offset 0xfc0
+ 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000,
+ 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000,
+ 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000,
+ 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000,
+ 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000,
+ 0xffc: 0x4000,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000,
+ 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000,
+ 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000,
+ 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000,
+ 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000,
+ 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000,
+ 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000,
+ 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000,
+ 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000,
+ 0x1058: 0x4000, 0x1059: 0x4000,
+ 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000,
+ 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000,
+ 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000,
+ 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000,
+ 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000,
+ 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000,
+ 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000,
+ 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000,
+ 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000,
+ 0x10aa: 0x4000, 0x10ab: 0x4000,
+ // Block 0x43, offset 0x10c0
+ 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012,
+ 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012,
+ 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012,
+ 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012,
+ 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012,
+ 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049,
+ 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049,
+ 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049,
+ 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049,
+ 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049,
+ 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049,
+ 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049,
+ 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049,
+ 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049,
+ 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049,
+ 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d,
+ 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053,
+ 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059,
+ 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f,
+ 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065,
+ 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056,
+ 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f,
+ 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072,
+ 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075,
+ 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078,
+ 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b,
+ 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b,
+ 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b,
+ 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c,
+ 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c,
+ 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c,
+ // Block 0x46, offset 0x1180
+ 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080,
+ 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082,
+ 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f,
+ 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087,
+ 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a,
+ 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d,
+ 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091,
+ 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095,
+ 0x11bd: 0x2000,
+ // Block 0x47, offset 0x11c0
+ 0x11e0: 0x4000, 0x11e1: 0x4000,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000,
+ 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000,
+ 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000,
+ 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000,
+ 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000,
+ 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000,
+ 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000,
+ 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000,
+ 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000,
+ 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000,
+ 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000,
+ 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000,
+ 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000,
+ 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000,
+ 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000,
+ 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000,
+ 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000,
+ 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000,
+ 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000,
+ 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000,
+ 0x129e: 0x4000,
+ // Block 0x4b, offset 0x12c0
+ 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000,
+ 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000,
+ 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000,
+ 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000,
+ 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000,
+ 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000,
+ 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000,
+ 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000,
+ 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000,
+ 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000,
+ 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000,
+ 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000,
+ // Block 0x4d, offset 0x1340
+ 0x1344: 0x4000,
+ // Block 0x4e, offset 0x1380
+ 0x138f: 0x4000,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000,
+ 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000,
+ 0x13d0: 0x2000, 0x13d1: 0x2000,
+ 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000,
+ 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000,
+ 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000,
+ 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000,
+ 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000,
+ 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000,
+ 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000,
+ 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000,
+ 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000,
+ 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000,
+ 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000,
+ 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000,
+ 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000,
+ 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000,
+ 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000,
+ 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000,
+ 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000,
+ 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000,
+ 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000,
+ 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000,
+ 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000,
+ 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000,
+ 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000,
+ 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000,
+ 0x1490: 0x4000, 0x1491: 0x4000,
+ 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000,
+ 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000,
+ 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000,
+ 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000,
+ 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000,
+ 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000,
+ 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000,
+ 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000,
+ 0x14d0: 0x4000, 0x14d1: 0x4000,
+ 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000,
+ 0x14e4: 0x4000, 0x14e5: 0x4000,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000,
+ 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000,
+ 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000,
+ 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000,
+ 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000,
+ 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000,
+ 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000,
+ 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000,
+ 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000,
+ 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000,
+ 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000,
+ 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000,
+ 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000,
+ 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000,
+ 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000,
+ 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000,
+ 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000,
+ 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000,
+ 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000,
+ 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000,
+ 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000,
+ 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000,
+ 0x1592: 0x4000, 0x1593: 0x4000,
+ 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000,
+ 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000,
+ 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000,
+ 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000,
+ 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000,
+ 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000,
+ 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000,
+ 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000,
+ 0x15d2: 0x4000, 0x15d3: 0x4000,
+ 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000,
+ 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000,
+ 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000,
+ 0x15f0: 0x4000, 0x15f4: 0x4000,
+ 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000,
+ 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000,
+ 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000,
+ 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000,
+ 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000,
+ 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000,
+ 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000,
+ 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000,
+ 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000,
+ 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000,
+ 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000,
+ 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000,
+ 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000,
+ 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000,
+ 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000,
+ 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000,
+ 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000,
+ 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000,
+ 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000,
+ 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000,
+ 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000,
+ 0x167c: 0x4000, 0x167f: 0x4000,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000,
+ 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000,
+ 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000,
+ 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000,
+ 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000,
+ 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000,
+ 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000,
+ 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000,
+ 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000,
+ 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000,
+ 0x16bc: 0x4000, 0x16bd: 0x4000,
+ // Block 0x5b, offset 0x16c0
+ 0x16cb: 0x4000,
+ 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000,
+ 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000,
+ 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000,
+ 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000,
+ 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000,
+ 0x16fa: 0x4000,
+ // Block 0x5c, offset 0x1700
+ 0x1715: 0x4000, 0x1716: 0x4000,
+ 0x1724: 0x4000,
+ // Block 0x5d, offset 0x1740
+ 0x177b: 0x4000,
+ 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000,
+ 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000,
+ 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000,
+ 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000,
+ 0x17d2: 0x4000,
+ 0x17eb: 0x4000, 0x17ec: 0x4000,
+ 0x17f4: 0x4000, 0x17f5: 0x4000,
+ 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000,
+ // Block 0x60, offset 0x1800
+ 0x1810: 0x4000, 0x1811: 0x4000,
+ 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000,
+ 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000,
+ 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000,
+ 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000,
+ 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000,
+ 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000,
+ 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000,
+ 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000,
+ 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000,
+ 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000,
+ 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000,
+ 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000,
+ 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000,
+ 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000,
+ 0x186a: 0x4000, 0x186b: 0x4000,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000,
+ 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000,
+ 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000,
+ 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x4000,
+ 0x18d0: 0x4000, 0x18d1: 0x4000,
+ 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000,
+ 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000,
+ 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000,
+ 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000,
+ 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000,
+ 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000,
+ 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000,
+ 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000,
+ 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000,
+ 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000,
+ 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000,
+ 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000,
+ 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000,
+ 0x193c: 0x2000, 0x193d: 0x2000,
+}
+
+// widthIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var widthIndex = [1408]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05,
+ 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b,
+ 0xd0: 0x0c, 0xd1: 0x0d,
+ 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06,
+ 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a,
+ 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13,
+ // Block 0x4, offset 0x100
+ 0x104: 0x0e, 0x105: 0x0f,
+ // Block 0x5, offset 0x140
+ 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16,
+ 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b,
+ 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21,
+ 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29,
+ 0x166: 0x2a,
+ 0x16c: 0x2b, 0x16d: 0x2c,
+ 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f,
+ // Block 0x6, offset 0x180
+ 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37,
+ 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e,
+ 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e,
+ 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e,
+ 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e,
+ 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e,
+ 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e,
+ 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e,
+ 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e,
+ 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e,
+ 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e,
+ 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e,
+ 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e,
+ 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e,
+ 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e,
+ // Block 0x8, offset 0x200
+ 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e,
+ 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e,
+ 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e,
+ 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e,
+ 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e,
+ 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e,
+ 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e,
+ 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e,
+ 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e,
+ 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c,
+ 0x265: 0x3d,
+ 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e,
+ 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e,
+ // Block 0xa, offset 0x280
+ 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e,
+ 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e,
+ 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e,
+ 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08,
+ 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08,
+ 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08,
+ 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08,
+ 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08,
+ 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08,
+ 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08,
+ 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08,
+ // Block 0xc, offset 0x300
+ 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08,
+ 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08,
+ 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08,
+ 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08,
+ 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e,
+ 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e,
+ 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44,
+ // Block 0xd, offset 0x340
+ 0x37f: 0x45,
+ // Block 0xe, offset 0x380
+ 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e,
+ 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e,
+ 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e,
+ 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46,
+ 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e,
+ 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e,
+ 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a,
+ // Block 0x10, offset 0x400
+ 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f,
+ 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55,
+ 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b,
+ 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d,
+ 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61,
+ // Block 0x11, offset 0x440
+ 0x456: 0x0b, 0x457: 0x06,
+ 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e,
+ 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06,
+ 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06,
+ 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06,
+ 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06,
+ // Block 0x12, offset 0x480
+ 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08,
+ 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08,
+ 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08,
+ 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08,
+ 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08,
+ 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08,
+ 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08,
+ 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62,
+ // Block 0x14, offset 0x500
+ 0x520: 0x10,
+ 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09,
+ 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11,
+ // Block 0x15, offset 0x540
+ 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09,
+ 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11,
+}
+
+// inverseData contains 4-byte entries of the following format:
+// <0 padding>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+// A -> A (U+FF21 -> U+0041)
+// B -> B (U+FF22 -> U+0042)
+// ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+// { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+// E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+// E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.
+var inverseData = [150][4]byte{
+ {0x00, 0x00, 0x00, 0x00},
+ {0x03, 0xe3, 0x80, 0xa0},
+ {0x03, 0xef, 0xbc, 0xa0},
+ {0x03, 0xef, 0xbc, 0xe0},
+ {0x03, 0xef, 0xbd, 0xe0},
+ {0x03, 0xef, 0xbf, 0x02},
+ {0x03, 0xef, 0xbf, 0x00},
+ {0x03, 0xef, 0xbf, 0x0e},
+ {0x03, 0xef, 0xbf, 0x0c},
+ {0x03, 0xef, 0xbf, 0x0f},
+ {0x03, 0xef, 0xbf, 0x39},
+ {0x03, 0xef, 0xbf, 0x3b},
+ {0x03, 0xef, 0xbf, 0x3f},
+ {0x03, 0xef, 0xbf, 0x2a},
+ {0x03, 0xef, 0xbf, 0x0d},
+ {0x03, 0xef, 0xbf, 0x25},
+ {0x03, 0xef, 0xbd, 0x1a},
+ {0x03, 0xef, 0xbd, 0x26},
+ {0x01, 0xa0, 0x00, 0x00},
+ {0x03, 0xef, 0xbd, 0x25},
+ {0x03, 0xef, 0xbd, 0x23},
+ {0x03, 0xef, 0xbd, 0x2e},
+ {0x03, 0xef, 0xbe, 0x07},
+ {0x03, 0xef, 0xbe, 0x05},
+ {0x03, 0xef, 0xbd, 0x06},
+ {0x03, 0xef, 0xbd, 0x13},
+ {0x03, 0xef, 0xbd, 0x0b},
+ {0x03, 0xef, 0xbd, 0x16},
+ {0x03, 0xef, 0xbd, 0x0c},
+ {0x03, 0xef, 0xbd, 0x15},
+ {0x03, 0xef, 0xbd, 0x0d},
+ {0x03, 0xef, 0xbd, 0x1c},
+ {0x03, 0xef, 0xbd, 0x02},
+ {0x03, 0xef, 0xbd, 0x1f},
+ {0x03, 0xef, 0xbd, 0x1d},
+ {0x03, 0xef, 0xbd, 0x17},
+ {0x03, 0xef, 0xbd, 0x08},
+ {0x03, 0xef, 0xbd, 0x09},
+ {0x03, 0xef, 0xbd, 0x0e},
+ {0x03, 0xef, 0xbd, 0x04},
+ {0x03, 0xef, 0xbd, 0x05},
+ {0x03, 0xef, 0xbe, 0x3f},
+ {0x03, 0xef, 0xbe, 0x00},
+ {0x03, 0xef, 0xbd, 0x2c},
+ {0x03, 0xef, 0xbe, 0x06},
+ {0x03, 0xef, 0xbe, 0x0c},
+ {0x03, 0xef, 0xbe, 0x0f},
+ {0x03, 0xef, 0xbe, 0x0d},
+ {0x03, 0xef, 0xbe, 0x0b},
+ {0x03, 0xef, 0xbe, 0x19},
+ {0x03, 0xef, 0xbe, 0x15},
+ {0x03, 0xef, 0xbe, 0x11},
+ {0x03, 0xef, 0xbe, 0x31},
+ {0x03, 0xef, 0xbe, 0x33},
+ {0x03, 0xef, 0xbd, 0x0f},
+ {0x03, 0xef, 0xbe, 0x30},
+ {0x03, 0xef, 0xbe, 0x3e},
+ {0x03, 0xef, 0xbe, 0x32},
+ {0x03, 0xef, 0xbe, 0x36},
+ {0x03, 0xef, 0xbd, 0x14},
+ {0x03, 0xef, 0xbe, 0x2e},
+ {0x03, 0xef, 0xbd, 0x1e},
+ {0x03, 0xef, 0xbe, 0x10},
+ {0x03, 0xef, 0xbf, 0x13},
+ {0x03, 0xef, 0xbf, 0x15},
+ {0x03, 0xef, 0xbf, 0x17},
+ {0x03, 0xef, 0xbf, 0x1f},
+ {0x03, 0xef, 0xbf, 0x1d},
+ {0x03, 0xef, 0xbf, 0x1b},
+ {0x03, 0xef, 0xbf, 0x09},
+ {0x03, 0xef, 0xbf, 0x0b},
+ {0x03, 0xef, 0xbf, 0x37},
+ {0x03, 0xef, 0xbe, 0x04},
+ {0x01, 0xe0, 0x00, 0x00},
+ {0x03, 0xe2, 0xa6, 0x1a},
+ {0x03, 0xe2, 0xa6, 0x26},
+ {0x03, 0xe3, 0x80, 0x23},
+ {0x03, 0xe3, 0x80, 0x2e},
+ {0x03, 0xe3, 0x80, 0x25},
+ {0x03, 0xe3, 0x83, 0x1e},
+ {0x03, 0xe3, 0x83, 0x14},
+ {0x03, 0xe3, 0x82, 0x06},
+ {0x03, 0xe3, 0x82, 0x0b},
+ {0x03, 0xe3, 0x82, 0x0c},
+ {0x03, 0xe3, 0x82, 0x0d},
+ {0x03, 0xe3, 0x82, 0x02},
+ {0x03, 0xe3, 0x83, 0x0f},
+ {0x03, 0xe3, 0x83, 0x08},
+ {0x03, 0xe3, 0x83, 0x09},
+ {0x03, 0xe3, 0x83, 0x2c},
+ {0x03, 0xe3, 0x83, 0x0c},
+ {0x03, 0xe3, 0x82, 0x13},
+ {0x03, 0xe3, 0x82, 0x16},
+ {0x03, 0xe3, 0x82, 0x15},
+ {0x03, 0xe3, 0x82, 0x1c},
+ {0x03, 0xe3, 0x82, 0x1f},
+ {0x03, 0xe3, 0x82, 0x1d},
+ {0x03, 0xe3, 0x82, 0x1a},
+ {0x03, 0xe3, 0x82, 0x17},
+ {0x03, 0xe3, 0x82, 0x08},
+ {0x03, 0xe3, 0x82, 0x09},
+ {0x03, 0xe3, 0x82, 0x0e},
+ {0x03, 0xe3, 0x82, 0x04},
+ {0x03, 0xe3, 0x82, 0x05},
+ {0x03, 0xe3, 0x82, 0x3f},
+ {0x03, 0xe3, 0x83, 0x00},
+ {0x03, 0xe3, 0x83, 0x06},
+ {0x03, 0xe3, 0x83, 0x05},
+ {0x03, 0xe3, 0x83, 0x0d},
+ {0x03, 0xe3, 0x83, 0x0b},
+ {0x03, 0xe3, 0x83, 0x07},
+ {0x03, 0xe3, 0x83, 0x19},
+ {0x03, 0xe3, 0x83, 0x15},
+ {0x03, 0xe3, 0x83, 0x11},
+ {0x03, 0xe3, 0x83, 0x31},
+ {0x03, 0xe3, 0x83, 0x33},
+ {0x03, 0xe3, 0x83, 0x30},
+ {0x03, 0xe3, 0x83, 0x3e},
+ {0x03, 0xe3, 0x83, 0x32},
+ {0x03, 0xe3, 0x83, 0x36},
+ {0x03, 0xe3, 0x83, 0x2e},
+ {0x03, 0xe3, 0x82, 0x07},
+ {0x03, 0xe3, 0x85, 0x04},
+ {0x03, 0xe3, 0x84, 0x10},
+ {0x03, 0xe3, 0x85, 0x30},
+ {0x03, 0xe3, 0x85, 0x0d},
+ {0x03, 0xe3, 0x85, 0x13},
+ {0x03, 0xe3, 0x85, 0x15},
+ {0x03, 0xe3, 0x85, 0x17},
+ {0x03, 0xe3, 0x85, 0x1f},
+ {0x03, 0xe3, 0x85, 0x1d},
+ {0x03, 0xe3, 0x85, 0x1b},
+ {0x03, 0xe3, 0x85, 0x09},
+ {0x03, 0xe3, 0x85, 0x0f},
+ {0x03, 0xe3, 0x85, 0x0b},
+ {0x03, 0xe3, 0x85, 0x37},
+ {0x03, 0xe3, 0x85, 0x3b},
+ {0x03, 0xe3, 0x85, 0x39},
+ {0x03, 0xe3, 0x85, 0x3f},
+ {0x02, 0xc2, 0x02, 0x00},
+ {0x02, 0xc2, 0x0e, 0x00},
+ {0x02, 0xc2, 0x0c, 0x00},
+ {0x02, 0xc2, 0x00, 0x00},
+ {0x03, 0xe2, 0x82, 0x0f},
+ {0x03, 0xe2, 0x94, 0x2a},
+ {0x03, 0xe2, 0x86, 0x39},
+ {0x03, 0xe2, 0x86, 0x3b},
+ {0x03, 0xe2, 0x86, 0x3f},
+ {0x03, 0xe2, 0x96, 0x0d},
+ {0x03, 0xe2, 0x97, 0x25},
+}
+
+// Total table size 14936 bytes (14KiB)
diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go
new file mode 100644
index 00000000000..7069e26345b
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables9.0.0.go
@@ -0,0 +1,1286 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build !go1.10
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "9.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return widthValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = widthIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = widthIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = widthIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return widthValues[c0]
+ }
+ i := widthIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = widthIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = widthIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return widthValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = widthIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := widthIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = widthIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = widthIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return widthValues[c0]
+ }
+ i := widthIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = widthIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = widthIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+ return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ default:
+ return uint16(widthValues[n<<6+uint32(b)])
+ }
+}
+
+// widthValues: 99 blocks, 6336 entries, 12672 bytes
+// The third block is the zero block.
+var widthValues = [6336]uint16{
+ // Block 0x0, offset 0x0
+ 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002,
+ 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002,
+ 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002,
+ 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002,
+ 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002,
+ 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002,
+ // Block 0x1, offset 0x40
+ 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003,
+ 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003,
+ 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003,
+ 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003,
+ 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003,
+ 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004,
+ 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004,
+ 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004,
+ 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004,
+ 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004,
+ 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005,
+ 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000,
+ 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008,
+ 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000,
+ 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000,
+ 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000,
+ // Block 0x4, offset 0x100
+ 0x106: 0x2000,
+ 0x110: 0x2000,
+ 0x117: 0x2000,
+ 0x118: 0x2000,
+ 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000,
+ 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000,
+ 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000,
+ 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000,
+ 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000,
+ 0x13c: 0x2000, 0x13e: 0x2000,
+ // Block 0x5, offset 0x140
+ 0x141: 0x2000,
+ 0x151: 0x2000,
+ 0x153: 0x2000,
+ 0x15b: 0x2000,
+ 0x166: 0x2000, 0x167: 0x2000,
+ 0x16b: 0x2000,
+ 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000,
+ 0x178: 0x2000,
+ 0x17f: 0x2000,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000,
+ 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000,
+ 0x18d: 0x2000,
+ 0x192: 0x2000, 0x193: 0x2000,
+ 0x1a6: 0x2000, 0x1a7: 0x2000,
+ 0x1ab: 0x2000,
+ // Block 0x7, offset 0x1c0
+ 0x1ce: 0x2000, 0x1d0: 0x2000,
+ 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000,
+ 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000,
+ // Block 0x8, offset 0x200
+ 0x211: 0x2000,
+ 0x221: 0x2000,
+ // Block 0x9, offset 0x240
+ 0x244: 0x2000,
+ 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000,
+ 0x24d: 0x2000, 0x250: 0x2000,
+ 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000,
+ 0x25f: 0x2000,
+ // Block 0xa, offset 0x280
+ 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000,
+ 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000,
+ 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000,
+ 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000,
+ 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000,
+ 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000,
+ 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000,
+ 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000,
+ 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000,
+ 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000,
+ 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000,
+ 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000,
+ 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000,
+ 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000,
+ 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000,
+ 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000,
+ 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000,
+ 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000,
+ // Block 0xc, offset 0x300
+ 0x311: 0x2000,
+ 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000,
+ 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000,
+ 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000,
+ 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000,
+ 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000,
+ 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000,
+ 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000,
+ // Block 0xd, offset 0x340
+ 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000,
+ 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000,
+ // Block 0xe, offset 0x380
+ 0x381: 0x2000,
+ 0x390: 0x2000, 0x391: 0x2000,
+ 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000,
+ 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000,
+ 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000,
+ 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000,
+ 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000,
+ 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000,
+ 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000,
+ 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000,
+ 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000,
+ 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000,
+ // Block 0x10, offset 0x400
+ 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000,
+ 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000,
+ 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000,
+ 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000,
+ 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000,
+ 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000,
+ 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000,
+ 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000,
+ 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000,
+ 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000,
+ 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000,
+ // Block 0x11, offset 0x440
+ 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000,
+ 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000,
+ 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000,
+ 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000,
+ 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000,
+ 0x45e: 0x4000, 0x45f: 0x4000,
+ // Block 0x12, offset 0x480
+ 0x490: 0x2000,
+ 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000,
+ 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000,
+ 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000,
+ 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000,
+ 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000,
+ 0x4bb: 0x2000,
+ 0x4be: 0x2000,
+ // Block 0x13, offset 0x4c0
+ 0x4f4: 0x2000,
+ 0x4ff: 0x2000,
+ // Block 0x14, offset 0x500
+ 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000,
+ 0x529: 0xa009,
+ 0x52c: 0x2000,
+ // Block 0x15, offset 0x540
+ 0x543: 0x2000, 0x545: 0x2000,
+ 0x549: 0x2000,
+ 0x553: 0x2000, 0x556: 0x2000,
+ 0x561: 0x2000, 0x562: 0x2000,
+ 0x566: 0x2000,
+ 0x56b: 0x2000,
+ // Block 0x16, offset 0x580
+ 0x593: 0x2000, 0x594: 0x2000,
+ 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000,
+ 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000,
+ 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000,
+ 0x5aa: 0x2000, 0x5ab: 0x2000,
+ 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000,
+ 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000,
+ // Block 0x17, offset 0x5c0
+ 0x5c9: 0x2000,
+ 0x5d0: 0x200a, 0x5d1: 0x200b,
+ 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000,
+ 0x5d8: 0x2000, 0x5d9: 0x2000,
+ 0x5f8: 0x2000, 0x5f9: 0x2000,
+ // Block 0x18, offset 0x600
+ 0x612: 0x2000, 0x614: 0x2000,
+ 0x627: 0x2000,
+ // Block 0x19, offset 0x640
+ 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000,
+ 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000,
+ 0x64f: 0x2000, 0x651: 0x2000,
+ 0x655: 0x2000,
+ 0x65a: 0x2000, 0x65d: 0x2000,
+ 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000,
+ 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000,
+ 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000,
+ 0x674: 0x2000, 0x675: 0x2000,
+ 0x676: 0x2000, 0x677: 0x2000,
+ 0x67c: 0x2000, 0x67d: 0x2000,
+ // Block 0x1a, offset 0x680
+ 0x688: 0x2000,
+ 0x68c: 0x2000,
+ 0x692: 0x2000,
+ 0x6a0: 0x2000, 0x6a1: 0x2000,
+ 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000,
+ 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000,
+ // Block 0x1b, offset 0x6c0
+ 0x6c2: 0x2000, 0x6c3: 0x2000,
+ 0x6c6: 0x2000, 0x6c7: 0x2000,
+ 0x6d5: 0x2000,
+ 0x6d9: 0x2000,
+ 0x6e5: 0x2000,
+ 0x6ff: 0x2000,
+ // Block 0x1c, offset 0x700
+ 0x712: 0x2000,
+ 0x71a: 0x4000, 0x71b: 0x4000,
+ 0x729: 0x4000,
+ 0x72a: 0x4000,
+ // Block 0x1d, offset 0x740
+ 0x769: 0x4000,
+ 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000,
+ 0x770: 0x4000, 0x773: 0x4000,
+ // Block 0x1e, offset 0x780
+ 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000,
+ 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000,
+ 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000,
+ 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000,
+ 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000,
+ 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000,
+ 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000,
+ 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000,
+ 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000,
+ 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000,
+ 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000,
+ 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000,
+ 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000,
+ 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000,
+ 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000,
+ 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000,
+ // Block 0x20, offset 0x800
+ 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000,
+ 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000,
+ 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000,
+ 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000,
+ 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000,
+ 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000,
+ 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000,
+ 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000,
+ 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000,
+ 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000,
+ 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000,
+ // Block 0x21, offset 0x840
+ 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000,
+ 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000,
+ 0x850: 0x2000, 0x851: 0x2000,
+ 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000,
+ 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000,
+ 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000,
+ 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000,
+ 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000,
+ 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000,
+ // Block 0x22, offset 0x880
+ 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000,
+ 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000,
+ 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000,
+ 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000,
+ 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000,
+ 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000,
+ 0x8b2: 0x2000, 0x8b3: 0x2000,
+ 0x8b6: 0x2000, 0x8b7: 0x2000,
+ 0x8bc: 0x2000, 0x8bd: 0x2000,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x2000, 0x8c1: 0x2000,
+ 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f,
+ 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000,
+ 0x8e2: 0x2000, 0x8e3: 0x2000,
+ 0x8e4: 0x2000, 0x8e5: 0x2000,
+ 0x8ef: 0x2000,
+ 0x8fd: 0x4000, 0x8fe: 0x4000,
+ // Block 0x24, offset 0x900
+ 0x905: 0x2000,
+ 0x906: 0x2000, 0x909: 0x2000,
+ 0x90e: 0x2000, 0x90f: 0x2000,
+ 0x914: 0x4000, 0x915: 0x4000,
+ 0x91c: 0x2000,
+ 0x91e: 0x2000,
+ // Block 0x25, offset 0x940
+ 0x940: 0x2000, 0x942: 0x2000,
+ 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000,
+ 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000,
+ 0x952: 0x4000, 0x953: 0x4000,
+ 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000,
+ 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000,
+ 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000,
+ 0x97f: 0x4000,
+ // Block 0x26, offset 0x980
+ 0x993: 0x4000,
+ 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000,
+ 0x9aa: 0x4000, 0x9ab: 0x4000,
+ 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000,
+ // Block 0x27, offset 0x9c0
+ 0x9c4: 0x4000, 0x9c5: 0x4000,
+ 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000,
+ 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000,
+ 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000,
+ 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000,
+ 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000,
+ 0x9e8: 0x2000, 0x9e9: 0x2000,
+ 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000,
+ 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000,
+ 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000,
+ 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000,
+ // Block 0x28, offset 0xa00
+ 0xa05: 0x4000,
+ 0xa0a: 0x4000, 0xa0b: 0x4000,
+ 0xa28: 0x4000,
+ 0xa3d: 0x2000,
+ // Block 0x29, offset 0xa40
+ 0xa4c: 0x4000, 0xa4e: 0x4000,
+ 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000,
+ 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000,
+ 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000,
+ // Block 0x2a, offset 0xa80
+ 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000,
+ 0xab0: 0x4000,
+ 0xabf: 0x4000,
+ // Block 0x2b, offset 0xac0
+ 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000,
+ 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000,
+ // Block 0x2c, offset 0xb00
+ 0xb05: 0x6010,
+ 0xb06: 0x6011,
+ // Block 0x2d, offset 0xb40
+ 0xb5b: 0x4000, 0xb5c: 0x4000,
+ // Block 0x2e, offset 0xb80
+ 0xb90: 0x4000,
+ 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000,
+ 0xb98: 0x2000, 0xb99: 0x2000,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000,
+ 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000,
+ 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000,
+ 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000,
+ 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000,
+ 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000,
+ 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000,
+ 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000,
+ 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000,
+ 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000,
+ 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000,
+ 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000,
+ 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000,
+ 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000,
+ 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000,
+ 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000,
+ 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000,
+ 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000,
+ 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000,
+ 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000,
+ 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000,
+ 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000,
+ 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000,
+ 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000,
+ 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000,
+ 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000,
+ 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000,
+ 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000,
+ 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000,
+ 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000,
+ 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000,
+ 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000,
+ 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000,
+ 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000,
+ // Block 0x33, offset 0xcc0
+ 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000,
+ 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000,
+ 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000,
+ 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000,
+ 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000,
+ 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000,
+ 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000,
+ 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000,
+ 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000,
+ 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000,
+ 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000,
+ 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000,
+ 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000,
+ 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000,
+ 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000,
+ 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a,
+ 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020,
+ 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023,
+ 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026,
+ 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028,
+ 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000,
+ 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f,
+ 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000,
+ 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000,
+ 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000,
+ 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036,
+ 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038,
+ 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035,
+ 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000,
+ 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d,
+ 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000,
+ // Block 0x36, offset 0xd80
+ 0xd85: 0x4000,
+ 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000,
+ 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000,
+ 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000,
+ 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000,
+ 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000,
+ 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000,
+ 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000,
+ 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e,
+ 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e,
+ 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037,
+ 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037,
+ 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040,
+ 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044,
+ 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045,
+ 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c,
+ 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000,
+ 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000,
+ 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000,
+ 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000,
+ 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000,
+ 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000,
+ 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000,
+ 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000,
+ 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000,
+ 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000,
+ 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000,
+ 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000,
+ 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000,
+ 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000,
+ 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000,
+ 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000,
+ 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000,
+ 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000,
+ 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000,
+ 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000,
+ 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000,
+ 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000,
+ 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000,
+ 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000,
+ 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000,
+ 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000,
+ 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000,
+ 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000,
+ 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000,
+ 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000,
+ 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000,
+ 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000,
+ 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000,
+ 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000,
+ 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000,
+ 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000,
+ 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000,
+ 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000,
+ 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000,
+ 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000,
+ 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000,
+ 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000,
+ 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000,
+ 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000,
+ 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000,
+ 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000,
+ 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000,
+ 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000,
+ 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000,
+ 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000,
+ 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000,
+ 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000,
+ 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000,
+ 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000,
+ 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000,
+ 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000,
+ 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000,
+ 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000,
+ 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000,
+ 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000,
+ 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000,
+ 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000,
+ 0xf86: 0x4000,
+ // Block 0x3f, offset 0xfc0
+ 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000,
+ 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000,
+ 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000,
+ 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000,
+ 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000,
+ 0xffc: 0x4000,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000,
+ 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000,
+ 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000,
+ 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000,
+ 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000,
+ 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000,
+ 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000,
+ 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000,
+ 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000,
+ 0x1058: 0x4000, 0x1059: 0x4000,
+ 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000,
+ 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000,
+ 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000,
+ 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000,
+ 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000,
+ 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000,
+ 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000,
+ 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000,
+ 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000,
+ 0x10aa: 0x4000, 0x10ab: 0x4000,
+ // Block 0x43, offset 0x10c0
+ 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012,
+ 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012,
+ 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012,
+ 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012,
+ 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012,
+ 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049,
+ 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049,
+ 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049,
+ 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049,
+ 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049,
+ 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049,
+ 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049,
+ 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049,
+ 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049,
+ 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049,
+ 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d,
+ 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053,
+ 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059,
+ 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f,
+ 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065,
+ 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056,
+ 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f,
+ 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072,
+ 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075,
+ 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078,
+ 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b,
+ 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b,
+ 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b,
+ 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c,
+ 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c,
+ 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c,
+ // Block 0x46, offset 0x1180
+ 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080,
+ 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082,
+ 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f,
+ 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087,
+ 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a,
+ 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d,
+ 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091,
+ 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095,
+ 0x11bd: 0x2000,
+ // Block 0x47, offset 0x11c0
+ 0x11e0: 0x4000,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000,
+ 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000,
+ 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000,
+ 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000,
+ 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000,
+ 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000,
+ 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000,
+ 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000,
+ 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000,
+ 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000,
+ 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000,
+ 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000,
+ 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000,
+ 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000,
+ 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000,
+ 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x4000, 0x1281: 0x4000,
+ // Block 0x4b, offset 0x12c0
+ 0x12c4: 0x4000,
+ // Block 0x4c, offset 0x1300
+ 0x130f: 0x4000,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000,
+ 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000,
+ 0x1350: 0x2000, 0x1351: 0x2000,
+ 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000,
+ 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000,
+ 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000,
+ 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000,
+ 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000,
+ 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000,
+ 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000,
+ 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000,
+ 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000,
+ 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000,
+ 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000,
+ 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000,
+ 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000,
+ 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000,
+ 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000,
+ 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000,
+ 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000,
+ 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000,
+ 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000,
+ 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000,
+ 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000,
+ 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000,
+ 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000,
+ 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000,
+ 0x1410: 0x4000, 0x1411: 0x4000,
+ 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000,
+ 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000,
+ 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000,
+ 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000,
+ 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000,
+ 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000,
+ 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000,
+ 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000,
+ 0x1450: 0x4000, 0x1451: 0x4000,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000,
+ 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000,
+ 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000,
+ 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000,
+ 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000,
+ 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000,
+ 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000,
+ 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000,
+ 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000,
+ 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000,
+ 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000,
+ 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000,
+ 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000,
+ 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000,
+ 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000,
+ 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000,
+ 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000,
+ 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000,
+ 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000,
+ 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000,
+ 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000,
+ 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000,
+ 0x1512: 0x4000, 0x1513: 0x4000,
+ 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000,
+ 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000,
+ 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000,
+ 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000,
+ 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000,
+ 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000,
+ 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000,
+ 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000,
+ 0x1552: 0x4000, 0x1553: 0x4000,
+ 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000,
+ 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000,
+ 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000,
+ 0x1570: 0x4000, 0x1574: 0x4000,
+ 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000,
+ 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000,
+ 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000,
+ 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000,
+ 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000,
+ 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000,
+ 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000,
+ 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000,
+ 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000,
+ 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000,
+ 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000,
+ 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000,
+ 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000,
+ 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000,
+ 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000,
+ 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000,
+ 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000,
+ 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000,
+ 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000,
+ 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000,
+ 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000,
+ 0x15fc: 0x4000, 0x15ff: 0x4000,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000,
+ 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000,
+ 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000,
+ 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000,
+ 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000,
+ 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000,
+ 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000,
+ 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000,
+ 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000,
+ 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000,
+ 0x163c: 0x4000, 0x163d: 0x4000,
+ // Block 0x59, offset 0x1640
+ 0x164b: 0x4000,
+ 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000,
+ 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000,
+ 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000,
+ 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000,
+ 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000,
+ 0x167a: 0x4000,
+ // Block 0x5a, offset 0x1680
+ 0x1695: 0x4000, 0x1696: 0x4000,
+ 0x16a4: 0x4000,
+ // Block 0x5b, offset 0x16c0
+ 0x16fb: 0x4000,
+ 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000,
+ 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000,
+ 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000,
+ 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000,
+ 0x1752: 0x4000,
+ 0x176b: 0x4000, 0x176c: 0x4000,
+ 0x1774: 0x4000, 0x1775: 0x4000,
+ 0x1776: 0x4000,
+ // Block 0x5e, offset 0x1780
+ 0x1790: 0x4000, 0x1791: 0x4000,
+ 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000,
+ 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000,
+ 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000,
+ 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000,
+ 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000,
+ 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000,
+ 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000,
+ 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000,
+ 0x17d0: 0x4000, 0x17d1: 0x4000,
+ 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000,
+ 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000,
+ 0x17de: 0x4000,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000,
+ 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000,
+ 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x4000,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000,
+ 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000,
+ 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000,
+ 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000,
+ 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000,
+ 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000,
+ 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000,
+ 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000,
+ 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000,
+ 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000,
+ 0x18bc: 0x2000, 0x18bd: 0x2000,
+}
+
+// widthIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var widthIndex = [1408]uint8{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05,
+ 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b,
+ 0xd0: 0x0c, 0xd1: 0x0d,
+ 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06,
+ 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a,
+ 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13,
+ // Block 0x4, offset 0x100
+ 0x104: 0x0e, 0x105: 0x0f,
+ // Block 0x5, offset 0x140
+ 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16,
+ 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b,
+ 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21,
+ 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29,
+ 0x166: 0x2a,
+ 0x16c: 0x2b, 0x16d: 0x2c,
+ 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f,
+ // Block 0x6, offset 0x180
+ 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37,
+ 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e,
+ 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e,
+ 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e,
+ 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e,
+ 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e,
+ 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e,
+ 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e,
+ 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e,
+ 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e,
+ 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e,
+ 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e,
+ 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e,
+ 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e,
+ 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e,
+ // Block 0x8, offset 0x200
+ 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e,
+ 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e,
+ 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e,
+ 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e,
+ 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e,
+ 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e,
+ 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e,
+ 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e,
+ 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e,
+ 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c,
+ 0x265: 0x3d,
+ 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e,
+ 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e,
+ // Block 0xa, offset 0x280
+ 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e,
+ 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e,
+ 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e,
+ 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08,
+ 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08,
+ 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08,
+ 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08,
+ 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08,
+ 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08,
+ 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08,
+ 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08,
+ // Block 0xc, offset 0x300
+ 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08,
+ 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08,
+ 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08,
+ 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08,
+ 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e,
+ 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e,
+ 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44,
+ // Block 0xd, offset 0x340
+ 0x37f: 0x45,
+ // Block 0xe, offset 0x380
+ 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e,
+ 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e,
+ 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e,
+ 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46,
+ 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e,
+ 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x48,
+ // Block 0x10, offset 0x400
+ 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d,
+ 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53,
+ 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59,
+ 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b,
+ 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f,
+ // Block 0x11, offset 0x440
+ 0x456: 0x0b, 0x457: 0x06,
+ 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e,
+ 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06,
+ 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06,
+ 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06,
+ 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06,
+ // Block 0x12, offset 0x480
+ 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08,
+ 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08,
+ 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08,
+ 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08,
+ 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08,
+ 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08,
+ 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08,
+ 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60,
+ // Block 0x14, offset 0x500
+ 0x520: 0x10,
+ 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09,
+ 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11,
+ // Block 0x15, offset 0x540
+ 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09,
+ 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11,
+}
+
+// inverseData contains 4-byte entries of the following format:
+// <0 padding>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+// A -> A (U+FF21 -> U+0041)
+// B -> B (U+FF22 -> U+0042)
+// ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+// { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+// E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+// E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.
+var inverseData = [150][4]byte{
+ {0x00, 0x00, 0x00, 0x00},
+ {0x03, 0xe3, 0x80, 0xa0},
+ {0x03, 0xef, 0xbc, 0xa0},
+ {0x03, 0xef, 0xbc, 0xe0},
+ {0x03, 0xef, 0xbd, 0xe0},
+ {0x03, 0xef, 0xbf, 0x02},
+ {0x03, 0xef, 0xbf, 0x00},
+ {0x03, 0xef, 0xbf, 0x0e},
+ {0x03, 0xef, 0xbf, 0x0c},
+ {0x03, 0xef, 0xbf, 0x0f},
+ {0x03, 0xef, 0xbf, 0x39},
+ {0x03, 0xef, 0xbf, 0x3b},
+ {0x03, 0xef, 0xbf, 0x3f},
+ {0x03, 0xef, 0xbf, 0x2a},
+ {0x03, 0xef, 0xbf, 0x0d},
+ {0x03, 0xef, 0xbf, 0x25},
+ {0x03, 0xef, 0xbd, 0x1a},
+ {0x03, 0xef, 0xbd, 0x26},
+ {0x01, 0xa0, 0x00, 0x00},
+ {0x03, 0xef, 0xbd, 0x25},
+ {0x03, 0xef, 0xbd, 0x23},
+ {0x03, 0xef, 0xbd, 0x2e},
+ {0x03, 0xef, 0xbe, 0x07},
+ {0x03, 0xef, 0xbe, 0x05},
+ {0x03, 0xef, 0xbd, 0x06},
+ {0x03, 0xef, 0xbd, 0x13},
+ {0x03, 0xef, 0xbd, 0x0b},
+ {0x03, 0xef, 0xbd, 0x16},
+ {0x03, 0xef, 0xbd, 0x0c},
+ {0x03, 0xef, 0xbd, 0x15},
+ {0x03, 0xef, 0xbd, 0x0d},
+ {0x03, 0xef, 0xbd, 0x1c},
+ {0x03, 0xef, 0xbd, 0x02},
+ {0x03, 0xef, 0xbd, 0x1f},
+ {0x03, 0xef, 0xbd, 0x1d},
+ {0x03, 0xef, 0xbd, 0x17},
+ {0x03, 0xef, 0xbd, 0x08},
+ {0x03, 0xef, 0xbd, 0x09},
+ {0x03, 0xef, 0xbd, 0x0e},
+ {0x03, 0xef, 0xbd, 0x04},
+ {0x03, 0xef, 0xbd, 0x05},
+ {0x03, 0xef, 0xbe, 0x3f},
+ {0x03, 0xef, 0xbe, 0x00},
+ {0x03, 0xef, 0xbd, 0x2c},
+ {0x03, 0xef, 0xbe, 0x06},
+ {0x03, 0xef, 0xbe, 0x0c},
+ {0x03, 0xef, 0xbe, 0x0f},
+ {0x03, 0xef, 0xbe, 0x0d},
+ {0x03, 0xef, 0xbe, 0x0b},
+ {0x03, 0xef, 0xbe, 0x19},
+ {0x03, 0xef, 0xbe, 0x15},
+ {0x03, 0xef, 0xbe, 0x11},
+ {0x03, 0xef, 0xbe, 0x31},
+ {0x03, 0xef, 0xbe, 0x33},
+ {0x03, 0xef, 0xbd, 0x0f},
+ {0x03, 0xef, 0xbe, 0x30},
+ {0x03, 0xef, 0xbe, 0x3e},
+ {0x03, 0xef, 0xbe, 0x32},
+ {0x03, 0xef, 0xbe, 0x36},
+ {0x03, 0xef, 0xbd, 0x14},
+ {0x03, 0xef, 0xbe, 0x2e},
+ {0x03, 0xef, 0xbd, 0x1e},
+ {0x03, 0xef, 0xbe, 0x10},
+ {0x03, 0xef, 0xbf, 0x13},
+ {0x03, 0xef, 0xbf, 0x15},
+ {0x03, 0xef, 0xbf, 0x17},
+ {0x03, 0xef, 0xbf, 0x1f},
+ {0x03, 0xef, 0xbf, 0x1d},
+ {0x03, 0xef, 0xbf, 0x1b},
+ {0x03, 0xef, 0xbf, 0x09},
+ {0x03, 0xef, 0xbf, 0x0b},
+ {0x03, 0xef, 0xbf, 0x37},
+ {0x03, 0xef, 0xbe, 0x04},
+ {0x01, 0xe0, 0x00, 0x00},
+ {0x03, 0xe2, 0xa6, 0x1a},
+ {0x03, 0xe2, 0xa6, 0x26},
+ {0x03, 0xe3, 0x80, 0x23},
+ {0x03, 0xe3, 0x80, 0x2e},
+ {0x03, 0xe3, 0x80, 0x25},
+ {0x03, 0xe3, 0x83, 0x1e},
+ {0x03, 0xe3, 0x83, 0x14},
+ {0x03, 0xe3, 0x82, 0x06},
+ {0x03, 0xe3, 0x82, 0x0b},
+ {0x03, 0xe3, 0x82, 0x0c},
+ {0x03, 0xe3, 0x82, 0x0d},
+ {0x03, 0xe3, 0x82, 0x02},
+ {0x03, 0xe3, 0x83, 0x0f},
+ {0x03, 0xe3, 0x83, 0x08},
+ {0x03, 0xe3, 0x83, 0x09},
+ {0x03, 0xe3, 0x83, 0x2c},
+ {0x03, 0xe3, 0x83, 0x0c},
+ {0x03, 0xe3, 0x82, 0x13},
+ {0x03, 0xe3, 0x82, 0x16},
+ {0x03, 0xe3, 0x82, 0x15},
+ {0x03, 0xe3, 0x82, 0x1c},
+ {0x03, 0xe3, 0x82, 0x1f},
+ {0x03, 0xe3, 0x82, 0x1d},
+ {0x03, 0xe3, 0x82, 0x1a},
+ {0x03, 0xe3, 0x82, 0x17},
+ {0x03, 0xe3, 0x82, 0x08},
+ {0x03, 0xe3, 0x82, 0x09},
+ {0x03, 0xe3, 0x82, 0x0e},
+ {0x03, 0xe3, 0x82, 0x04},
+ {0x03, 0xe3, 0x82, 0x05},
+ {0x03, 0xe3, 0x82, 0x3f},
+ {0x03, 0xe3, 0x83, 0x00},
+ {0x03, 0xe3, 0x83, 0x06},
+ {0x03, 0xe3, 0x83, 0x05},
+ {0x03, 0xe3, 0x83, 0x0d},
+ {0x03, 0xe3, 0x83, 0x0b},
+ {0x03, 0xe3, 0x83, 0x07},
+ {0x03, 0xe3, 0x83, 0x19},
+ {0x03, 0xe3, 0x83, 0x15},
+ {0x03, 0xe3, 0x83, 0x11},
+ {0x03, 0xe3, 0x83, 0x31},
+ {0x03, 0xe3, 0x83, 0x33},
+ {0x03, 0xe3, 0x83, 0x30},
+ {0x03, 0xe3, 0x83, 0x3e},
+ {0x03, 0xe3, 0x83, 0x32},
+ {0x03, 0xe3, 0x83, 0x36},
+ {0x03, 0xe3, 0x83, 0x2e},
+ {0x03, 0xe3, 0x82, 0x07},
+ {0x03, 0xe3, 0x85, 0x04},
+ {0x03, 0xe3, 0x84, 0x10},
+ {0x03, 0xe3, 0x85, 0x30},
+ {0x03, 0xe3, 0x85, 0x0d},
+ {0x03, 0xe3, 0x85, 0x13},
+ {0x03, 0xe3, 0x85, 0x15},
+ {0x03, 0xe3, 0x85, 0x17},
+ {0x03, 0xe3, 0x85, 0x1f},
+ {0x03, 0xe3, 0x85, 0x1d},
+ {0x03, 0xe3, 0x85, 0x1b},
+ {0x03, 0xe3, 0x85, 0x09},
+ {0x03, 0xe3, 0x85, 0x0f},
+ {0x03, 0xe3, 0x85, 0x0b},
+ {0x03, 0xe3, 0x85, 0x37},
+ {0x03, 0xe3, 0x85, 0x3b},
+ {0x03, 0xe3, 0x85, 0x39},
+ {0x03, 0xe3, 0x85, 0x3f},
+ {0x02, 0xc2, 0x02, 0x00},
+ {0x02, 0xc2, 0x0e, 0x00},
+ {0x02, 0xc2, 0x0c, 0x00},
+ {0x02, 0xc2, 0x00, 0x00},
+ {0x03, 0xe2, 0x82, 0x0f},
+ {0x03, 0xe2, 0x94, 0x2a},
+ {0x03, 0xe2, 0x86, 0x39},
+ {0x03, 0xe2, 0x86, 0x3b},
+ {0x03, 0xe2, 0x86, 0x3f},
+ {0x03, 0xe2, 0x96, 0x0d},
+ {0x03, 0xe2, 0x97, 0x25},
+}
+
+// Total table size 14680 bytes (14KiB)
diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go
new file mode 100644
index 00000000000..0049f700a2f
--- /dev/null
+++ b/vendor/golang.org/x/text/width/transform.go
@@ -0,0 +1,239 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package width
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+type foldTransform struct {
+ transform.NopResetter
+}
+
+func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ if src[n] < utf8.RuneSelf {
+ // ASCII fast path.
+ for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ {
+ }
+ continue
+ }
+ v, size := trie.lookup(src[n:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ err = transform.ErrShortSrc
+ } else {
+ n = len(src)
+ }
+ break
+ }
+ if elem(v)&tagNeedsFold != 0 {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return n, err
+}
+
+func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ if src[nSrc] < utf8.RuneSelf {
+ // ASCII fast path.
+ start, end := nSrc, len(src)
+ if d := len(dst) - nDst; d < end-start {
+ end = nSrc + d
+ }
+ for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ {
+ }
+ n := copy(dst[nDst:], src[start:nSrc])
+ if nDst += n; nDst == len(dst) {
+ nSrc = start + n
+ if nSrc == len(src) {
+ return nDst, nSrc, nil
+ }
+ if src[nSrc] < utf8.RuneSelf {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ }
+ continue
+ }
+ v, size := trie.lookup(src[nSrc:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ return nDst, nSrc, transform.ErrShortSrc
+ }
+ size = 1 // gobble 1 byte
+ }
+ if elem(v)&tagNeedsFold == 0 {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ nDst += size
+ } else {
+ data := inverseData[byte(v)]
+ if len(dst)-nDst < int(data[0]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ i := 1
+ for end := int(data[0]); i < end; i++ {
+ dst[nDst] = data[i]
+ nDst++
+ }
+ dst[nDst] = data[i] ^ src[nSrc+size-1]
+ nDst++
+ }
+ nSrc += size
+ }
+ return nDst, nSrc, nil
+}
+
+type narrowTransform struct {
+ transform.NopResetter
+}
+
+func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ if src[n] < utf8.RuneSelf {
+ // ASCII fast path.
+ for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ {
+ }
+ continue
+ }
+ v, size := trie.lookup(src[n:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ err = transform.ErrShortSrc
+ } else {
+ n = len(src)
+ }
+ break
+ }
+ if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous {
+ } else {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return n, err
+}
+
+func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ if src[nSrc] < utf8.RuneSelf {
+ // ASCII fast path.
+ start, end := nSrc, len(src)
+ if d := len(dst) - nDst; d < end-start {
+ end = nSrc + d
+ }
+ for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ {
+ }
+ n := copy(dst[nDst:], src[start:nSrc])
+ if nDst += n; nDst == len(dst) {
+ nSrc = start + n
+ if nSrc == len(src) {
+ return nDst, nSrc, nil
+ }
+ if src[nSrc] < utf8.RuneSelf {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ }
+ continue
+ }
+ v, size := trie.lookup(src[nSrc:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ return nDst, nSrc, transform.ErrShortSrc
+ }
+ size = 1 // gobble 1 byte
+ }
+ if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ nDst += size
+ } else {
+ data := inverseData[byte(v)]
+ if len(dst)-nDst < int(data[0]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ i := 1
+ for end := int(data[0]); i < end; i++ {
+ dst[nDst] = data[i]
+ nDst++
+ }
+ dst[nDst] = data[i] ^ src[nSrc+size-1]
+ nDst++
+ }
+ nSrc += size
+ }
+ return nDst, nSrc, nil
+}
+
+type wideTransform struct {
+ transform.NopResetter
+}
+
+func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ // TODO: Consider ASCII fast path. Special-casing ASCII handling can
+ // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably
+ // not enough to warrant the extra code and complexity.
+ v, size := trie.lookup(src[n:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ err = transform.ErrShortSrc
+ } else {
+ n = len(src)
+ }
+ break
+ }
+ if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow {
+ } else {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return n, err
+}
+
+func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ // TODO: Consider ASCII fast path. Special-casing ASCII handling can
+ // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably
+ // not enough to warrant the extra code and complexity.
+ v, size := trie.lookup(src[nSrc:])
+ if size == 0 { // incomplete UTF-8 encoding
+ if !atEOF {
+ return nDst, nSrc, transform.ErrShortSrc
+ }
+ size = 1 // gobble 1 byte
+ }
+ if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ nDst += size
+ } else {
+ data := inverseData[byte(v)]
+ if len(dst)-nDst < int(data[0]) {
+ return nDst, nSrc, transform.ErrShortDst
+ }
+ i := 1
+ for end := int(data[0]); i < end; i++ {
+ dst[nDst] = data[i]
+ nDst++
+ }
+ dst[nDst] = data[i] ^ src[nSrc+size-1]
+ nDst++
+ }
+ nSrc += size
+ }
+ return nDst, nSrc, nil
+}
diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go
new file mode 100644
index 00000000000..ca8e45fd19e
--- /dev/null
+++ b/vendor/golang.org/x/text/width/trieval.go
@@ -0,0 +1,30 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package width
+
+// elem is an entry of the width trie. The high byte is used to encode the type
+// of the rune. The low byte is used to store the index to a mapping entry in
+// the inverseData array.
+type elem uint16
+
+const (
+ tagNeutral elem = iota << typeShift
+ tagAmbiguous
+ tagWide
+ tagNarrow
+ tagFullwidth
+ tagHalfwidth
+)
+
+const (
+ numTypeBits = 3
+ typeShift = 16 - numTypeBits
+
+ // tagNeedsFold is true for all fullwidth and halfwidth runes except for
+ // the Won sign U+20A9.
+ tagNeedsFold = 0x1000
+
+ // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide
+ // variant.
+ wonSign rune = 0x20A9
+)
diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go
new file mode 100644
index 00000000000..f1639ca68af
--- /dev/null
+++ b/vendor/golang.org/x/text/width/width.go
@@ -0,0 +1,206 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate stringer -type=Kind
+//go:generate go run gen.go gen_common.go gen_trieval.go
+
+// Package width provides functionality for handling different widths in text.
+//
+// Wide characters behave like ideographs; they tend to allow line breaks after
+// each character and remain upright in vertical text layout. Narrow characters
+// are kept together in words or runs that are rotated sideways in vertical text
+// layout.
+//
+// For more information, see http://unicode.org/reports/tr11/.
+package width // import "golang.org/x/text/width"
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// TODO
+// 1) Reduce table size by compressing blocks.
+// 2) API proposition for computing display length
+// (approximation, fixed pitch only).
+// 3) Implement display length.
+
+// Kind indicates the type of width property as defined in http://unicode.org/reports/tr11/.
+type Kind int
+
+const (
+ // Neutral characters do not occur in legacy East Asian character sets.
+ Neutral Kind = iota
+
+ // EastAsianAmbiguous characters that can be sometimes wide and sometimes
+ // narrow and require additional information not contained in the character
+ // code to further resolve their width.
+ EastAsianAmbiguous
+
+ // EastAsianWide characters are wide in its usual form. They occur only in
+ // the context of East Asian typography. These runes may have explicit
+ // halfwidth counterparts.
+ EastAsianWide
+
+ // EastAsianNarrow characters are narrow in its usual form. They often have
+ // fullwidth counterparts.
+ EastAsianNarrow
+
+ // Note: there exist Narrow runes that do not have fullwidth or wide
+ // counterparts, despite what the definition says (e.g. U+27E6).
+
+ // EastAsianFullwidth characters have a compatibility decompositions of type
+ // wide that map to a narrow counterpart.
+ EastAsianFullwidth
+
+ // EastAsianHalfwidth characters have a compatibility decomposition of type
+ // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON
+ // SIGN.
+ EastAsianHalfwidth
+
+ // Note: there exist runes that have a halfwidth counterparts but that are
+ // classified as Ambiguous, rather than wide (e.g. U+2190).
+)
+
+// TODO: the generated tries need to return size 1 for invalid runes for the
+// width to be computed correctly (each byte should render width 1)
+
+var trie = newWidthTrie(0)
+
+// Lookup reports the Properties of the first rune in b and the number of bytes
+// of its UTF-8 encoding.
+func Lookup(b []byte) (p Properties, size int) {
+ v, sz := trie.lookup(b)
+ return Properties{elem(v), b[sz-1]}, sz
+}
+
+// LookupString reports the Properties of the first rune in s and the number of
+// bytes of its UTF-8 encoding.
+func LookupString(s string) (p Properties, size int) {
+ v, sz := trie.lookupString(s)
+ return Properties{elem(v), s[sz-1]}, sz
+}
+
+// LookupRune reports the Properties of rune r.
+func LookupRune(r rune) Properties {
+ var buf [4]byte
+ n := utf8.EncodeRune(buf[:], r)
+ v, _ := trie.lookup(buf[:n])
+ last := byte(r)
+ if r >= utf8.RuneSelf {
+ last = 0x80 + byte(r&0x3f)
+ }
+ return Properties{elem(v), last}
+}
+
+// Properties provides access to width properties of a rune.
+type Properties struct {
+ elem elem
+ last byte
+}
+
+func (e elem) kind() Kind {
+ return Kind(e >> typeShift)
+}
+
+// Kind returns the Kind of a rune as defined in Unicode TR #11.
+// See http://unicode.org/reports/tr11/ for more details.
+func (p Properties) Kind() Kind {
+ return p.elem.kind()
+}
+
+// Folded returns the folded variant of a rune or 0 if the rune is canonical.
+func (p Properties) Folded() rune {
+ if p.elem&tagNeedsFold != 0 {
+ buf := inverseData[byte(p.elem)]
+ buf[buf[0]] ^= p.last
+ r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+ return r
+ }
+ return 0
+}
+
+// Narrow returns the narrow variant of a rune or 0 if the rune is already
+// narrow or doesn't have a narrow variant.
+func (p Properties) Narrow() rune {
+ if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) {
+ buf := inverseData[byte(p.elem)]
+ buf[buf[0]] ^= p.last
+ r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+ return r
+ }
+ return 0
+}
+
+// Wide returns the wide variant of a rune or 0 if the rune is already
+// wide or doesn't have a wide variant.
+func (p Properties) Wide() rune {
+ if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) {
+ buf := inverseData[byte(p.elem)]
+ buf[buf[0]] ^= p.last
+ r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+ return r
+ }
+ return 0
+}
+
+// TODO for Properties:
+// - Add Fullwidth/Halfwidth or Inverted methods for computing variants
+// mapping.
+// - Add width information (including information on non-spacing runes).
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+ t transform.SpanningTransformer
+}
+
+// Reset implements the transform.Transformer interface.
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Transform implements the transform.Transformer interface.
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ return t.t.Transform(dst, src, atEOF)
+}
+
+// Span implements the transform.SpanningTransformer interface.
+func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) {
+ return t.t.Span(src, atEOF)
+}
+
+// Bytes returns a new byte slice with the result of applying t to b.
+func (t Transformer) Bytes(b []byte) []byte {
+ b, _, _ = transform.Bytes(t, b)
+ return b
+}
+
+// String returns a string with the result of applying t to s.
+func (t Transformer) String(s string) string {
+ s, _, _ = transform.String(t, s)
+ return s
+}
+
+var (
+ // Fold is a transform that maps all runes to their canonical width.
+ //
+ // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm
+ // provide a more generic folding mechanism.
+ Fold Transformer = Transformer{foldTransform{}}
+
+ // Widen is a transform that maps runes to their wide variant, if
+ // available.
+ Widen Transformer = Transformer{wideTransform{}}
+
+ // Narrow is a transform that maps runes to their narrow variant, if
+ // available.
+ Narrow Transformer = Transformer{narrowTransform{}}
+)
+
+// TODO: Consider the following options:
+// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some
+// generalized variant of this.
+// - Consider a wide Won character to be the default width (or some generalized
+// variant of this).
+// - Filter the set of characters that gets converted (the preferred approach is
+// to allow applying filters to transforms).
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/cel_service.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/cel_service.pb.go
new file mode 100644
index 00000000000..b1016742dab
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/cel_service.pb.go
@@ -0,0 +1,194 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/cel_service.proto
+
+package expr
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/cel_service.proto", fileDescriptor_f35b2125e64b6d66)
+}
+
+var fileDescriptor_f35b2125e64b6d66 = []byte{
+ // 240 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xd1, 0x31, 0x4b, 0xc4, 0x30,
+ 0x14, 0xc0, 0x71, 0x2b, 0xea, 0x90, 0x45, 0xc8, 0x24, 0x87, 0x93, 0xe0, 0x09, 0x0e, 0x09, 0x77,
+ 0x8e, 0x3a, 0xdd, 0xe1, 0x5e, 0x74, 0x10, 0x6e, 0x91, 0x67, 0x78, 0xe6, 0x82, 0x69, 0x5e, 0x4c,
+ 0x6a, 0xf1, 0xcb, 0xf8, 0x3d, 0x1d, 0x25, 0x69, 0xab, 0x88, 0xc4, 0xde, 0xd8, 0xbe, 0x5f, 0xfe,
+ 0x81, 0x17, 0x76, 0xa9, 0x89, 0xb4, 0x45, 0x09, 0xde, 0x48, 0x7c, 0xf7, 0x41, 0x76, 0x0b, 0xb0,
+ 0x7e, 0x0b, 0x0b, 0xa9, 0xd0, 0x3e, 0x46, 0x0c, 0x9d, 0x51, 0x28, 0x7c, 0xa0, 0x96, 0xf8, 0x49,
+ 0x6f, 0x05, 0x78, 0x23, 0x92, 0x15, 0xa3, 0x9d, 0x2d, 0xcb, 0x15, 0x72, 0xcf, 0x14, 0x1a, 0x70,
+ 0x0a, 0x7f, 0xd7, 0x96, 0x1f, 0xfb, 0x8c, 0xad, 0xd1, 0xde, 0xf7, 0x3f, 0xf9, 0x86, 0x1d, 0xd6,
+ 0x10, 0x22, 0xf2, 0xb9, 0x28, 0x5d, 0x23, 0x32, 0xb8, 0xc3, 0xd7, 0x37, 0x8c, 0xed, 0xec, 0x62,
+ 0xd2, 0x45, 0x4f, 0x2e, 0xe2, 0xd9, 0x5e, 0x6a, 0xaf, 0xb7, 0xa8, 0x5e, 0xfe, 0x6b, 0x67, 0xb0,
+ 0x43, 0x7b, 0x70, 0xdf, 0xed, 0x07, 0x76, 0x70, 0xdb, 0x81, 0xe5, 0xe7, 0xe5, 0x23, 0x69, 0x3e,
+ 0x96, 0xe7, 0x53, 0x6c, 0x0c, 0xaf, 0x02, 0x3b, 0x55, 0xd4, 0x14, 0xf9, 0xea, 0xf8, 0x67, 0x79,
+ 0x75, 0x5a, 0x68, 0x5d, 0x6d, 0x6e, 0x06, 0xac, 0xc9, 0x82, 0xd3, 0x82, 0x82, 0x96, 0x1a, 0x5d,
+ 0x5e, 0xb7, 0xec, 0x47, 0xe0, 0x4d, 0xfc, 0xfb, 0x4a, 0xd7, 0xe9, 0xeb, 0xb3, 0xaa, 0x9e, 0x8e,
+ 0xb2, 0xbd, 0xfa, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x97, 0x50, 0xb8, 0x16, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// CelServiceClient is the client API for CelService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type CelServiceClient interface {
+ // Transforms CEL source text into a parsed representation.
+ Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error)
+ // Runs static checks on a parsed CEL representation and return
+ // an annotated representation, or a set of issues.
+ Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error)
+ // Evaluates a parsed or annotation CEL representation given
+ // values of external bindings.
+ Eval(ctx context.Context, in *EvalRequest, opts ...grpc.CallOption) (*EvalResponse, error)
+}
+
+type celServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewCelServiceClient(cc *grpc.ClientConn) CelServiceClient {
+ return &celServiceClient{cc}
+}
+
+func (c *celServiceClient) Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error) {
+ out := new(ParseResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.CelService/Parse", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *celServiceClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) {
+ out := new(CheckResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.CelService/Check", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *celServiceClient) Eval(ctx context.Context, in *EvalRequest, opts ...grpc.CallOption) (*EvalResponse, error) {
+ out := new(EvalResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.CelService/Eval", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// CelServiceServer is the server API for CelService service.
+type CelServiceServer interface {
+ // Transforms CEL source text into a parsed representation.
+ Parse(context.Context, *ParseRequest) (*ParseResponse, error)
+ // Runs static checks on a parsed CEL representation and return
+ // an annotated representation, or a set of issues.
+ Check(context.Context, *CheckRequest) (*CheckResponse, error)
+ // Evaluates a parsed or annotation CEL representation given
+ // values of external bindings.
+ Eval(context.Context, *EvalRequest) (*EvalResponse, error)
+}
+
+func RegisterCelServiceServer(s *grpc.Server, srv CelServiceServer) {
+ s.RegisterService(&_CelService_serviceDesc, srv)
+}
+
+func _CelService_Parse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ParseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CelServiceServer).Parse(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.CelService/Parse",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CelServiceServer).Parse(ctx, req.(*ParseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _CelService_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CheckRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CelServiceServer).Check(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.CelService/Check",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CelServiceServer).Check(ctx, req.(*CheckRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _CelService_Eval_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EvalRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(CelServiceServer).Eval(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.CelService/Eval",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(CelServiceServer).Eval(ctx, req.(*EvalRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _CelService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.api.expr.v1alpha1.CelService",
+ HandlerType: (*CelServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Parse",
+ Handler: _CelService_Parse_Handler,
+ },
+ {
+ MethodName: "Check",
+ Handler: _CelService_Check_Handler,
+ },
+ {
+ MethodName: "Eval",
+ Handler: _CelService_Eval_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/api/expr/v1alpha1/cel_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
new file mode 100644
index 00000000000..f82485fcf4d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
@@ -0,0 +1,1445 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/checked.proto
+
+package expr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// CEL primitive types.
+type Type_PrimitiveType int32
+
+const (
+ // Unspecified type.
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ // Boolean type.
+ Type_BOOL Type_PrimitiveType = 1
+ // Int64 type.
+ //
+ // Proto-based integer values are widened to int64.
+ Type_INT64 Type_PrimitiveType = 2
+ // Uint64 type.
+ //
+ // Proto-based unsigned integer values are widened to uint64.
+ Type_UINT64 Type_PrimitiveType = 3
+ // Double type.
+ //
+ // Proto-based float values are widened to double values.
+ Type_DOUBLE Type_PrimitiveType = 4
+ // String type.
+ Type_STRING Type_PrimitiveType = 5
+ // Bytes type.
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+var Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+}
+
+var Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+}
+
+func (x Type_PrimitiveType) String() string {
+ return proto.EnumName(Type_PrimitiveType_name, int32(x))
+}
+
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 0}
+}
+
+// Well-known protobuf types treated with first-class support in CEL.
+type Type_WellKnownType int32
+
+const (
+ // Unspecified type.
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ // Well-known protobuf.Any type.
+ //
+ // Any types are a polymorphic message type. During type-checking they are
+ // treated like `DYN` types, but at runtime they are resolved to a specific
+ // message type specified at evaluation time.
+ Type_ANY Type_WellKnownType = 1
+ // Well-known protobuf.Timestamp type, internally referenced as `timestamp`.
+ Type_TIMESTAMP Type_WellKnownType = 2
+ // Well-known protobuf.Duration type, internally referenced as `duration`.
+ Type_DURATION Type_WellKnownType = 3
+)
+
+var Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+}
+
+var Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+}
+
+func (x Type_WellKnownType) String() string {
+ return proto.EnumName(Type_WellKnownType_name, int32(x))
+}
+
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 1}
+}
+
+// A CEL expression which has been successfully type checked.
+type CheckedExpr struct {
+ // A map from expression ids to resolved references.
+ //
+ // The following entries are in this table:
+ //
+ // - An Ident or Select expression is represented here if it resolves to a
+ // declaration. For instance, if `a.b.c` is represented by
+ // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration,
+ // while `c` is a field selection, then the reference is attached to the
+ // nested select expression (but not to the id or or the outer select).
+ // In turn, if `a` resolves to a declaration and `b.c` are field selections,
+ // the reference is attached to the ident expression.
+ // - Every Call expression has an entry here, identifying the function being
+ // called.
+ // - Every CreateStruct expression for a message has an entry, identifying
+ // the message.
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A map from expression ids to types.
+ //
+ // Every expression node which has a type different than DYN has a mapping
+ // here. If an expression has type DYN, it is omitted from this map to save
+ // space.
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The source info derived from input that generated the parsed `expr` and
+ // any optimizations made during the type-checking pass.
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ // The checked expression. Semantically equivalent to the parsed `expr`, but
+ // may have structural differences.
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CheckedExpr) Reset() { *m = CheckedExpr{} }
+func (m *CheckedExpr) String() string { return proto.CompactTextString(m) }
+func (*CheckedExpr) ProtoMessage() {}
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{0}
+}
+
+func (m *CheckedExpr) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CheckedExpr.Unmarshal(m, b)
+}
+func (m *CheckedExpr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CheckedExpr.Marshal(b, m, deterministic)
+}
+func (m *CheckedExpr) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CheckedExpr.Merge(m, src)
+}
+func (m *CheckedExpr) XXX_Size() int {
+ return xxx_messageInfo_CheckedExpr.Size(m)
+}
+func (m *CheckedExpr) XXX_DiscardUnknown() {
+ xxx_messageInfo_CheckedExpr.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckedExpr proto.InternalMessageInfo
+
+func (m *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if m != nil {
+ return m.ReferenceMap
+ }
+ return nil
+}
+
+func (m *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if m != nil {
+ return m.TypeMap
+ }
+ return nil
+}
+
+func (m *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if m != nil {
+ return m.SourceInfo
+ }
+ return nil
+}
+
+func (m *CheckedExpr) GetExpr() *Expr {
+ if m != nil {
+ return m.Expr
+ }
+ return nil
+}
+
+// Represents a CEL type.
+type Type struct {
+ // The kind of type.
+ //
+ // Types that are valid to be assigned to TypeKind:
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Type) Reset() { *m = Type{} }
+func (m *Type) String() string { return proto.CompactTextString(m) }
+func (*Type) ProtoMessage() {}
+func (*Type) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1}
+}
+
+func (m *Type) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Type.Unmarshal(m, b)
+}
+func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Type.Marshal(b, m, deterministic)
+}
+func (m *Type) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Type.Merge(m, src)
+}
+func (m *Type) XXX_Size() int {
+ return xxx_messageInfo_Type.Size(m)
+}
+func (m *Type) XXX_DiscardUnknown() {
+ xxx_messageInfo_Type.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Type proto.InternalMessageInfo
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *empty.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null _struct.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=google.api.expr.v1alpha1.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *empty.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (m *Type) GetDyn() *empty.Empty {
+ if x, ok := m.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (m *Type) GetNull() _struct.NullValue {
+ if x, ok := m.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return _struct.NullValue_NULL_VALUE
+}
+
+func (m *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := m.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (m *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := m.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (m *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := m.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (m *Type) GetListType() *Type_ListType {
+ if x, ok := m.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (m *Type) GetMapType() *Type_MapType {
+ if x, ok := m.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (m *Type) GetFunction() *Type_FunctionType {
+ if x, ok := m.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (m *Type) GetMessageType() string {
+ if x, ok := m.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (m *Type) GetTypeParam() string {
+ if x, ok := m.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (m *Type) GetType() *Type {
+ if x, ok := m.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (m *Type) GetError() *empty.Empty {
+ if x, ok := m.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (m *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := m.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Type) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Type_OneofMarshaler, _Type_OneofUnmarshaler, _Type_OneofSizer, []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+}
+
+func _Type_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Type)
+ // type_kind
+ switch x := m.TypeKind.(type) {
+ case *Type_Dyn:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Dyn); err != nil {
+ return err
+ }
+ case *Type_Null:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Null))
+ case *Type_Primitive:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Primitive))
+ case *Type_Wrapper:
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Wrapper))
+ case *Type_WellKnown:
+ b.EncodeVarint(5<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.WellKnown))
+ case *Type_ListType_:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListType); err != nil {
+ return err
+ }
+ case *Type_MapType_:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MapType); err != nil {
+ return err
+ }
+ case *Type_Function:
+ b.EncodeVarint(8<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Function); err != nil {
+ return err
+ }
+ case *Type_MessageType:
+ b.EncodeVarint(9<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.MessageType)
+ case *Type_TypeParam:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.TypeParam)
+ case *Type_Type:
+ b.EncodeVarint(11<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Type); err != nil {
+ return err
+ }
+ case *Type_Error:
+ b.EncodeVarint(12<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Error); err != nil {
+ return err
+ }
+ case *Type_AbstractType_:
+ b.EncodeVarint(14<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.AbstractType); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Type.TypeKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Type_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Type)
+ switch tag {
+ case 1: // type_kind.dyn
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(empty.Empty)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_Dyn{msg}
+ return true, err
+ case 2: // type_kind.null
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TypeKind = &Type_Null{_struct.NullValue(x)}
+ return true, err
+ case 3: // type_kind.primitive
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TypeKind = &Type_Primitive{Type_PrimitiveType(x)}
+ return true, err
+ case 4: // type_kind.wrapper
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TypeKind = &Type_Wrapper{Type_PrimitiveType(x)}
+ return true, err
+ case 5: // type_kind.well_known
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TypeKind = &Type_WellKnown{Type_WellKnownType(x)}
+ return true, err
+ case 6: // type_kind.list_type
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Type_ListType)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_ListType_{msg}
+ return true, err
+ case 7: // type_kind.map_type
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Type_MapType)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_MapType_{msg}
+ return true, err
+ case 8: // type_kind.function
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Type_FunctionType)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_Function{msg}
+ return true, err
+ case 9: // type_kind.message_type
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.TypeKind = &Type_MessageType{x}
+ return true, err
+ case 10: // type_kind.type_param
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.TypeKind = &Type_TypeParam{x}
+ return true, err
+ case 11: // type_kind.type
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Type)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_Type{msg}
+ return true, err
+ case 12: // type_kind.error
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(empty.Empty)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_Error{msg}
+ return true, err
+ case 14: // type_kind.abstract_type
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Type_AbstractType)
+ err := b.DecodeMessage(msg)
+ m.TypeKind = &Type_AbstractType_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Type_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Type)
+ // type_kind
+ switch x := m.TypeKind.(type) {
+ case *Type_Dyn:
+ s := proto.Size(x.Dyn)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_Null:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Null))
+ case *Type_Primitive:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Primitive))
+ case *Type_Wrapper:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Wrapper))
+ case *Type_WellKnown:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.WellKnown))
+ case *Type_ListType_:
+ s := proto.Size(x.ListType)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_MapType_:
+ s := proto.Size(x.MapType)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_Function:
+ s := proto.Size(x.Function)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_MessageType:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.MessageType)))
+ n += len(x.MessageType)
+ case *Type_TypeParam:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.TypeParam)))
+ n += len(x.TypeParam)
+ case *Type_Type:
+ s := proto.Size(x.Type)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_Error:
+ s := proto.Size(x.Error)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Type_AbstractType_:
+ s := proto.Size(x.AbstractType)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// List type with typed elements, e.g. `list`.
+type Type_ListType struct {
+ // The element type.
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Type_ListType) Reset() { *m = Type_ListType{} }
+func (m *Type_ListType) String() string { return proto.CompactTextString(m) }
+func (*Type_ListType) ProtoMessage() {}
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 0}
+}
+
+func (m *Type_ListType) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Type_ListType.Unmarshal(m, b)
+}
+func (m *Type_ListType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Type_ListType.Marshal(b, m, deterministic)
+}
+func (m *Type_ListType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Type_ListType.Merge(m, src)
+}
+func (m *Type_ListType) XXX_Size() int {
+ return xxx_messageInfo_Type_ListType.Size(m)
+}
+func (m *Type_ListType) XXX_DiscardUnknown() {
+ xxx_messageInfo_Type_ListType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Type_ListType proto.InternalMessageInfo
+
+func (m *Type_ListType) GetElemType() *Type {
+ if m != nil {
+ return m.ElemType
+ }
+ return nil
+}
+
+// Map type with parameterized key and value types, e.g. `map`.
+type Type_MapType struct {
+ // The type of the key.
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ // The type of the value.
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Type_MapType) Reset() { *m = Type_MapType{} }
+func (m *Type_MapType) String() string { return proto.CompactTextString(m) }
+func (*Type_MapType) ProtoMessage() {}
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 1}
+}
+
+func (m *Type_MapType) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Type_MapType.Unmarshal(m, b)
+}
+func (m *Type_MapType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Type_MapType.Marshal(b, m, deterministic)
+}
+func (m *Type_MapType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Type_MapType.Merge(m, src)
+}
+func (m *Type_MapType) XXX_Size() int {
+ return xxx_messageInfo_Type_MapType.Size(m)
+}
+func (m *Type_MapType) XXX_DiscardUnknown() {
+ xxx_messageInfo_Type_MapType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Type_MapType proto.InternalMessageInfo
+
+func (m *Type_MapType) GetKeyType() *Type {
+ if m != nil {
+ return m.KeyType
+ }
+ return nil
+}
+
+func (m *Type_MapType) GetValueType() *Type {
+ if m != nil {
+ return m.ValueType
+ }
+ return nil
+}
+
+// Function type with result and arg types.
+type Type_FunctionType struct {
+ // Result type of the function.
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ // Argument types of the function.
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Type_FunctionType) Reset() { *m = Type_FunctionType{} }
+func (m *Type_FunctionType) String() string { return proto.CompactTextString(m) }
+func (*Type_FunctionType) ProtoMessage() {}
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 2}
+}
+
+func (m *Type_FunctionType) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Type_FunctionType.Unmarshal(m, b)
+}
+func (m *Type_FunctionType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Type_FunctionType.Marshal(b, m, deterministic)
+}
+func (m *Type_FunctionType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Type_FunctionType.Merge(m, src)
+}
+func (m *Type_FunctionType) XXX_Size() int {
+ return xxx_messageInfo_Type_FunctionType.Size(m)
+}
+func (m *Type_FunctionType) XXX_DiscardUnknown() {
+ xxx_messageInfo_Type_FunctionType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Type_FunctionType proto.InternalMessageInfo
+
+func (m *Type_FunctionType) GetResultType() *Type {
+ if m != nil {
+ return m.ResultType
+ }
+ return nil
+}
+
+func (m *Type_FunctionType) GetArgTypes() []*Type {
+ if m != nil {
+ return m.ArgTypes
+ }
+ return nil
+}
+
+// Application defined abstract type.
+type Type_AbstractType struct {
+ // The fully qualified name of this abstract type.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Parameter types for this abstract type.
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Type_AbstractType) Reset() { *m = Type_AbstractType{} }
+func (m *Type_AbstractType) String() string { return proto.CompactTextString(m) }
+func (*Type_AbstractType) ProtoMessage() {}
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{1, 3}
+}
+
+func (m *Type_AbstractType) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Type_AbstractType.Unmarshal(m, b)
+}
+func (m *Type_AbstractType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Type_AbstractType.Marshal(b, m, deterministic)
+}
+func (m *Type_AbstractType) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Type_AbstractType.Merge(m, src)
+}
+func (m *Type_AbstractType) XXX_Size() int {
+ return xxx_messageInfo_Type_AbstractType.Size(m)
+}
+func (m *Type_AbstractType) XXX_DiscardUnknown() {
+ xxx_messageInfo_Type_AbstractType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Type_AbstractType proto.InternalMessageInfo
+
+func (m *Type_AbstractType) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Type_AbstractType) GetParameterTypes() []*Type {
+ if m != nil {
+ return m.ParameterTypes
+ }
+ return nil
+}
+
+// Represents a declaration of a named value or function.
+//
+// A declaration is part of the contract between the expression, the agent
+// evaluating that expression, and the caller requesting evaluation.
+type Decl struct {
+ // The fully qualified name of the declaration.
+ //
+ // Declarations are organized in containers and this represents the full path
+ // to the declaration in its container, as in `google.api.expr.Decl`.
+ //
+ // Declarations used as
+ // [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload]
+ // parameters may or may not have a name depending on whether the overload is
+ // function declaration or a function definition containing a result
+ // [Expr][google.api.expr.v1alpha1.Expr].
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The declaration kind.
+ //
+ // Types that are valid to be assigned to DeclKind:
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Decl) Reset() { *m = Decl{} }
+func (m *Decl) String() string { return proto.CompactTextString(m) }
+func (*Decl) ProtoMessage() {}
+func (*Decl) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{2}
+}
+
+func (m *Decl) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Decl.Unmarshal(m, b)
+}
+func (m *Decl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Decl.Marshal(b, m, deterministic)
+}
+func (m *Decl) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Decl.Merge(m, src)
+}
+func (m *Decl) XXX_Size() int {
+ return xxx_messageInfo_Decl.Size(m)
+}
+func (m *Decl) XXX_DiscardUnknown() {
+ xxx_messageInfo_Decl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Decl proto.InternalMessageInfo
+
+func (m *Decl) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (m *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := m.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (m *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := m.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Decl) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Decl_OneofMarshaler, _Decl_OneofUnmarshaler, _Decl_OneofSizer, []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+}
+
+func _Decl_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Decl)
+ // decl_kind
+ switch x := m.DeclKind.(type) {
+ case *Decl_Ident:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Ident); err != nil {
+ return err
+ }
+ case *Decl_Function:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Function); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Decl.DeclKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Decl_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Decl)
+ switch tag {
+ case 2: // decl_kind.ident
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Decl_IdentDecl)
+ err := b.DecodeMessage(msg)
+ m.DeclKind = &Decl_Ident{msg}
+ return true, err
+ case 3: // decl_kind.function
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Decl_FunctionDecl)
+ err := b.DecodeMessage(msg)
+ m.DeclKind = &Decl_Function{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Decl_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Decl)
+ // decl_kind
+ switch x := m.DeclKind.(type) {
+ case *Decl_Ident:
+ s := proto.Size(x.Ident)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Decl_Function:
+ s := proto.Size(x.Function)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Identifier declaration which specifies its type and optional `Expr` value.
+//
+// An identifier without a value is a declaration that must be provided at
+// evaluation time. An identifier with a value should resolve to a constant,
+// but may be used in conjunction with other identifiers bound at evaluation
+// time.
+type Decl_IdentDecl struct {
+ // Required. The type of the identifier.
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The constant value of the identifier. If not specified, the identifier
+ // must be supplied at evaluation time.
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Documentation string for the identifier.
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Decl_IdentDecl) Reset() { *m = Decl_IdentDecl{} }
+func (m *Decl_IdentDecl) String() string { return proto.CompactTextString(m) }
+func (*Decl_IdentDecl) ProtoMessage() {}
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{2, 0}
+}
+
+func (m *Decl_IdentDecl) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Decl_IdentDecl.Unmarshal(m, b)
+}
+func (m *Decl_IdentDecl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Decl_IdentDecl.Marshal(b, m, deterministic)
+}
+func (m *Decl_IdentDecl) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Decl_IdentDecl.Merge(m, src)
+}
+func (m *Decl_IdentDecl) XXX_Size() int {
+ return xxx_messageInfo_Decl_IdentDecl.Size(m)
+}
+func (m *Decl_IdentDecl) XXX_DiscardUnknown() {
+ xxx_messageInfo_Decl_IdentDecl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Decl_IdentDecl proto.InternalMessageInfo
+
+func (m *Decl_IdentDecl) GetType() *Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *Decl_IdentDecl) GetValue() *Constant {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Decl_IdentDecl) GetDoc() string {
+ if m != nil {
+ return m.Doc
+ }
+ return ""
+}
+
+// Function declaration specifies one or more overloads which indicate the
+// function's parameter types and return type, and may optionally specify a
+// function definition in terms of CEL expressions.
+//
+// Functions have no observable side-effects (there may be side-effects like
+// logging which are not observable from CEL).
+type Decl_FunctionDecl struct {
+ // Required. List of function overloads, must contain at least one overload.
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Decl_FunctionDecl) Reset() { *m = Decl_FunctionDecl{} }
+func (m *Decl_FunctionDecl) String() string { return proto.CompactTextString(m) }
+func (*Decl_FunctionDecl) ProtoMessage() {}
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{2, 1}
+}
+
+func (m *Decl_FunctionDecl) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Decl_FunctionDecl.Unmarshal(m, b)
+}
+func (m *Decl_FunctionDecl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Decl_FunctionDecl.Marshal(b, m, deterministic)
+}
+func (m *Decl_FunctionDecl) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Decl_FunctionDecl.Merge(m, src)
+}
+func (m *Decl_FunctionDecl) XXX_Size() int {
+ return xxx_messageInfo_Decl_FunctionDecl.Size(m)
+}
+func (m *Decl_FunctionDecl) XXX_DiscardUnknown() {
+ xxx_messageInfo_Decl_FunctionDecl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Decl_FunctionDecl proto.InternalMessageInfo
+
+func (m *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if m != nil {
+ return m.Overloads
+ }
+ return nil
+}
+
+// An overload indicates a function's parameter types and return type, and
+// may optionally include a function body described in terms of
+// [Expr][google.api.expr.v1alpha1.Expr] values.
+//
+// Functions overloads are declared in either a function or method
+// call-style. For methods, the `params[0]` is the expected type of the
+// target receiver.
+//
+// Overloads must have non-overlapping argument types after erasure of all
+// parameterized type variables (similar as type erasure in Java).
+type Decl_FunctionDecl_Overload struct {
+ // Required. Globally unique overload name of the function which reflects
+ // the function name and argument types.
+ //
+ // This will be used by a [Reference][google.api.expr.v1alpha1.Reference]
+ // to indicate the `overload_id` that was resolved for the function
+ // `name`.
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ // List of function parameter [Type][google.api.expr.v1alpha1.Type]
+ // values.
+ //
+ // Param types are disjoint after generic type parameters have been
+ // replaced with the type `DYN`. Since the `DYN` type is compatible with
+ // any other type, this means that if `A` is a type parameter, the
+ // function types `int` and `int` are not disjoint. Likewise,
+ // `map` is not disjoint from `map`.
+ //
+ // When the `result_type` of a function is a generic type param, the
+ // type param name also appears as the `type` of on at least one params.
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ // The type param names associated with the function declaration.
+ //
+ // For example, `function ex(K key, map map) : V` would yield
+ // the type params of `K, V`.
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ // Required. The result type of the function. For example, the operator
+ // `string.isEmpty()` would have `result_type` of `kind: BOOL`.
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ // Whether the function is to be used in a method call-style `x.f(...)`
+ // of a function call-style `f(x, ...)`.
+ //
+ // For methods, the first parameter declaration, `params[0]` is the
+ // expected type of the target receiver.
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ // Documentation string for the overload.
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Decl_FunctionDecl_Overload) Reset() { *m = Decl_FunctionDecl_Overload{} }
+func (m *Decl_FunctionDecl_Overload) String() string { return proto.CompactTextString(m) }
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{2, 1, 0}
+}
+
+func (m *Decl_FunctionDecl_Overload) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Decl_FunctionDecl_Overload.Unmarshal(m, b)
+}
+func (m *Decl_FunctionDecl_Overload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Decl_FunctionDecl_Overload.Marshal(b, m, deterministic)
+}
+func (m *Decl_FunctionDecl_Overload) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Decl_FunctionDecl_Overload.Merge(m, src)
+}
+func (m *Decl_FunctionDecl_Overload) XXX_Size() int {
+ return xxx_messageInfo_Decl_FunctionDecl_Overload.Size(m)
+}
+func (m *Decl_FunctionDecl_Overload) XXX_DiscardUnknown() {
+ xxx_messageInfo_Decl_FunctionDecl_Overload.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Decl_FunctionDecl_Overload proto.InternalMessageInfo
+
+func (m *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if m != nil {
+ return m.OverloadId
+ }
+ return ""
+}
+
+func (m *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if m != nil {
+ return m.TypeParams
+ }
+ return nil
+}
+
+func (m *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if m != nil {
+ return m.ResultType
+ }
+ return nil
+}
+
+func (m *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if m != nil {
+ return m.IsInstanceFunction
+ }
+ return false
+}
+
+func (m *Decl_FunctionDecl_Overload) GetDoc() string {
+ if m != nil {
+ return m.Doc
+ }
+ return ""
+}
+
+// Describes a resolved reference to a declaration.
+type Reference struct {
+ // The fully qualified name of the declaration.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // For references to functions, this is a list of `Overload.overload_id`
+ // values which match according to typing rules.
+ //
+ // If the list has more than one element, overload resolution among the
+ // presented candidates must happen at runtime because of dynamic types. The
+ // type checker attempts to narrow down this list as much as possible.
+ //
+ // Empty if this is not a reference to a
+ // [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl].
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ // For references to constants, this may contain the value of the
+ // constant if known at compile time.
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+func (*Reference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_30a741de3e790389, []int{3}
+}
+
+func (m *Reference) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Reference.Unmarshal(m, b)
+}
+func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
+}
+func (m *Reference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Reference.Merge(m, src)
+}
+func (m *Reference) XXX_Size() int {
+ return xxx_messageInfo_Reference.Size(m)
+}
+func (m *Reference) XXX_DiscardUnknown() {
+ xxx_messageInfo_Reference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Reference proto.InternalMessageInfo
+
+func (m *Reference) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Reference) GetOverloadId() []string {
+ if m != nil {
+ return m.OverloadId
+ }
+ return nil
+}
+
+func (m *Reference) GetValue() *Constant {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.api.expr.v1alpha1.Type_PrimitiveType", Type_PrimitiveType_name, Type_PrimitiveType_value)
+ proto.RegisterEnum("google.api.expr.v1alpha1.Type_WellKnownType", Type_WellKnownType_name, Type_WellKnownType_value)
+ proto.RegisterType((*CheckedExpr)(nil), "google.api.expr.v1alpha1.CheckedExpr")
+ proto.RegisterMapType((map[int64]*Reference)(nil), "google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry")
+ proto.RegisterMapType((map[int64]*Type)(nil), "google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry")
+ proto.RegisterType((*Type)(nil), "google.api.expr.v1alpha1.Type")
+ proto.RegisterType((*Type_ListType)(nil), "google.api.expr.v1alpha1.Type.ListType")
+ proto.RegisterType((*Type_MapType)(nil), "google.api.expr.v1alpha1.Type.MapType")
+ proto.RegisterType((*Type_FunctionType)(nil), "google.api.expr.v1alpha1.Type.FunctionType")
+ proto.RegisterType((*Type_AbstractType)(nil), "google.api.expr.v1alpha1.Type.AbstractType")
+ proto.RegisterType((*Decl)(nil), "google.api.expr.v1alpha1.Decl")
+ proto.RegisterType((*Decl_IdentDecl)(nil), "google.api.expr.v1alpha1.Decl.IdentDecl")
+ proto.RegisterType((*Decl_FunctionDecl)(nil), "google.api.expr.v1alpha1.Decl.FunctionDecl")
+ proto.RegisterType((*Decl_FunctionDecl_Overload)(nil), "google.api.expr.v1alpha1.Decl.FunctionDecl.Overload")
+ proto.RegisterType((*Reference)(nil), "google.api.expr.v1alpha1.Reference")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/checked.proto", fileDescriptor_30a741de3e790389)
+}
+
+var fileDescriptor_30a741de3e790389 = []byte{
+ // 1144 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x5f, 0x6f, 0xdb, 0xb6,
+ 0x17, 0x8d, 0x6c, 0xd9, 0x96, 0xae, 0x9c, 0xfe, 0xfc, 0x23, 0x86, 0x41, 0x50, 0x8b, 0x36, 0x70,
+ 0xb7, 0x2e, 0xd8, 0x06, 0xb9, 0xf5, 0x82, 0xae, 0x5d, 0x37, 0x6c, 0x71, 0xa2, 0x24, 0x42, 0xfd,
+ 0x0f, 0x8a, 0x93, 0x20, 0xc5, 0x00, 0x81, 0x91, 0x19, 0x57, 0xb0, 0x2c, 0x09, 0x94, 0x9c, 0xc4,
+ 0x7b, 0xdd, 0xd3, 0xb0, 0x7d, 0x9b, 0x7d, 0x89, 0x7d, 0x9a, 0xbd, 0x6e, 0x8f, 0x03, 0x29, 0xc9,
+ 0x71, 0xfe, 0x38, 0xb6, 0xdf, 0xae, 0xc8, 0x73, 0x0e, 0x2f, 0x2f, 0xcf, 0x25, 0x05, 0x2f, 0x06,
+ 0x41, 0x30, 0xf0, 0x48, 0x0d, 0x87, 0x6e, 0x8d, 0x5c, 0x85, 0xb4, 0x76, 0xf1, 0x0a, 0x7b, 0xe1,
+ 0x47, 0xfc, 0xaa, 0xe6, 0x7c, 0x24, 0xce, 0x90, 0xf4, 0xf5, 0x90, 0x06, 0x71, 0x80, 0xd4, 0x04,
+ 0xa7, 0xe3, 0xd0, 0xd5, 0x19, 0x4e, 0xcf, 0x70, 0xda, 0xe7, 0x73, 0x15, 0xa2, 0x89, 0x1f, 0xe3,
+ 0xab, 0x44, 0x40, 0x7b, 0x9c, 0xc2, 0xf8, 0xd7, 0xd9, 0xf8, 0xbc, 0x46, 0x46, 0x61, 0x3c, 0x49,
+ 0x27, 0x9f, 0xdc, 0x9e, 0x8c, 0x62, 0x3a, 0x76, 0xe2, 0x64, 0xb6, 0xfa, 0x4f, 0x1e, 0x94, 0x9d,
+ 0x24, 0x1b, 0xe3, 0x2a, 0xa4, 0xe8, 0x67, 0x58, 0xa7, 0xe4, 0x9c, 0x50, 0xe2, 0x3b, 0xc4, 0x1e,
+ 0xe1, 0x50, 0xcd, 0x6d, 0xe4, 0x37, 0x95, 0xfa, 0xb7, 0xfa, 0xbc, 0x1c, 0xf5, 0x19, 0xb6, 0x6e,
+ 0x65, 0xd4, 0x16, 0x0e, 0x0d, 0x3f, 0xa6, 0x13, 0xab, 0x4c, 0x67, 0x86, 0x50, 0x0b, 0xa4, 0x78,
+ 0x12, 0x26, 0xc2, 0x79, 0x2e, 0x5c, 0x5f, 0x4e, 0xb8, 0x37, 0x09, 0xaf, 0x35, 0x4b, 0x71, 0xf2,
+ 0x85, 0x0c, 0x50, 0xa2, 0x60, 0x4c, 0x1d, 0x62, 0xbb, 0xfe, 0x79, 0xa0, 0x16, 0x36, 0x84, 0x4d,
+ 0xa5, 0xfe, 0xd9, 0x7c, 0xc5, 0x43, 0x0e, 0x36, 0xfd, 0xf3, 0xc0, 0x82, 0x68, 0x1a, 0xa3, 0x3a,
+ 0x88, 0x0c, 0xa7, 0x8a, 0x9c, 0xff, 0x74, 0x3e, 0x9f, 0xa5, 0x62, 0x71, 0xac, 0xd6, 0x87, 0xff,
+ 0xdf, 0xd9, 0x2c, 0xaa, 0x40, 0x7e, 0x48, 0x26, 0xaa, 0xb0, 0x21, 0x6c, 0xe6, 0x2d, 0x16, 0xa2,
+ 0xb7, 0x50, 0xb8, 0xc0, 0xde, 0x98, 0xa8, 0x39, 0xae, 0xfd, 0x7c, 0xbe, 0xf6, 0x54, 0xcd, 0x4a,
+ 0x18, 0xdf, 0xe5, 0xde, 0x08, 0xda, 0x07, 0x28, 0xcf, 0xee, 0xfc, 0x9e, 0x05, 0xb6, 0x6e, 0x2e,
+ 0xf0, 0x40, 0xf2, 0x4c, 0x68, 0x46, 0xbb, 0xfa, 0x97, 0x02, 0x22, 0x1b, 0x43, 0x5f, 0x42, 0xbe,
+ 0x3f, 0xf1, 0xb9, 0xa8, 0x52, 0xff, 0x34, 0x13, 0xc8, 0xec, 0xa2, 0x1b, 0xcc, 0x4b, 0x07, 0x6b,
+ 0x16, 0x03, 0xa1, 0x97, 0x20, 0xfa, 0x63, 0xcf, 0xe3, 0xab, 0x3d, 0xaa, 0x6b, 0x77, 0xc0, 0xed,
+ 0xb1, 0xe7, 0x1d, 0xb3, 0x25, 0x0e, 0xd6, 0x2c, 0x8e, 0x44, 0x4d, 0x90, 0x43, 0xea, 0x8e, 0xdc,
+ 0xd8, 0xbd, 0x20, 0x6a, 0x9e, 0xd3, 0xbe, 0x7e, 0x38, 0x49, 0xbd, 0x9b, 0xe1, 0xd9, 0xd7, 0xc1,
+ 0x9a, 0x75, 0x2d, 0x80, 0x0e, 0xa0, 0x74, 0x49, 0x71, 0x18, 0x92, 0xe4, 0xb4, 0x56, 0xd7, 0xca,
+ 0xe8, 0xa8, 0x05, 0x70, 0x49, 0x3c, 0xcf, 0x1e, 0xfa, 0xc1, 0xa5, 0xcf, 0xad, 0xb3, 0x58, 0xec,
+ 0x84, 0x78, 0xde, 0x7b, 0x86, 0xcf, 0x12, 0xbb, 0xcc, 0x06, 0xd0, 0x1e, 0xc8, 0x9e, 0x1b, 0xc5,
+ 0x36, 0xb3, 0xa6, 0x5a, 0xe4, 0xa5, 0xfc, 0x62, 0x81, 0x5a, 0xd3, 0x8d, 0xe2, 0x54, 0x48, 0xf2,
+ 0xd2, 0x18, 0xed, 0x80, 0x34, 0xc2, 0x61, 0x22, 0x53, 0xe2, 0x32, 0x2f, 0x16, 0xc8, 0xb4, 0x70,
+ 0x98, 0xed, 0x6d, 0x94, 0x84, 0xc8, 0x04, 0xe9, 0x7c, 0xec, 0x3b, 0xb1, 0x1b, 0xf8, 0xaa, 0xc4,
+ 0x45, 0xbe, 0x5a, 0x20, 0xb2, 0x97, 0xc2, 0xb3, 0x7c, 0x32, 0x3a, 0x7a, 0x0e, 0xe5, 0x11, 0x89,
+ 0x22, 0x3c, 0x20, 0x49, 0x4e, 0xf2, 0x86, 0xb0, 0x29, 0x1f, 0xac, 0x59, 0x4a, 0x3a, 0xca, 0xd7,
+ 0x7b, 0x06, 0xc0, 0xdb, 0x3a, 0xc4, 0x14, 0x8f, 0x54, 0x48, 0x21, 0x32, 0x1b, 0xeb, 0xb2, 0x21,
+ 0xb4, 0x05, 0x22, 0x67, 0x2b, 0xcb, 0x98, 0x94, 0x59, 0x87, 0xa1, 0x91, 0x0e, 0x05, 0x42, 0x69,
+ 0x40, 0xd5, 0xf2, 0x02, 0x6b, 0x26, 0x30, 0x64, 0xc1, 0x3a, 0x3e, 0x8b, 0x62, 0x8a, 0x9d, 0xf4,
+ 0x1c, 0x1e, 0x2d, 0xb5, 0xf7, 0xed, 0x94, 0x93, 0xae, 0x5d, 0xc6, 0x33, 0xdf, 0xda, 0x3e, 0x48,
+ 0xd9, 0x39, 0xa1, 0x77, 0x20, 0x13, 0x8f, 0x8c, 0x12, 0x6d, 0x61, 0xa9, 0x7e, 0x93, 0x18, 0x81,
+ 0x0b, 0xfd, 0x2a, 0x40, 0x29, 0x3d, 0x2a, 0xf4, 0x16, 0xa4, 0x21, 0x99, 0xac, 0xa2, 0x53, 0x1a,
+ 0x92, 0x09, 0xa7, 0xfe, 0x00, 0xc0, 0x5b, 0x38, 0x21, 0x2f, 0xd7, 0xf4, 0x32, 0x67, 0xf0, 0x2c,
+ 0xfe, 0x10, 0xa0, 0x3c, 0x7b, 0xd6, 0xe8, 0x47, 0x50, 0x28, 0x89, 0xc6, 0x5e, 0xbc, 0x4a, 0x36,
+ 0x90, 0x50, 0xb2, 0xa2, 0x60, 0x3a, 0xe0, 0xec, 0x28, 0x7d, 0x2c, 0x16, 0x16, 0x05, 0xd3, 0x01,
+ 0x0b, 0x22, 0x6d, 0x08, 0xe5, 0xd9, 0xea, 0x23, 0x04, 0xa2, 0x8f, 0x47, 0x49, 0x1a, 0xb2, 0xc5,
+ 0x63, 0xb4, 0x0f, 0xff, 0xe3, 0xbe, 0x22, 0x31, 0xa1, 0x2b, 0x2d, 0xf3, 0x68, 0x4a, 0xe3, 0x8b,
+ 0x55, 0x23, 0x58, 0xbf, 0x71, 0x1b, 0xa0, 0xa7, 0xa0, 0x75, 0x2d, 0xb3, 0x65, 0xf6, 0xcc, 0x63,
+ 0xc3, 0xee, 0x9d, 0x76, 0x0d, 0xfb, 0xa8, 0x7d, 0xd8, 0x35, 0x76, 0xcc, 0x3d, 0xd3, 0xd8, 0xad,
+ 0xac, 0x21, 0x09, 0xc4, 0x46, 0xa7, 0xd3, 0xac, 0x08, 0x48, 0x86, 0x82, 0xd9, 0xee, 0xbd, 0xde,
+ 0xaa, 0xe4, 0x10, 0x40, 0xf1, 0x28, 0x89, 0xf3, 0x2c, 0xde, 0xed, 0x1c, 0x35, 0x9a, 0x46, 0x45,
+ 0x64, 0xf1, 0x61, 0xcf, 0x32, 0xdb, 0xfb, 0x95, 0x02, 0x83, 0x37, 0x4e, 0x7b, 0xc6, 0x61, 0xa5,
+ 0x58, 0x3d, 0x86, 0xf5, 0x1b, 0xb7, 0x06, 0x7a, 0x06, 0x8f, 0x4f, 0x8c, 0x66, 0xd3, 0x7e, 0xdf,
+ 0xee, 0x9c, 0xb4, 0xef, 0x5b, 0xb5, 0x04, 0xf9, 0xed, 0xf6, 0x69, 0x45, 0x40, 0xeb, 0x20, 0xf7,
+ 0xcc, 0x96, 0x71, 0xd8, 0xdb, 0x6e, 0x75, 0x2b, 0x39, 0x54, 0x06, 0x69, 0xf7, 0xc8, 0xda, 0xee,
+ 0x99, 0x9d, 0x76, 0x25, 0xdf, 0x50, 0x80, 0xb7, 0x97, 0x3d, 0x74, 0xfd, 0x7e, 0xf5, 0xcf, 0x02,
+ 0x88, 0xbb, 0xc4, 0xf1, 0xee, 0xad, 0xdf, 0x4f, 0x50, 0x70, 0xfb, 0xc4, 0x8f, 0x53, 0xb3, 0x6c,
+ 0xce, 0xaf, 0x1a, 0x93, 0xd0, 0x4d, 0x86, 0x65, 0x11, 0xeb, 0x2b, 0x4e, 0xbc, 0x71, 0x9d, 0xe4,
+ 0x17, 0xb5, 0x14, 0x17, 0xc9, 0x2c, 0x96, 0xea, 0x4c, 0xe9, 0xda, 0xef, 0x02, 0xc8, 0xd3, 0x15,
+ 0xd8, 0xc3, 0xbb, 0x82, 0xeb, 0x92, 0x4b, 0xe1, 0xcd, 0xcd, 0x07, 0xaf, 0xfa, 0xc0, 0xff, 0x43,
+ 0xe0, 0x47, 0x31, 0xf6, 0xe3, 0xf4, 0xd1, 0x63, 0x8f, 0x67, 0x3f, 0x70, 0xf8, 0x0e, 0x64, 0x8b,
+ 0x85, 0xda, 0xdf, 0xb9, 0xeb, 0x6e, 0xe0, 0x09, 0x59, 0x20, 0x07, 0x17, 0x84, 0x7a, 0x01, 0xee,
+ 0x47, 0xaa, 0xc0, 0x5d, 0xb6, 0xb5, 0xc2, 0x56, 0xf5, 0x4e, 0x4a, 0xb6, 0xae, 0x65, 0xb4, 0xdf,
+ 0x72, 0x20, 0x65, 0xe3, 0xe8, 0x19, 0x28, 0xd9, 0x8c, 0xed, 0xf6, 0xd3, 0x73, 0x82, 0x6c, 0xc8,
+ 0xec, 0xa3, 0xd7, 0x50, 0xe4, 0xb6, 0x5d, 0xd6, 0xe4, 0x29, 0x9a, 0x09, 0x5f, 0x5f, 0xc1, 0x11,
+ 0xff, 0xb9, 0x92, 0x2d, 0x98, 0xde, 0xc0, 0xd1, 0xed, 0x46, 0x17, 0x57, 0x6e, 0xf4, 0x97, 0xf0,
+ 0x89, 0x1b, 0xd9, 0x2e, 0xaf, 0xa9, 0x43, 0xec, 0xa9, 0x23, 0xd8, 0xd3, 0x29, 0x59, 0xc8, 0x8d,
+ 0xcc, 0x74, 0x2a, 0xab, 0x48, 0x56, 0xf0, 0xe2, 0xb4, 0xe0, 0xcc, 0xb5, 0x7d, 0xe2, 0x78, 0x89,
+ 0x6b, 0x7f, 0x01, 0x79, 0xfa, 0xd3, 0x73, 0xaf, 0x73, 0x6f, 0x15, 0x2b, 0xdd, 0xd3, 0x4c, 0xb1,
+ 0xa6, 0x5e, 0x10, 0x57, 0xf4, 0x42, 0xc3, 0x83, 0x27, 0x4e, 0x30, 0x9a, 0x8b, 0x6f, 0xc8, 0xec,
+ 0x38, 0xbb, 0xec, 0x9d, 0xe9, 0x0a, 0x1f, 0xbe, 0x4f, 0x61, 0x83, 0xc0, 0xc3, 0xfe, 0x40, 0x0f,
+ 0xe8, 0xa0, 0x36, 0x20, 0x3e, 0x7f, 0x85, 0x6a, 0xc9, 0x14, 0x0e, 0xdd, 0xe8, 0xee, 0x4f, 0xfa,
+ 0x3b, 0xf6, 0xf5, 0xaf, 0x20, 0x9c, 0x15, 0x39, 0xf6, 0x9b, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
+ 0xee, 0x02, 0xe6, 0x8f, 0x11, 0x0c, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/conformance_service.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/conformance_service.pb.go
new file mode 100644
index 00000000000..e8e2f08068e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/conformance_service.pb.go
@@ -0,0 +1,807 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/conformance_service.proto
+
+package expr
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Severities of issues.
+type IssueDetails_Severity int32
+
+const (
+ // An unspecified severity.
+ IssueDetails_SEVERITY_UNSPECIFIED IssueDetails_Severity = 0
+ // Deprecation issue for statements and method that may no longer be
+ // supported or maintained.
+ IssueDetails_DEPRECATION IssueDetails_Severity = 1
+ // Warnings such as: unused variables.
+ IssueDetails_WARNING IssueDetails_Severity = 2
+ // Errors such as: unmatched curly braces or variable redefinition.
+ IssueDetails_ERROR IssueDetails_Severity = 3
+)
+
+var IssueDetails_Severity_name = map[int32]string{
+ 0: "SEVERITY_UNSPECIFIED",
+ 1: "DEPRECATION",
+ 2: "WARNING",
+ 3: "ERROR",
+}
+
+var IssueDetails_Severity_value = map[string]int32{
+ "SEVERITY_UNSPECIFIED": 0,
+ "DEPRECATION": 1,
+ "WARNING": 2,
+ "ERROR": 3,
+}
+
+func (x IssueDetails_Severity) String() string {
+ return proto.EnumName(IssueDetails_Severity_name, int32(x))
+}
+
+func (IssueDetails_Severity) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{6, 0}
+}
+
+// Request message for the Parse method.
+type ParseRequest struct {
+ // Required. Source text in CEL syntax.
+ CelSource string `protobuf:"bytes,1,opt,name=cel_source,json=celSource,proto3" json:"cel_source,omitempty"`
+ // Tag for version of CEL syntax, for future use.
+ SyntaxVersion string `protobuf:"bytes,2,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ // File or resource for source text, used in
+ // [SourceInfo][google.api.expr.v1alpha1.SourceInfo].
+ SourceLocation string `protobuf:"bytes,3,opt,name=source_location,json=sourceLocation,proto3" json:"source_location,omitempty"`
+ // Prevent macro expansion. See "Macros" in Language Defiinition.
+ DisableMacros bool `protobuf:"varint,4,opt,name=disable_macros,json=disableMacros,proto3" json:"disable_macros,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ParseRequest) Reset() { *m = ParseRequest{} }
+func (m *ParseRequest) String() string { return proto.CompactTextString(m) }
+func (*ParseRequest) ProtoMessage() {}
+func (*ParseRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{0}
+}
+
+func (m *ParseRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ParseRequest.Unmarshal(m, b)
+}
+func (m *ParseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ParseRequest.Marshal(b, m, deterministic)
+}
+func (m *ParseRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParseRequest.Merge(m, src)
+}
+func (m *ParseRequest) XXX_Size() int {
+ return xxx_messageInfo_ParseRequest.Size(m)
+}
+func (m *ParseRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParseRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParseRequest proto.InternalMessageInfo
+
+func (m *ParseRequest) GetCelSource() string {
+ if m != nil {
+ return m.CelSource
+ }
+ return ""
+}
+
+func (m *ParseRequest) GetSyntaxVersion() string {
+ if m != nil {
+ return m.SyntaxVersion
+ }
+ return ""
+}
+
+func (m *ParseRequest) GetSourceLocation() string {
+ if m != nil {
+ return m.SourceLocation
+ }
+ return ""
+}
+
+func (m *ParseRequest) GetDisableMacros() bool {
+ if m != nil {
+ return m.DisableMacros
+ }
+ return false
+}
+
+// Response message for the Parse method.
+type ParseResponse struct {
+ // The parsed representation, or unset if parsing failed.
+ ParsedExpr *ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3" json:"parsed_expr,omitempty"`
+ // Any number of issues with [StatusDetails][] as the details.
+ Issues []*status.Status `protobuf:"bytes,2,rep,name=issues,proto3" json:"issues,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ParseResponse) Reset() { *m = ParseResponse{} }
+func (m *ParseResponse) String() string { return proto.CompactTextString(m) }
+func (*ParseResponse) ProtoMessage() {}
+func (*ParseResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{1}
+}
+
+func (m *ParseResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ParseResponse.Unmarshal(m, b)
+}
+func (m *ParseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ParseResponse.Marshal(b, m, deterministic)
+}
+func (m *ParseResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParseResponse.Merge(m, src)
+}
+func (m *ParseResponse) XXX_Size() int {
+ return xxx_messageInfo_ParseResponse.Size(m)
+}
+func (m *ParseResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParseResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParseResponse proto.InternalMessageInfo
+
+func (m *ParseResponse) GetParsedExpr() *ParsedExpr {
+ if m != nil {
+ return m.ParsedExpr
+ }
+ return nil
+}
+
+func (m *ParseResponse) GetIssues() []*status.Status {
+ if m != nil {
+ return m.Issues
+ }
+ return nil
+}
+
+// Request message for the Check method.
+type CheckRequest struct {
+ // Required. The parsed representation of the CEL program.
+ ParsedExpr *ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3" json:"parsed_expr,omitempty"`
+ // Declarations of types for external variables and functions.
+ // Required if program uses external variables or functions
+ // not in the default environment.
+ TypeEnv []*Decl `protobuf:"bytes,2,rep,name=type_env,json=typeEnv,proto3" json:"type_env,omitempty"`
+ // The protocol buffer context. See "Name Resolution" in the
+ // Language Definition.
+ Container string `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"`
+ // If true, use only the declarations in
+ // [type_env][google.api.expr.v1alpha1.CheckRequest.type_env]. If false
+ // (default), add declarations for the standard definitions to the type
+ // environment. See "Standard Definitions" in the Language Definition.
+ NoStdEnv bool `protobuf:"varint,4,opt,name=no_std_env,json=noStdEnv,proto3" json:"no_std_env,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CheckRequest) Reset() { *m = CheckRequest{} }
+func (m *CheckRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckRequest) ProtoMessage() {}
+func (*CheckRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{2}
+}
+
+func (m *CheckRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CheckRequest.Unmarshal(m, b)
+}
+func (m *CheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CheckRequest.Marshal(b, m, deterministic)
+}
+func (m *CheckRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CheckRequest.Merge(m, src)
+}
+func (m *CheckRequest) XXX_Size() int {
+ return xxx_messageInfo_CheckRequest.Size(m)
+}
+func (m *CheckRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CheckRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckRequest proto.InternalMessageInfo
+
+func (m *CheckRequest) GetParsedExpr() *ParsedExpr {
+ if m != nil {
+ return m.ParsedExpr
+ }
+ return nil
+}
+
+func (m *CheckRequest) GetTypeEnv() []*Decl {
+ if m != nil {
+ return m.TypeEnv
+ }
+ return nil
+}
+
+func (m *CheckRequest) GetContainer() string {
+ if m != nil {
+ return m.Container
+ }
+ return ""
+}
+
+func (m *CheckRequest) GetNoStdEnv() bool {
+ if m != nil {
+ return m.NoStdEnv
+ }
+ return false
+}
+
+// Response message for the Check method.
+type CheckResponse struct {
+ // The annotated representation, or unset if checking failed.
+ CheckedExpr *CheckedExpr `protobuf:"bytes,1,opt,name=checked_expr,json=checkedExpr,proto3" json:"checked_expr,omitempty"`
+ // Any number of issues with [StatusDetails][] as the details.
+ Issues []*status.Status `protobuf:"bytes,2,rep,name=issues,proto3" json:"issues,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CheckResponse) Reset() { *m = CheckResponse{} }
+func (m *CheckResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckResponse) ProtoMessage() {}
+func (*CheckResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{3}
+}
+
+func (m *CheckResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CheckResponse.Unmarshal(m, b)
+}
+func (m *CheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CheckResponse.Marshal(b, m, deterministic)
+}
+func (m *CheckResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CheckResponse.Merge(m, src)
+}
+func (m *CheckResponse) XXX_Size() int {
+ return xxx_messageInfo_CheckResponse.Size(m)
+}
+func (m *CheckResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CheckResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CheckResponse proto.InternalMessageInfo
+
+func (m *CheckResponse) GetCheckedExpr() *CheckedExpr {
+ if m != nil {
+ return m.CheckedExpr
+ }
+ return nil
+}
+
+func (m *CheckResponse) GetIssues() []*status.Status {
+ if m != nil {
+ return m.Issues
+ }
+ return nil
+}
+
+// Request message for the Eval method.
+type EvalRequest struct {
+ // Required. Either the parsed or annotated representation of the CEL program.
+ //
+ // Types that are valid to be assigned to ExprKind:
+ // *EvalRequest_ParsedExpr
+ // *EvalRequest_CheckedExpr
+ ExprKind isEvalRequest_ExprKind `protobuf_oneof:"expr_kind"`
+ // Bindings for the external variables. The types SHOULD be compatible
+ // with the type environment in
+ // [CheckRequest][google.api.expr.v1alpha1.CheckRequest], if checked.
+ Bindings map[string]*ExprValue `protobuf:"bytes,3,rep,name=bindings,proto3" json:"bindings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // SHOULD be the same container as used in
+ // [CheckRequest][google.api.expr.v1alpha1.CheckRequest], if checked.
+ Container string `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EvalRequest) Reset() { *m = EvalRequest{} }
+func (m *EvalRequest) String() string { return proto.CompactTextString(m) }
+func (*EvalRequest) ProtoMessage() {}
+func (*EvalRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{4}
+}
+
+func (m *EvalRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EvalRequest.Unmarshal(m, b)
+}
+func (m *EvalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EvalRequest.Marshal(b, m, deterministic)
+}
+func (m *EvalRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EvalRequest.Merge(m, src)
+}
+func (m *EvalRequest) XXX_Size() int {
+ return xxx_messageInfo_EvalRequest.Size(m)
+}
+func (m *EvalRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_EvalRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EvalRequest proto.InternalMessageInfo
+
+type isEvalRequest_ExprKind interface {
+ isEvalRequest_ExprKind()
+}
+
+type EvalRequest_ParsedExpr struct {
+ ParsedExpr *ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"`
+}
+
+type EvalRequest_CheckedExpr struct {
+ CheckedExpr *CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"`
+}
+
+func (*EvalRequest_ParsedExpr) isEvalRequest_ExprKind() {}
+
+func (*EvalRequest_CheckedExpr) isEvalRequest_ExprKind() {}
+
+func (m *EvalRequest) GetExprKind() isEvalRequest_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (m *EvalRequest) GetParsedExpr() *ParsedExpr {
+ if x, ok := m.GetExprKind().(*EvalRequest_ParsedExpr); ok {
+ return x.ParsedExpr
+ }
+ return nil
+}
+
+func (m *EvalRequest) GetCheckedExpr() *CheckedExpr {
+ if x, ok := m.GetExprKind().(*EvalRequest_CheckedExpr); ok {
+ return x.CheckedExpr
+ }
+ return nil
+}
+
+func (m *EvalRequest) GetBindings() map[string]*ExprValue {
+ if m != nil {
+ return m.Bindings
+ }
+ return nil
+}
+
+func (m *EvalRequest) GetContainer() string {
+ if m != nil {
+ return m.Container
+ }
+ return ""
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*EvalRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _EvalRequest_OneofMarshaler, _EvalRequest_OneofUnmarshaler, _EvalRequest_OneofSizer, []interface{}{
+ (*EvalRequest_ParsedExpr)(nil),
+ (*EvalRequest_CheckedExpr)(nil),
+ }
+}
+
+func _EvalRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*EvalRequest)
+ // expr_kind
+ switch x := m.ExprKind.(type) {
+ case *EvalRequest_ParsedExpr:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ParsedExpr); err != nil {
+ return err
+ }
+ case *EvalRequest_CheckedExpr:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CheckedExpr); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("EvalRequest.ExprKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _EvalRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*EvalRequest)
+ switch tag {
+ case 1: // expr_kind.parsed_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ParsedExpr)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &EvalRequest_ParsedExpr{msg}
+ return true, err
+ case 2: // expr_kind.checked_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(CheckedExpr)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &EvalRequest_CheckedExpr{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _EvalRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*EvalRequest)
+ // expr_kind
+ switch x := m.ExprKind.(type) {
+ case *EvalRequest_ParsedExpr:
+ s := proto.Size(x.ParsedExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *EvalRequest_CheckedExpr:
+ s := proto.Size(x.CheckedExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Response message for the Eval method.
+type EvalResponse struct {
+ // The execution result, or unset if execution couldn't start.
+ Result *ExprValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"`
+ // Any number of issues with [StatusDetails][] as the details.
+ // Note that CEL execution errors are reified into
+ // [ExprValue][google.api.expr.v1alpha1.ExprValue]. Nevertheless, we'll allow
+ // out-of-band issues to be raised, which also makes the replies more regular.
+ Issues []*status.Status `protobuf:"bytes,2,rep,name=issues,proto3" json:"issues,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EvalResponse) Reset() { *m = EvalResponse{} }
+func (m *EvalResponse) String() string { return proto.CompactTextString(m) }
+func (*EvalResponse) ProtoMessage() {}
+func (*EvalResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{5}
+}
+
+func (m *EvalResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EvalResponse.Unmarshal(m, b)
+}
+func (m *EvalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EvalResponse.Marshal(b, m, deterministic)
+}
+func (m *EvalResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EvalResponse.Merge(m, src)
+}
+func (m *EvalResponse) XXX_Size() int {
+ return xxx_messageInfo_EvalResponse.Size(m)
+}
+func (m *EvalResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_EvalResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EvalResponse proto.InternalMessageInfo
+
+func (m *EvalResponse) GetResult() *ExprValue {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *EvalResponse) GetIssues() []*status.Status {
+ if m != nil {
+ return m.Issues
+ }
+ return nil
+}
+
+// Warnings or errors in service execution are represented by
+// [google.rpc.Status][google.rpc.Status] messages, with the following message
+// in the details field.
+type IssueDetails struct {
+ // The severity of the issue.
+ Severity IssueDetails_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=google.api.expr.v1alpha1.IssueDetails_Severity" json:"severity,omitempty"`
+ // Position in the source, if known.
+ Position *SourcePosition `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"`
+ // Expression ID from [Expr][google.api.expr.v1alpha1.Expr], 0 if unknown.
+ Id int64 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IssueDetails) Reset() { *m = IssueDetails{} }
+func (m *IssueDetails) String() string { return proto.CompactTextString(m) }
+func (*IssueDetails) ProtoMessage() {}
+func (*IssueDetails) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b3ca1183e6ceae83, []int{6}
+}
+
+func (m *IssueDetails) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IssueDetails.Unmarshal(m, b)
+}
+func (m *IssueDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IssueDetails.Marshal(b, m, deterministic)
+}
+func (m *IssueDetails) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IssueDetails.Merge(m, src)
+}
+func (m *IssueDetails) XXX_Size() int {
+ return xxx_messageInfo_IssueDetails.Size(m)
+}
+func (m *IssueDetails) XXX_DiscardUnknown() {
+ xxx_messageInfo_IssueDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IssueDetails proto.InternalMessageInfo
+
+func (m *IssueDetails) GetSeverity() IssueDetails_Severity {
+ if m != nil {
+ return m.Severity
+ }
+ return IssueDetails_SEVERITY_UNSPECIFIED
+}
+
+func (m *IssueDetails) GetPosition() *SourcePosition {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+func (m *IssueDetails) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("google.api.expr.v1alpha1.IssueDetails_Severity", IssueDetails_Severity_name, IssueDetails_Severity_value)
+ proto.RegisterType((*ParseRequest)(nil), "google.api.expr.v1alpha1.ParseRequest")
+ proto.RegisterType((*ParseResponse)(nil), "google.api.expr.v1alpha1.ParseResponse")
+ proto.RegisterType((*CheckRequest)(nil), "google.api.expr.v1alpha1.CheckRequest")
+ proto.RegisterType((*CheckResponse)(nil), "google.api.expr.v1alpha1.CheckResponse")
+ proto.RegisterType((*EvalRequest)(nil), "google.api.expr.v1alpha1.EvalRequest")
+ proto.RegisterMapType((map[string]*ExprValue)(nil), "google.api.expr.v1alpha1.EvalRequest.BindingsEntry")
+ proto.RegisterType((*EvalResponse)(nil), "google.api.expr.v1alpha1.EvalResponse")
+ proto.RegisterType((*IssueDetails)(nil), "google.api.expr.v1alpha1.IssueDetails")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/conformance_service.proto", fileDescriptor_b3ca1183e6ceae83)
+}
+
+var fileDescriptor_b3ca1183e6ceae83 = []byte{
+ // 807 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x41, 0x6f, 0xdb, 0x36,
+ 0x18, 0xb5, 0xe4, 0x24, 0xb5, 0x3f, 0xd9, 0xa9, 0x41, 0x0c, 0xa8, 0x61, 0x64, 0x43, 0xa0, 0x2e,
+ 0x69, 0xb0, 0x83, 0x84, 0xba, 0x97, 0x75, 0xdd, 0xa5, 0xb1, 0xb5, 0xc6, 0xdb, 0x9a, 0x18, 0x74,
+ 0x97, 0x62, 0xbd, 0x68, 0x8c, 0xc4, 0xb9, 0x44, 0x14, 0x52, 0x23, 0x65, 0xcd, 0xde, 0x69, 0x18,
+ 0xb0, 0x7f, 0xb2, 0xfd, 0x9b, 0xfd, 0xa0, 0x1d, 0x07, 0x89, 0xb4, 0x63, 0xb7, 0x50, 0xd2, 0x0c,
+ 0xbd, 0x49, 0x9f, 0xde, 0x7b, 0xfa, 0xde, 0xe3, 0x47, 0x12, 0xfa, 0x53, 0x21, 0xa6, 0x09, 0xf5,
+ 0x49, 0xca, 0x7c, 0x3a, 0x4f, 0xa5, 0x9f, 0x3f, 0x26, 0x49, 0xfa, 0x96, 0x3c, 0xf6, 0x23, 0xc1,
+ 0x7f, 0x16, 0xf2, 0x8a, 0xf0, 0x88, 0x86, 0x8a, 0xca, 0x9c, 0x45, 0xd4, 0x4b, 0xa5, 0xc8, 0x04,
+ 0xea, 0x6a, 0x8e, 0x47, 0x52, 0xe6, 0x15, 0x1c, 0x6f, 0xc9, 0xe9, 0x1d, 0x56, 0xab, 0xbd, 0xa5,
+ 0xd1, 0x25, 0x8d, 0xb5, 0x42, 0xef, 0x61, 0x25, 0x8e, 0xe6, 0x24, 0x31, 0xa0, 0x83, 0x4a, 0x90,
+ 0x5a, 0xf0, 0x8c, 0xcc, 0x0d, 0xec, 0x81, 0x81, 0xc9, 0x34, 0xf2, 0x55, 0x46, 0xb2, 0x99, 0xd2,
+ 0x1f, 0xdc, 0xbf, 0x2c, 0x68, 0x8d, 0x89, 0x54, 0x14, 0xd3, 0x5f, 0x66, 0x54, 0x65, 0xe8, 0x53,
+ 0x80, 0x88, 0x26, 0xa1, 0x12, 0x33, 0x19, 0xd1, 0xae, 0xb5, 0x6f, 0x1d, 0x35, 0x71, 0x33, 0xa2,
+ 0xc9, 0xa4, 0x2c, 0xa0, 0x03, 0xd8, 0xd5, 0xc2, 0x61, 0x4e, 0xa5, 0x62, 0x82, 0x77, 0xed, 0x12,
+ 0xd2, 0xd6, 0xd5, 0x73, 0x5d, 0x44, 0x8f, 0xe0, 0xbe, 0x56, 0x08, 0x13, 0x11, 0x91, 0xac, 0xc0,
+ 0xd5, 0x4b, 0xdc, 0xae, 0x2e, 0x7f, 0x6f, 0xaa, 0x85, 0x5e, 0xcc, 0x14, 0xb9, 0x48, 0x68, 0x78,
+ 0x45, 0x22, 0x29, 0x54, 0x77, 0x6b, 0xdf, 0x3a, 0x6a, 0xe0, 0xb6, 0xa9, 0xbe, 0x2c, 0x8b, 0xee,
+ 0x1f, 0x16, 0xb4, 0x4d, 0x9b, 0x2a, 0x15, 0x5c, 0x51, 0x14, 0x80, 0x93, 0x16, 0x85, 0x38, 0x2c,
+ 0x6c, 0x97, 0x8d, 0x3a, 0xfd, 0xcf, 0xbd, 0xaa, 0xd4, 0xbd, 0x92, 0x1d, 0x07, 0xf3, 0x54, 0x62,
+ 0x48, 0x57, 0xcf, 0xe8, 0x0b, 0xd8, 0x61, 0x4a, 0xcd, 0xa8, 0xea, 0xda, 0xfb, 0xf5, 0x23, 0xa7,
+ 0x8f, 0x96, 0x0a, 0x32, 0x8d, 0xbc, 0x49, 0x99, 0x14, 0x36, 0x08, 0xf7, 0x1f, 0x0b, 0x5a, 0x83,
+ 0x62, 0x89, 0x96, 0x59, 0x7d, 0xa4, 0x1e, 0x9e, 0x42, 0x23, 0x5b, 0xa4, 0x34, 0xa4, 0x3c, 0x37,
+ 0x5d, 0x7c, 0x56, 0xad, 0x31, 0xa4, 0x51, 0x82, 0xef, 0x15, 0xf8, 0x80, 0xe7, 0x68, 0x0f, 0x9a,
+ 0x91, 0xe0, 0x19, 0x61, 0x9c, 0x4a, 0x93, 0xf0, 0x75, 0x01, 0xed, 0x01, 0x70, 0x11, 0xaa, 0x2c,
+ 0x2e, 0xa5, 0x75, 0xb0, 0x0d, 0x2e, 0x26, 0x59, 0x1c, 0xf0, 0xdc, 0xfd, 0xd3, 0x82, 0xb6, 0xb1,
+ 0x63, 0x32, 0x3d, 0x81, 0x96, 0x19, 0xc1, 0x75, 0x43, 0x07, 0xd5, 0xcd, 0x0c, 0x34, 0xba, 0x74,
+ 0xe4, 0x44, 0xd7, 0x2f, 0x77, 0x8a, 0xf5, 0xf7, 0x3a, 0x38, 0x41, 0x4e, 0x92, 0x65, 0xaa, 0x2f,
+ 0xfe, 0x77, 0xaa, 0x27, 0xb5, 0x8d, 0x5c, 0xbf, 0x7d, 0xc7, 0x8e, 0x7d, 0x07, 0x3b, 0x27, 0xb5,
+ 0x4d, 0x43, 0x67, 0xd0, 0xb8, 0x60, 0x3c, 0x66, 0x7c, 0xaa, 0xba, 0xf5, 0xd2, 0xd2, 0x93, 0x6a,
+ 0x9d, 0x35, 0x37, 0xde, 0xb1, 0x61, 0x05, 0x3c, 0x93, 0x0b, 0xbc, 0x12, 0xd9, 0x5c, 0xb9, 0xad,
+ 0x77, 0x56, 0xae, 0xf7, 0x13, 0xb4, 0x37, 0x88, 0xa8, 0x03, 0xf5, 0x4b, 0xba, 0x30, 0xfb, 0xb1,
+ 0x78, 0x44, 0x4f, 0x61, 0x3b, 0x27, 0xc9, 0x8c, 0x1a, 0x5b, 0x0f, 0x6f, 0x68, 0x67, 0x9e, 0xca,
+ 0xf3, 0x02, 0x8a, 0x35, 0xe3, 0x2b, 0xfb, 0x4b, 0xeb, 0xd8, 0x81, 0x66, 0x81, 0x0a, 0x2f, 0x19,
+ 0x8f, 0xdd, 0x5f, 0xa1, 0xa5, 0x7b, 0x36, 0x83, 0xf0, 0x0c, 0x76, 0x24, 0x55, 0xb3, 0x24, 0x33,
+ 0xe9, 0x7f, 0x90, 0xb8, 0xa1, 0xdc, 0x6d, 0xed, 0x6d, 0x68, 0x8d, 0x8a, 0xc7, 0x21, 0xcd, 0x08,
+ 0x4b, 0x14, 0xfa, 0x0e, 0x1a, 0x8a, 0xe6, 0x54, 0xb2, 0x4c, 0x9b, 0xdd, 0xed, 0xfb, 0xd5, 0xff,
+ 0x5e, 0x67, 0x7a, 0x13, 0x43, 0xc3, 0x2b, 0x01, 0x34, 0x84, 0x46, 0x2a, 0x14, 0xcb, 0x96, 0xc7,
+ 0x94, 0xd3, 0x3f, 0xaa, 0x16, 0xd3, 0x07, 0xdc, 0xd8, 0xe0, 0xf1, 0x8a, 0x89, 0x76, 0xc1, 0x66,
+ 0x71, 0xb9, 0xb9, 0xea, 0xd8, 0x66, 0xb1, 0xfb, 0x12, 0x1a, 0xcb, 0x7f, 0xa1, 0x2e, 0x7c, 0x32,
+ 0x09, 0xce, 0x03, 0x3c, 0x7a, 0xf5, 0x63, 0xf8, 0xc3, 0xe9, 0x64, 0x1c, 0x0c, 0x46, 0xdf, 0x8c,
+ 0x82, 0x61, 0xa7, 0x86, 0xee, 0x83, 0x33, 0x0c, 0xc6, 0x38, 0x18, 0x3c, 0x7f, 0x35, 0x3a, 0x3b,
+ 0xed, 0x58, 0xc8, 0x81, 0x7b, 0xaf, 0x9f, 0xe3, 0xd3, 0xd1, 0xe9, 0x8b, 0x8e, 0x8d, 0x9a, 0xb0,
+ 0x1d, 0x60, 0x7c, 0x86, 0x3b, 0xf5, 0xfe, 0xdf, 0x36, 0xa0, 0xc1, 0xf5, 0x35, 0x32, 0xd1, 0xb7,
+ 0x08, 0x7a, 0x03, 0xdb, 0xe5, 0x60, 0xa3, 0xc3, 0x5b, 0x26, 0xdf, 0x0c, 0x5a, 0xef, 0xd1, 0xad,
+ 0x38, 0xbd, 0xb8, 0x6e, 0xad, 0xd0, 0x2e, 0x47, 0xfd, 0x26, 0xed, 0xf5, 0x83, 0xee, 0x26, 0xed,
+ 0x8d, 0x13, 0xc4, 0xad, 0xa1, 0xd7, 0xb0, 0x55, 0x8c, 0x12, 0x3a, 0xf8, 0xa0, 0xed, 0xd1, 0x3b,
+ 0xbc, 0x0d, 0xb6, 0x14, 0x3e, 0xfe, 0x0d, 0xf6, 0x22, 0x71, 0x55, 0x09, 0x3f, 0x7e, 0xf0, 0x7e,
+ 0x88, 0xe3, 0xe2, 0x8a, 0x1b, 0x5b, 0x6f, 0xbe, 0x36, 0xa4, 0xa9, 0x48, 0x08, 0x9f, 0x7a, 0x42,
+ 0x4e, 0xfd, 0x29, 0xe5, 0xe5, 0x05, 0xe8, 0xeb, 0x4f, 0x24, 0x65, 0xea, 0xfd, 0x3b, 0xf4, 0x59,
+ 0xf1, 0xf6, 0xaf, 0x65, 0x5d, 0xec, 0x94, 0xd8, 0x27, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xf9,
+ 0x66, 0xbb, 0xae, 0x09, 0x08, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ConformanceServiceClient is the client API for ConformanceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ConformanceServiceClient interface {
+ // Transforms CEL source text into a parsed representation.
+ Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error)
+ // Runs static checks on a parsed CEL representation and return
+ // an annotated representation, or a set of issues.
+ Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error)
+ // Evaluates a parsed or annotation CEL representation given
+ // values of external bindings.
+ Eval(ctx context.Context, in *EvalRequest, opts ...grpc.CallOption) (*EvalResponse, error)
+}
+
+type conformanceServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewConformanceServiceClient(cc *grpc.ClientConn) ConformanceServiceClient {
+ return &conformanceServiceClient{cc}
+}
+
+func (c *conformanceServiceClient) Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error) {
+ out := new(ParseResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.ConformanceService/Parse", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *conformanceServiceClient) Check(ctx context.Context, in *CheckRequest, opts ...grpc.CallOption) (*CheckResponse, error) {
+ out := new(CheckResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.ConformanceService/Check", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *conformanceServiceClient) Eval(ctx context.Context, in *EvalRequest, opts ...grpc.CallOption) (*EvalResponse, error) {
+ out := new(EvalResponse)
+ err := c.cc.Invoke(ctx, "/google.api.expr.v1alpha1.ConformanceService/Eval", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ConformanceServiceServer is the server API for ConformanceService service.
+type ConformanceServiceServer interface {
+ // Transforms CEL source text into a parsed representation.
+ Parse(context.Context, *ParseRequest) (*ParseResponse, error)
+ // Runs static checks on a parsed CEL representation and return
+ // an annotated representation, or a set of issues.
+ Check(context.Context, *CheckRequest) (*CheckResponse, error)
+ // Evaluates a parsed or annotation CEL representation given
+ // values of external bindings.
+ Eval(context.Context, *EvalRequest) (*EvalResponse, error)
+}
+
+func RegisterConformanceServiceServer(s *grpc.Server, srv ConformanceServiceServer) {
+ s.RegisterService(&_ConformanceService_serviceDesc, srv)
+}
+
+func _ConformanceService_Parse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ParseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ConformanceServiceServer).Parse(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.ConformanceService/Parse",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ConformanceServiceServer).Parse(ctx, req.(*ParseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ConformanceService_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CheckRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ConformanceServiceServer).Check(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.ConformanceService/Check",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ConformanceServiceServer).Check(ctx, req.(*CheckRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ConformanceService_Eval_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(EvalRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ConformanceServiceServer).Eval(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.api.expr.v1alpha1.ConformanceService/Eval",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ConformanceServiceServer).Eval(ctx, req.(*EvalRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _ConformanceService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.api.expr.v1alpha1.ConformanceService",
+ HandlerType: (*ConformanceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Parse",
+ Handler: _ConformanceService_Parse_Handler,
+ },
+ {
+ MethodName: "Check",
+ Handler: _ConformanceService_Check_Handler,
+ },
+ {
+ MethodName: "Eval",
+ Handler: _ConformanceService_Eval_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/api/expr/v1alpha1/conformance_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
new file mode 100644
index 00000000000..e0177b1e82b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
@@ -0,0 +1,434 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/eval.proto
+
+package expr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The state of an evaluation.
+//
+// Can represent an inital, partial, or completed state of evaluation.
+type EvalState struct {
+ // The unique values referenced in this message.
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // An ordered list of results.
+ //
+ // Tracks the flow of evaluation through the expression.
+ // May be sparse.
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EvalState) Reset() { *m = EvalState{} }
+func (m *EvalState) String() string { return proto.CompactTextString(m) }
+func (*EvalState) ProtoMessage() {}
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1e95f32326d4b8b7, []int{0}
+}
+
+func (m *EvalState) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EvalState.Unmarshal(m, b)
+}
+func (m *EvalState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EvalState.Marshal(b, m, deterministic)
+}
+func (m *EvalState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EvalState.Merge(m, src)
+}
+func (m *EvalState) XXX_Size() int {
+ return xxx_messageInfo_EvalState.Size(m)
+}
+func (m *EvalState) XXX_DiscardUnknown() {
+ xxx_messageInfo_EvalState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EvalState proto.InternalMessageInfo
+
+func (m *EvalState) GetValues() []*ExprValue {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func (m *EvalState) GetResults() []*EvalState_Result {
+ if m != nil {
+ return m.Results
+ }
+ return nil
+}
+
+// A single evalution result.
+type EvalState_Result struct {
+ // The id of the expression this result if for.
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ // The index in `values` of the resulting value.
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EvalState_Result) Reset() { *m = EvalState_Result{} }
+func (m *EvalState_Result) String() string { return proto.CompactTextString(m) }
+func (*EvalState_Result) ProtoMessage() {}
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1e95f32326d4b8b7, []int{0, 0}
+}
+
+func (m *EvalState_Result) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EvalState_Result.Unmarshal(m, b)
+}
+func (m *EvalState_Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EvalState_Result.Marshal(b, m, deterministic)
+}
+func (m *EvalState_Result) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EvalState_Result.Merge(m, src)
+}
+func (m *EvalState_Result) XXX_Size() int {
+ return xxx_messageInfo_EvalState_Result.Size(m)
+}
+func (m *EvalState_Result) XXX_DiscardUnknown() {
+ xxx_messageInfo_EvalState_Result.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EvalState_Result proto.InternalMessageInfo
+
+func (m *EvalState_Result) GetExpr() int64 {
+ if m != nil {
+ return m.Expr
+ }
+ return 0
+}
+
+func (m *EvalState_Result) GetValue() int64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// The value of an evaluated expression.
+type ExprValue struct {
+ // An expression can resolve to a value, error or unknown.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ExprValue) Reset() { *m = ExprValue{} }
+func (m *ExprValue) String() string { return proto.CompactTextString(m) }
+func (*ExprValue) ProtoMessage() {}
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1e95f32326d4b8b7, []int{1}
+}
+
+func (m *ExprValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ExprValue.Unmarshal(m, b)
+}
+func (m *ExprValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ExprValue.Marshal(b, m, deterministic)
+}
+func (m *ExprValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExprValue.Merge(m, src)
+}
+func (m *ExprValue) XXX_Size() int {
+ return xxx_messageInfo_ExprValue.Size(m)
+}
+func (m *ExprValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExprValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExprValue proto.InternalMessageInfo
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *ExprValue) GetValue() *Value {
+ if x, ok := m.GetKind().(*ExprValue_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (m *ExprValue) GetError() *ErrorSet {
+ if x, ok := m.GetKind().(*ExprValue_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (m *ExprValue) GetUnknown() *UnknownSet {
+ if x, ok := m.GetKind().(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ExprValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ExprValue_OneofMarshaler, _ExprValue_OneofUnmarshaler, _ExprValue_OneofSizer, []interface{}{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+}
+
+func _ExprValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ExprValue)
+ // kind
+ switch x := m.Kind.(type) {
+ case *ExprValue_Value:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Value); err != nil {
+ return err
+ }
+ case *ExprValue_Error:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Error); err != nil {
+ return err
+ }
+ case *ExprValue_Unknown:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Unknown); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("ExprValue.Kind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ExprValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ExprValue)
+ switch tag {
+ case 1: // kind.value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Value)
+ err := b.DecodeMessage(msg)
+ m.Kind = &ExprValue_Value{msg}
+ return true, err
+ case 2: // kind.error
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ErrorSet)
+ err := b.DecodeMessage(msg)
+ m.Kind = &ExprValue_Error{msg}
+ return true, err
+ case 3: // kind.unknown
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(UnknownSet)
+ err := b.DecodeMessage(msg)
+ m.Kind = &ExprValue_Unknown{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ExprValue_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ExprValue)
+ // kind
+ switch x := m.Kind.(type) {
+ case *ExprValue_Value:
+ s := proto.Size(x.Value)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ExprValue_Error:
+ s := proto.Size(x.Error)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ExprValue_Unknown:
+ s := proto.Size(x.Unknown)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A set of errors.
+//
+// The errors included depend on the context. See `ExprValue.error`.
+type ErrorSet struct {
+ // The errors in the set.
+ Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ErrorSet) Reset() { *m = ErrorSet{} }
+func (m *ErrorSet) String() string { return proto.CompactTextString(m) }
+func (*ErrorSet) ProtoMessage() {}
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1e95f32326d4b8b7, []int{2}
+}
+
+func (m *ErrorSet) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ErrorSet.Unmarshal(m, b)
+}
+func (m *ErrorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ErrorSet.Marshal(b, m, deterministic)
+}
+func (m *ErrorSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ErrorSet.Merge(m, src)
+}
+func (m *ErrorSet) XXX_Size() int {
+ return xxx_messageInfo_ErrorSet.Size(m)
+}
+func (m *ErrorSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_ErrorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ErrorSet proto.InternalMessageInfo
+
+func (m *ErrorSet) GetErrors() []*status.Status {
+ if m != nil {
+ return m.Errors
+ }
+ return nil
+}
+
+// A set of expressions for which the value is unknown.
+//
+// The unknowns included depend on the context. See `ExprValue.unknown`.
+type UnknownSet struct {
+ // The ids of the expressions with unknown values.
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnknownSet) Reset() { *m = UnknownSet{} }
+func (m *UnknownSet) String() string { return proto.CompactTextString(m) }
+func (*UnknownSet) ProtoMessage() {}
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1e95f32326d4b8b7, []int{3}
+}
+
+func (m *UnknownSet) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnknownSet.Unmarshal(m, b)
+}
+func (m *UnknownSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnknownSet.Marshal(b, m, deterministic)
+}
+func (m *UnknownSet) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnknownSet.Merge(m, src)
+}
+func (m *UnknownSet) XXX_Size() int {
+ return xxx_messageInfo_UnknownSet.Size(m)
+}
+func (m *UnknownSet) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnknownSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnknownSet proto.InternalMessageInfo
+
+func (m *UnknownSet) GetExprs() []int64 {
+ if m != nil {
+ return m.Exprs
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*EvalState)(nil), "google.api.expr.v1alpha1.EvalState")
+ proto.RegisterType((*EvalState_Result)(nil), "google.api.expr.v1alpha1.EvalState.Result")
+ proto.RegisterType((*ExprValue)(nil), "google.api.expr.v1alpha1.ExprValue")
+ proto.RegisterType((*ErrorSet)(nil), "google.api.expr.v1alpha1.ErrorSet")
+ proto.RegisterType((*UnknownSet)(nil), "google.api.expr.v1alpha1.UnknownSet")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/eval.proto", fileDescriptor_1e95f32326d4b8b7)
+}
+
+var fileDescriptor_1e95f32326d4b8b7 = []byte{
+ // 367 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4b, 0xeb, 0x40,
+ 0x10, 0xc7, 0x5f, 0x5e, 0xda, 0xf4, 0xbd, 0xe9, 0x6d, 0x11, 0x0c, 0x45, 0xb0, 0xa4, 0x3d, 0x94,
+ 0x1e, 0x36, 0x34, 0x82, 0x82, 0xf5, 0x20, 0xc5, 0x82, 0xc7, 0x92, 0xa2, 0x07, 0x6f, 0x6b, 0x5d,
+ 0x62, 0xe8, 0x9a, 0x5d, 0x36, 0x3f, 0xec, 0xdf, 0xe7, 0xd1, 0xbf, 0xc8, 0xa3, 0xec, 0x6c, 0x16,
+ 0x0f, 0x92, 0xde, 0x3a, 0xbb, 0x9f, 0xcf, 0x77, 0xa6, 0xd9, 0x81, 0x49, 0x26, 0x65, 0x26, 0x78,
+ 0xcc, 0x54, 0x1e, 0xf3, 0x83, 0xd2, 0x71, 0xb3, 0x60, 0x42, 0xbd, 0xb2, 0x45, 0xcc, 0x1b, 0x26,
+ 0xa8, 0xd2, 0xb2, 0x92, 0x24, 0xb4, 0x10, 0x65, 0x2a, 0xa7, 0x06, 0xa2, 0x0e, 0x1a, 0x4d, 0x3b,
+ 0xf5, 0x86, 0x89, 0x9a, 0x5b, 0x7f, 0x74, 0xda, 0x52, 0x5a, 0xed, 0xe2, 0xb2, 0x62, 0x55, 0x5d,
+ 0xda, 0x8b, 0xe8, 0xc3, 0x83, 0xff, 0xeb, 0x86, 0x89, 0x6d, 0xc5, 0x2a, 0x4e, 0x96, 0x10, 0xa0,
+ 0x55, 0x86, 0xde, 0xd8, 0x9f, 0x0d, 0x93, 0x09, 0xed, 0xea, 0x4b, 0xd7, 0x07, 0xa5, 0x1f, 0x0d,
+ 0x9b, 0xb6, 0x0a, 0xb9, 0x83, 0x81, 0xe6, 0x65, 0x2d, 0xaa, 0x32, 0xf4, 0xd1, 0x9e, 0x1f, 0xb1,
+ 0x5d, 0x4b, 0x9a, 0xa2, 0x92, 0x3a, 0x75, 0x94, 0x40, 0x60, 0x8f, 0x08, 0x81, 0x9e, 0x91, 0x42,
+ 0x6f, 0xec, 0xcd, 0xfc, 0x14, 0x7f, 0x93, 0x13, 0xe8, 0x63, 0xb7, 0xf0, 0x2f, 0x1e, 0xda, 0x22,
+ 0xfa, 0x34, 0x7f, 0xc2, 0xcd, 0x43, 0xae, 0x1c, 0x63, 0xc4, 0x61, 0x72, 0xde, 0x3d, 0x05, 0xf2,
+ 0xf7, 0x7f, 0xda, 0x18, 0x72, 0x0d, 0x7d, 0xae, 0xb5, 0xd4, 0x18, 0x3e, 0x4c, 0xa2, 0x23, 0xe3,
+ 0x1b, 0x6c, 0xcb, 0x2b, 0xe3, 0xa2, 0x42, 0x6e, 0x61, 0x50, 0x17, 0xfb, 0x42, 0xbe, 0x17, 0xa1,
+ 0x8f, 0xf6, 0xb4, 0xdb, 0x7e, 0xb0, 0xa0, 0xf5, 0x9d, 0xb6, 0x0a, 0xa0, 0xb7, 0xcf, 0x8b, 0x97,
+ 0xe8, 0x12, 0xfe, 0xb9, 0x78, 0x32, 0x87, 0x00, 0xe3, 0xdd, 0x7b, 0x10, 0x17, 0xaa, 0xd5, 0x8e,
+ 0x6e, 0xf1, 0x1d, 0xd3, 0x96, 0x88, 0x22, 0x80, 0x9f, 0x60, 0xf3, 0xa1, 0x4c, 0x53, 0x2b, 0xfa,
+ 0xa9, 0x2d, 0x56, 0x02, 0xce, 0x76, 0xf2, 0xad, 0x73, 0xb2, 0x15, 0xae, 0xc2, 0xc6, 0x2c, 0xc6,
+ 0xc6, 0x7b, 0xba, 0x69, 0xb1, 0x4c, 0x0a, 0x56, 0x64, 0x54, 0xea, 0x2c, 0xce, 0x78, 0x81, 0x6b,
+ 0x13, 0xdb, 0x2b, 0xa6, 0xf2, 0xf2, 0xf7, 0xe2, 0x2d, 0x4d, 0xf5, 0xe5, 0x79, 0xcf, 0x01, 0xb2,
+ 0x17, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x62, 0xde, 0x1d, 0xe2, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
new file mode 100644
index 00000000000..e50ce2f1b0b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/explain.proto
+
+package expr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Values of intermediate expressions produced when evaluating expression.
+// Deprecated, use `EvalState` instead.
+//
+// Deprecated: Do not use.
+type Explain struct {
+ // All of the observed values.
+ //
+ // The field value_index is an index in the values list.
+ // Separating values from steps is needed to remove redundant values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // List of steps.
+ //
+ // Repeated evaluations of the same expression generate new ExprStep
+ // instances. The order of such ExprStep instances matches the order of
+ // elements returned by Comprehension.iter_range.
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Explain) Reset() { *m = Explain{} }
+func (m *Explain) String() string { return proto.CompactTextString(m) }
+func (*Explain) ProtoMessage() {}
+func (*Explain) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2df9793dd8748e27, []int{0}
+}
+
+func (m *Explain) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Explain.Unmarshal(m, b)
+}
+func (m *Explain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Explain.Marshal(b, m, deterministic)
+}
+func (m *Explain) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Explain.Merge(m, src)
+}
+func (m *Explain) XXX_Size() int {
+ return xxx_messageInfo_Explain.Size(m)
+}
+func (m *Explain) XXX_DiscardUnknown() {
+ xxx_messageInfo_Explain.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Explain proto.InternalMessageInfo
+
+func (m *Explain) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func (m *Explain) GetExprSteps() []*Explain_ExprStep {
+ if m != nil {
+ return m.ExprSteps
+ }
+ return nil
+}
+
+// ID and value index of one step.
+type Explain_ExprStep struct {
+ // ID of corresponding Expr node.
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Index of the value in the values list.
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Explain_ExprStep) Reset() { *m = Explain_ExprStep{} }
+func (m *Explain_ExprStep) String() string { return proto.CompactTextString(m) }
+func (*Explain_ExprStep) ProtoMessage() {}
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2df9793dd8748e27, []int{0, 0}
+}
+
+func (m *Explain_ExprStep) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Explain_ExprStep.Unmarshal(m, b)
+}
+func (m *Explain_ExprStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Explain_ExprStep.Marshal(b, m, deterministic)
+}
+func (m *Explain_ExprStep) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Explain_ExprStep.Merge(m, src)
+}
+func (m *Explain_ExprStep) XXX_Size() int {
+ return xxx_messageInfo_Explain_ExprStep.Size(m)
+}
+func (m *Explain_ExprStep) XXX_DiscardUnknown() {
+ xxx_messageInfo_Explain_ExprStep.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Explain_ExprStep proto.InternalMessageInfo
+
+func (m *Explain_ExprStep) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func (m *Explain_ExprStep) GetValueIndex() int32 {
+ if m != nil {
+ return m.ValueIndex
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Explain)(nil), "google.api.expr.v1alpha1.Explain")
+ proto.RegisterType((*Explain_ExprStep)(nil), "google.api.expr.v1alpha1.Explain.ExprStep")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/explain.proto", fileDescriptor_2df9793dd8748e27)
+}
+
+var fileDescriptor_2df9793dd8748e27 = []byte{
+ // 261 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xb1, 0x4b, 0x03, 0x31,
+ 0x14, 0xc6, 0x79, 0x29, 0x56, 0x7d, 0x15, 0x87, 0x4c, 0xa1, 0x08, 0x3d, 0x44, 0xe4, 0x70, 0x48,
+ 0xa8, 0x0e, 0x82, 0x75, 0x2a, 0x38, 0x74, 0x2b, 0x27, 0x38, 0xb8, 0x94, 0xe8, 0x85, 0x18, 0x88,
+ 0x97, 0x70, 0x39, 0x4b, 0xff, 0x4a, 0xff, 0x1e, 0x47, 0x49, 0x2e, 0x37, 0x95, 0x9b, 0xee, 0xde,
+ 0xfb, 0x7e, 0xdf, 0xf7, 0x91, 0x87, 0xb7, 0xda, 0x39, 0x6d, 0x95, 0x90, 0xde, 0x08, 0x75, 0xf0,
+ 0xad, 0xd8, 0x2f, 0xa5, 0xf5, 0x5f, 0x72, 0x19, 0x27, 0x2b, 0x4d, 0xc3, 0x7d, 0xeb, 0x3a, 0x47,
+ 0x59, 0xcf, 0x71, 0xe9, 0x0d, 0x8f, 0x1c, 0x1f, 0xb8, 0xf9, 0xcd, 0x68, 0xc2, 0x5e, 0xda, 0x1f,
+ 0xd5, 0xfb, 0xaf, 0x7f, 0x01, 0x4f, 0x5f, 0xfa, 0x44, 0xfa, 0x88, 0xd3, 0x24, 0x05, 0x06, 0xc5,
+ 0xa4, 0x9c, 0xdd, 0x2f, 0xf8, 0x58, 0x38, 0x7f, 0x8b, 0x5c, 0x95, 0x71, 0xba, 0x41, 0x8c, 0xf2,
+ 0x2e, 0x74, 0xca, 0x07, 0x46, 0x92, 0xf9, 0x6e, 0xdc, 0x9c, 0xfb, 0xe2, 0xb7, 0x7d, 0xed, 0x94,
+ 0xaf, 0xce, 0x55, 0xfe, 0x0b, 0xf3, 0x15, 0x9e, 0x0d, 0x6b, 0x7a, 0x89, 0xc4, 0xd4, 0x0c, 0x0a,
+ 0x28, 0x27, 0x15, 0x31, 0x35, 0x5d, 0xe0, 0x2c, 0x15, 0xee, 0x4c, 0x53, 0xab, 0x03, 0x23, 0x05,
+ 0x94, 0x27, 0x15, 0xa6, 0xd5, 0x26, 0x6e, 0x9e, 0x08, 0x83, 0xb5, 0xc3, 0xab, 0x4f, 0xf7, 0x3d,
+ 0x5a, 0xbe, 0xbe, 0xc8, 0xed, 0xdb, 0xf8, 0xfc, 0x2d, 0xbc, 0x3f, 0x67, 0x52, 0x3b, 0x2b, 0x1b,
+ 0xcd, 0x5d, 0xab, 0x85, 0x56, 0x4d, 0x3a, 0x8e, 0xe8, 0x25, 0xe9, 0x4d, 0x38, 0xbe, 0xe2, 0x2a,
+ 0x4e, 0x7f, 0x00, 0x1f, 0xd3, 0xc4, 0x3e, 0xfc, 0x07, 0x00, 0x00, 0xff, 0xff, 0x34, 0xf2, 0xb9,
+ 0x9e, 0xb2, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
new file mode 100644
index 00000000000..87b082b7918
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
@@ -0,0 +1,1588 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/syntax.proto
+
+package expr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// An expression together with source information as returned by the parser.
+type ParsedExpr struct {
+ // The parsed expression.
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ // The source info derived from input that generated the parsed `expr`.
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ParsedExpr) Reset() { *m = ParsedExpr{} }
+func (m *ParsedExpr) String() string { return proto.CompactTextString(m) }
+func (*ParsedExpr) ProtoMessage() {}
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{0}
+}
+
+func (m *ParsedExpr) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ParsedExpr.Unmarshal(m, b)
+}
+func (m *ParsedExpr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ParsedExpr.Marshal(b, m, deterministic)
+}
+func (m *ParsedExpr) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParsedExpr.Merge(m, src)
+}
+func (m *ParsedExpr) XXX_Size() int {
+ return xxx_messageInfo_ParsedExpr.Size(m)
+}
+func (m *ParsedExpr) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParsedExpr.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParsedExpr proto.InternalMessageInfo
+
+func (m *ParsedExpr) GetExpr() *Expr {
+ if m != nil {
+ return m.Expr
+ }
+ return nil
+}
+
+func (m *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if m != nil {
+ return m.SourceInfo
+ }
+ return nil
+}
+
+// An abstract representation of a common expression.
+//
+// Expressions are abstractly represented as a collection of identifiers,
+// select statements, function calls, literals, and comprehensions. All
+// operators with the exception of the '.' operator are modelled as function
+// calls. This makes it easy to represent new operators into the existing AST.
+//
+// All references within expressions must resolve to a
+// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an
+// expression to be valid. A reference may either be a bare identifier `name` or
+// a qualified identifier `google.api.name`. References may either refer to a
+// value or a function declaration.
+//
+// For example, the expression `google.api.name.startsWith('expr')` references
+// the declaration `google.api.name` within a
+// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the
+// function declaration `startsWith`.
+type Expr struct {
+ // Required. An id assigned to this node by the parser which is unique in a
+ // given expression tree. This is used to associate type information and other
+ // attributes to a node in the parse tree.
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Required. Variants of expressions.
+ //
+ // Types that are valid to be assigned to ExprKind:
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr) Reset() { *m = Expr{} }
+func (m *Expr) String() string { return proto.CompactTextString(m) }
+func (*Expr) ProtoMessage() {}
+func (*Expr) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1}
+}
+
+func (m *Expr) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr.Unmarshal(m, b)
+}
+func (m *Expr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr.Marshal(b, m, deterministic)
+}
+func (m *Expr) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr.Merge(m, src)
+}
+func (m *Expr) XXX_Size() int {
+ return xxx_messageInfo_Expr.Size(m)
+}
+func (m *Expr) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr proto.InternalMessageInfo
+
+func (m *Expr) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (m *Expr) GetConstExpr() *Constant {
+ if x, ok := m.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := m.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := m.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := m.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := m.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := m.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (m *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := m.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Expr) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Expr_OneofMarshaler, _Expr_OneofUnmarshaler, _Expr_OneofSizer, []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+}
+
+func _Expr_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Expr)
+ // expr_kind
+ switch x := m.ExprKind.(type) {
+ case *Expr_ConstExpr:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ConstExpr); err != nil {
+ return err
+ }
+ case *Expr_IdentExpr:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.IdentExpr); err != nil {
+ return err
+ }
+ case *Expr_SelectExpr:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.SelectExpr); err != nil {
+ return err
+ }
+ case *Expr_CallExpr:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CallExpr); err != nil {
+ return err
+ }
+ case *Expr_ListExpr:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListExpr); err != nil {
+ return err
+ }
+ case *Expr_StructExpr:
+ b.EncodeVarint(8<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StructExpr); err != nil {
+ return err
+ }
+ case *Expr_ComprehensionExpr:
+ b.EncodeVarint(9<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ComprehensionExpr); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Expr.ExprKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Expr_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Expr)
+ switch tag {
+ case 3: // expr_kind.const_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Constant)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_ConstExpr{msg}
+ return true, err
+ case 4: // expr_kind.ident_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_Ident)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_IdentExpr{msg}
+ return true, err
+ case 5: // expr_kind.select_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_Select)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_SelectExpr{msg}
+ return true, err
+ case 6: // expr_kind.call_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_Call)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_CallExpr{msg}
+ return true, err
+ case 7: // expr_kind.list_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_CreateList)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_ListExpr{msg}
+ return true, err
+ case 8: // expr_kind.struct_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_CreateStruct)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_StructExpr{msg}
+ return true, err
+ case 9: // expr_kind.comprehension_expr
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr_Comprehension)
+ err := b.DecodeMessage(msg)
+ m.ExprKind = &Expr_ComprehensionExpr{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Expr_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Expr)
+ // expr_kind
+ switch x := m.ExprKind.(type) {
+ case *Expr_ConstExpr:
+ s := proto.Size(x.ConstExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_IdentExpr:
+ s := proto.Size(x.IdentExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_SelectExpr:
+ s := proto.Size(x.SelectExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_CallExpr:
+ s := proto.Size(x.CallExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_ListExpr:
+ s := proto.Size(x.ListExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_StructExpr:
+ s := proto.Size(x.StructExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Expr_ComprehensionExpr:
+ s := proto.Size(x.ComprehensionExpr)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// An identifier expression. e.g. `request`.
+type Expr_Ident struct {
+ // Required. Holds a single, unqualified identifier, possibly preceded by a
+ // '.'.
+ //
+ // Qualified names are represented by the
+ // [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_Ident) Reset() { *m = Expr_Ident{} }
+func (m *Expr_Ident) String() string { return proto.CompactTextString(m) }
+func (*Expr_Ident) ProtoMessage() {}
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 0}
+}
+
+func (m *Expr_Ident) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_Ident.Unmarshal(m, b)
+}
+func (m *Expr_Ident) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_Ident.Marshal(b, m, deterministic)
+}
+func (m *Expr_Ident) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_Ident.Merge(m, src)
+}
+func (m *Expr_Ident) XXX_Size() int {
+ return xxx_messageInfo_Expr_Ident.Size(m)
+}
+func (m *Expr_Ident) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_Ident.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_Ident proto.InternalMessageInfo
+
+func (m *Expr_Ident) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// A field selection expression. e.g. `request.auth`.
+type Expr_Select struct {
+ // Required. The target of the selection expression.
+ //
+ // For example, in the select expression `request.auth`, the `request`
+ // portion of the expression is the `operand`.
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ // Required. The name of the field to select.
+ //
+ // For example, in the select expression `request.auth`, the `auth` portion
+ // of the expression would be the `field`.
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ // Whether the select is to be interpreted as a field presence test.
+ //
+ // This results from the macro `has(request.auth)`.
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_Select) Reset() { *m = Expr_Select{} }
+func (m *Expr_Select) String() string { return proto.CompactTextString(m) }
+func (*Expr_Select) ProtoMessage() {}
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 1}
+}
+
+func (m *Expr_Select) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_Select.Unmarshal(m, b)
+}
+func (m *Expr_Select) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_Select.Marshal(b, m, deterministic)
+}
+func (m *Expr_Select) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_Select.Merge(m, src)
+}
+func (m *Expr_Select) XXX_Size() int {
+ return xxx_messageInfo_Expr_Select.Size(m)
+}
+func (m *Expr_Select) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_Select.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_Select proto.InternalMessageInfo
+
+func (m *Expr_Select) GetOperand() *Expr {
+ if m != nil {
+ return m.Operand
+ }
+ return nil
+}
+
+func (m *Expr_Select) GetField() string {
+ if m != nil {
+ return m.Field
+ }
+ return ""
+}
+
+func (m *Expr_Select) GetTestOnly() bool {
+ if m != nil {
+ return m.TestOnly
+ }
+ return false
+}
+
+// A call expression, including calls to predefined functions and operators.
+//
+// For example, `value == 10`, `size(map_value)`.
+type Expr_Call struct {
+ // The target of an method call-style expression. For example, `x` in
+ // `x.f()`.
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ // Required. The name of the function or method being called.
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ // The arguments.
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_Call) Reset() { *m = Expr_Call{} }
+func (m *Expr_Call) String() string { return proto.CompactTextString(m) }
+func (*Expr_Call) ProtoMessage() {}
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 2}
+}
+
+func (m *Expr_Call) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_Call.Unmarshal(m, b)
+}
+func (m *Expr_Call) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_Call.Marshal(b, m, deterministic)
+}
+func (m *Expr_Call) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_Call.Merge(m, src)
+}
+func (m *Expr_Call) XXX_Size() int {
+ return xxx_messageInfo_Expr_Call.Size(m)
+}
+func (m *Expr_Call) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_Call.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_Call proto.InternalMessageInfo
+
+func (m *Expr_Call) GetTarget() *Expr {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *Expr_Call) GetFunction() string {
+ if m != nil {
+ return m.Function
+ }
+ return ""
+}
+
+func (m *Expr_Call) GetArgs() []*Expr {
+ if m != nil {
+ return m.Args
+ }
+ return nil
+}
+
+// A list creation expression.
+//
+// Lists may either be homogenous, e.g. `[1, 2, 3]`, or heterogenous, e.g.
+// `dyn([1, 'hello', 2.0])`
+type Expr_CreateList struct {
+ // The elements part of the list.
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_CreateList) Reset() { *m = Expr_CreateList{} }
+func (m *Expr_CreateList) String() string { return proto.CompactTextString(m) }
+func (*Expr_CreateList) ProtoMessage() {}
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 3}
+}
+
+func (m *Expr_CreateList) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_CreateList.Unmarshal(m, b)
+}
+func (m *Expr_CreateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_CreateList.Marshal(b, m, deterministic)
+}
+func (m *Expr_CreateList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_CreateList.Merge(m, src)
+}
+func (m *Expr_CreateList) XXX_Size() int {
+ return xxx_messageInfo_Expr_CreateList.Size(m)
+}
+func (m *Expr_CreateList) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_CreateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_CreateList proto.InternalMessageInfo
+
+func (m *Expr_CreateList) GetElements() []*Expr {
+ if m != nil {
+ return m.Elements
+ }
+ return nil
+}
+
+// A map or message creation expression.
+//
+// Maps are constructed as `{'key_name': 'value'}`. Message construction is
+// similar, but prefixed with a type name and composed of field ids:
+// `types.MyType{field_id: 'value'}`.
+type Expr_CreateStruct struct {
+ // The type name of the message to be created, empty when creating map
+ // literals.
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ // The entries in the creation expression.
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_CreateStruct) Reset() { *m = Expr_CreateStruct{} }
+func (m *Expr_CreateStruct) String() string { return proto.CompactTextString(m) }
+func (*Expr_CreateStruct) ProtoMessage() {}
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 4}
+}
+
+func (m *Expr_CreateStruct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_CreateStruct.Unmarshal(m, b)
+}
+func (m *Expr_CreateStruct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_CreateStruct.Marshal(b, m, deterministic)
+}
+func (m *Expr_CreateStruct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_CreateStruct.Merge(m, src)
+}
+func (m *Expr_CreateStruct) XXX_Size() int {
+ return xxx_messageInfo_Expr_CreateStruct.Size(m)
+}
+func (m *Expr_CreateStruct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_CreateStruct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_CreateStruct proto.InternalMessageInfo
+
+func (m *Expr_CreateStruct) GetMessageName() string {
+ if m != nil {
+ return m.MessageName
+ }
+ return ""
+}
+
+func (m *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+// Represents an entry.
+type Expr_CreateStruct_Entry struct {
+ // Required. An id assigned to this node by the parser which is unique
+ // in a given expression tree. This is used to associate type
+ // information and other attributes to the node.
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // The `Entry` key kinds.
+ //
+ // Types that are valid to be assigned to KeyKind:
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ // Required. The value assigned to the key.
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_CreateStruct_Entry) Reset() { *m = Expr_CreateStruct_Entry{} }
+func (m *Expr_CreateStruct_Entry) String() string { return proto.CompactTextString(m) }
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 4, 0}
+}
+
+func (m *Expr_CreateStruct_Entry) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_CreateStruct_Entry.Unmarshal(m, b)
+}
+func (m *Expr_CreateStruct_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_CreateStruct_Entry.Marshal(b, m, deterministic)
+}
+func (m *Expr_CreateStruct_Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_CreateStruct_Entry.Merge(m, src)
+}
+func (m *Expr_CreateStruct_Entry) XXX_Size() int {
+ return xxx_messageInfo_Expr_CreateStruct_Entry.Size(m)
+}
+func (m *Expr_CreateStruct_Entry) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_CreateStruct_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_CreateStruct_Entry proto.InternalMessageInfo
+
+func (m *Expr_CreateStruct_Entry) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (m *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := m.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (m *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := m.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (m *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Expr_CreateStruct_Entry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Expr_CreateStruct_Entry_OneofMarshaler, _Expr_CreateStruct_Entry_OneofUnmarshaler, _Expr_CreateStruct_Entry_OneofSizer, []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+}
+
+func _Expr_CreateStruct_Entry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Expr_CreateStruct_Entry)
+ // key_kind
+ switch x := m.KeyKind.(type) {
+ case *Expr_CreateStruct_Entry_FieldKey:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.FieldKey)
+ case *Expr_CreateStruct_Entry_MapKey:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MapKey); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Expr_CreateStruct_Entry.KeyKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Expr_CreateStruct_Entry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Expr_CreateStruct_Entry)
+ switch tag {
+ case 2: // key_kind.field_key
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.KeyKind = &Expr_CreateStruct_Entry_FieldKey{x}
+ return true, err
+ case 3: // key_kind.map_key
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Expr)
+ err := b.DecodeMessage(msg)
+ m.KeyKind = &Expr_CreateStruct_Entry_MapKey{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Expr_CreateStruct_Entry_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Expr_CreateStruct_Entry)
+ // key_kind
+ switch x := m.KeyKind.(type) {
+ case *Expr_CreateStruct_Entry_FieldKey:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.FieldKey)))
+ n += len(x.FieldKey)
+ case *Expr_CreateStruct_Entry_MapKey:
+ s := proto.Size(x.MapKey)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A comprehension expression applied to a list or map.
+//
+// Comprehensions are not part of the core syntax, but enabled with macros.
+// A macro matches a specific call signature within a parsed AST and replaces
+// the call with an alternate AST block. Macro expansion happens at parse
+// time.
+//
+// The following macros are supported within CEL:
+//
+// Aggregate type macros may be applied to all elements in a list or all keys
+// in a map:
+//
+// * `all`, `exists`, `exists_one` - test a predicate expression against
+// the inputs and return `true` if the predicate is satisfied for all,
+// any, or only one value `list.all(x, x < 10)`.
+// * `filter` - test a predicate expression against the inputs and return
+// the subset of elements which satisfy the predicate:
+// `payments.filter(p, p > 1000)`.
+// * `map` - apply an expression to all elements in the input and return the
+// output aggregate type: `[1, 2, 3].map(i, i * i)`.
+//
+// The `has(m.x)` macro tests whether the property `x` is present in struct
+// `m`. The semantics of this macro depend on the type of `m`. For proto2
+// messages `has(m.x)` is defined as 'defined, but not set`. For proto3, the
+// macro tests whether the property is set to its default. For map and struct
+// types, the macro tests whether the property `x` is defined on `m`.
+type Expr_Comprehension struct {
+ // The name of the iteration variable.
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ // The range over which var iterates.
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ // The name of the variable used for accumulation of the result.
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ // The initial value of the accumulator.
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ // An expression which can contain iter_var and accu_var.
+ //
+ // Returns false when the result has been computed and may be used as
+ // a hint to short-circuit the remainder of the comprehension.
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ // An expression which can contain iter_var and accu_var.
+ //
+ // Computes the next value of accu_var.
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ // An expression which can contain accu_var.
+ //
+ // Computes the result.
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Expr_Comprehension) Reset() { *m = Expr_Comprehension{} }
+func (m *Expr_Comprehension) String() string { return proto.CompactTextString(m) }
+func (*Expr_Comprehension) ProtoMessage() {}
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{1, 5}
+}
+
+func (m *Expr_Comprehension) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Expr_Comprehension.Unmarshal(m, b)
+}
+func (m *Expr_Comprehension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Expr_Comprehension.Marshal(b, m, deterministic)
+}
+func (m *Expr_Comprehension) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Expr_Comprehension.Merge(m, src)
+}
+func (m *Expr_Comprehension) XXX_Size() int {
+ return xxx_messageInfo_Expr_Comprehension.Size(m)
+}
+func (m *Expr_Comprehension) XXX_DiscardUnknown() {
+ xxx_messageInfo_Expr_Comprehension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Expr_Comprehension proto.InternalMessageInfo
+
+func (m *Expr_Comprehension) GetIterVar() string {
+ if m != nil {
+ return m.IterVar
+ }
+ return ""
+}
+
+func (m *Expr_Comprehension) GetIterRange() *Expr {
+ if m != nil {
+ return m.IterRange
+ }
+ return nil
+}
+
+func (m *Expr_Comprehension) GetAccuVar() string {
+ if m != nil {
+ return m.AccuVar
+ }
+ return ""
+}
+
+func (m *Expr_Comprehension) GetAccuInit() *Expr {
+ if m != nil {
+ return m.AccuInit
+ }
+ return nil
+}
+
+func (m *Expr_Comprehension) GetLoopCondition() *Expr {
+ if m != nil {
+ return m.LoopCondition
+ }
+ return nil
+}
+
+func (m *Expr_Comprehension) GetLoopStep() *Expr {
+ if m != nil {
+ return m.LoopStep
+ }
+ return nil
+}
+
+func (m *Expr_Comprehension) GetResult() *Expr {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// Represents a primitive literal.
+//
+// Named 'Constant' here for backwards compatibility.
+//
+// This is similar as the primitives supported in the well-known type
+// `google.protobuf.Value`, but richer so it can represent CEL's full range of
+// primitives.
+//
+// Lists and structs are not included as constants as these aggregate types may
+// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require
+// evaluation and are thus not constant.
+//
+// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`,
+// `true`, `null`.
+type Constant struct {
+ // Required. The valid constant kinds.
+ //
+ // Types that are valid to be assigned to ConstantKind:
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Constant) Reset() { *m = Constant{} }
+func (m *Constant) String() string { return proto.CompactTextString(m) }
+func (*Constant) ProtoMessage() {}
+func (*Constant) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{2}
+}
+
+func (m *Constant) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Constant.Unmarshal(m, b)
+}
+func (m *Constant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Constant.Marshal(b, m, deterministic)
+}
+func (m *Constant) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Constant.Merge(m, src)
+}
+func (m *Constant) XXX_Size() int {
+ return xxx_messageInfo_Constant.Size(m)
+}
+func (m *Constant) XXX_DiscardUnknown() {
+ xxx_messageInfo_Constant.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Constant proto.InternalMessageInfo
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue _struct.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ DurationValue *duration.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ TimestampValue *timestamp.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (m *Constant) GetNullValue() _struct.NullValue {
+ if x, ok := m.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return _struct.NullValue_NULL_VALUE
+}
+
+func (m *Constant) GetBoolValue() bool {
+ if x, ok := m.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Constant) GetInt64Value() int64 {
+ if x, ok := m.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *Constant) GetUint64Value() uint64 {
+ if x, ok := m.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (m *Constant) GetDoubleValue() float64 {
+ if x, ok := m.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *Constant) GetStringValue() string {
+ if x, ok := m.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Constant) GetBytesValue() []byte {
+ if x, ok := m.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (m *Constant) GetDurationValue() *duration.Duration {
+ if x, ok := m.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (m *Constant) GetTimestampValue() *timestamp.Timestamp {
+ if x, ok := m.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Constant) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Constant_OneofMarshaler, _Constant_OneofUnmarshaler, _Constant_OneofSizer, []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+}
+
+func _Constant_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Constant)
+ // constant_kind
+ switch x := m.ConstantKind.(type) {
+ case *Constant_NullValue:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.NullValue))
+ case *Constant_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Constant_Int64Value:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Int64Value))
+ case *Constant_Uint64Value:
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Uint64Value))
+ case *Constant_DoubleValue:
+ b.EncodeVarint(5<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.DoubleValue))
+ case *Constant_StringValue:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *Constant_BytesValue:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.BytesValue)
+ case *Constant_DurationValue:
+ b.EncodeVarint(8<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.DurationValue); err != nil {
+ return err
+ }
+ case *Constant_TimestampValue:
+ b.EncodeVarint(9<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.TimestampValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Constant.ConstantKind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Constant_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Constant)
+ switch tag {
+ case 1: // constant_kind.null_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.ConstantKind = &Constant_NullValue{_struct.NullValue(x)}
+ return true, err
+ case 2: // constant_kind.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.ConstantKind = &Constant_BoolValue{x != 0}
+ return true, err
+ case 3: // constant_kind.int64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.ConstantKind = &Constant_Int64Value{int64(x)}
+ return true, err
+ case 4: // constant_kind.uint64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.ConstantKind = &Constant_Uint64Value{x}
+ return true, err
+ case 5: // constant_kind.double_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.ConstantKind = &Constant_DoubleValue{math.Float64frombits(x)}
+ return true, err
+ case 6: // constant_kind.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.ConstantKind = &Constant_StringValue{x}
+ return true, err
+ case 7: // constant_kind.bytes_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.ConstantKind = &Constant_BytesValue{x}
+ return true, err
+ case 8: // constant_kind.duration_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(duration.Duration)
+ err := b.DecodeMessage(msg)
+ m.ConstantKind = &Constant_DurationValue{msg}
+ return true, err
+ case 9: // constant_kind.timestamp_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(timestamp.Timestamp)
+ err := b.DecodeMessage(msg)
+ m.ConstantKind = &Constant_TimestampValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Constant_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Constant)
+ // constant_kind
+ switch x := m.ConstantKind.(type) {
+ case *Constant_NullValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.NullValue))
+ case *Constant_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case *Constant_Int64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Int64Value))
+ case *Constant_Uint64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Uint64Value))
+ case *Constant_DoubleValue:
+ n += 1 // tag and wire
+ n += 8
+ case *Constant_StringValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *Constant_BytesValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.BytesValue)))
+ n += len(x.BytesValue)
+ case *Constant_DurationValue:
+ s := proto.Size(x.DurationValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Constant_TimestampValue:
+ s := proto.Size(x.TimestampValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Source information collected at parse time.
+type SourceInfo struct {
+ // The syntax version of the source, e.g. `cel1`.
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ // The location name. All position information attached to an expression is
+ // relative to this location.
+ //
+ // The location could be a file, UI element, or similar. For example,
+ // `acme/app/AnvilPolicy.cel`.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // Monotonically increasing list of character offsets where newlines appear.
+ //
+ // The line number of a given position is the index `i` where for a given
+ // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The
+ // column may be derivd from `id_positions[id] - line_offsets[i]`.
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ // A map from the parse node id (e.g. `Expr.id`) to the character offset
+ // within source.
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SourceInfo) Reset() { *m = SourceInfo{} }
+func (m *SourceInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceInfo) ProtoMessage() {}
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{3}
+}
+
+func (m *SourceInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SourceInfo.Unmarshal(m, b)
+}
+func (m *SourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SourceInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourceInfo.Merge(m, src)
+}
+func (m *SourceInfo) XXX_Size() int {
+ return xxx_messageInfo_SourceInfo.Size(m)
+}
+func (m *SourceInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceInfo proto.InternalMessageInfo
+
+func (m *SourceInfo) GetSyntaxVersion() string {
+ if m != nil {
+ return m.SyntaxVersion
+ }
+ return ""
+}
+
+func (m *SourceInfo) GetLocation() string {
+ if m != nil {
+ return m.Location
+ }
+ return ""
+}
+
+func (m *SourceInfo) GetLineOffsets() []int32 {
+ if m != nil {
+ return m.LineOffsets
+ }
+ return nil
+}
+
+func (m *SourceInfo) GetPositions() map[int64]int32 {
+ if m != nil {
+ return m.Positions
+ }
+ return nil
+}
+
+// A specific position in source.
+type SourcePosition struct {
+ // The soucre location name (e.g. file name).
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The character offset.
+ Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ // The 1-based index of the starting line in the source text
+ // where the issue occurs, or 0 if unknown.
+ Line int32 `protobuf:"varint,3,opt,name=line,proto3" json:"line,omitempty"`
+ // The 0-based index of the starting position within the line of source text
+ // where the issue occurs. Only meaningful if line is nonzero.
+ Column int32 `protobuf:"varint,4,opt,name=column,proto3" json:"column,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SourcePosition) Reset() { *m = SourcePosition{} }
+func (m *SourcePosition) String() string { return proto.CompactTextString(m) }
+func (*SourcePosition) ProtoMessage() {}
+func (*SourcePosition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d4e2be48009c83cb, []int{4}
+}
+
+func (m *SourcePosition) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SourcePosition.Unmarshal(m, b)
+}
+func (m *SourcePosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SourcePosition.Marshal(b, m, deterministic)
+}
+func (m *SourcePosition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SourcePosition.Merge(m, src)
+}
+func (m *SourcePosition) XXX_Size() int {
+ return xxx_messageInfo_SourcePosition.Size(m)
+}
+func (m *SourcePosition) XXX_DiscardUnknown() {
+ xxx_messageInfo_SourcePosition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourcePosition proto.InternalMessageInfo
+
+func (m *SourcePosition) GetLocation() string {
+ if m != nil {
+ return m.Location
+ }
+ return ""
+}
+
+func (m *SourcePosition) GetOffset() int32 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+func (m *SourcePosition) GetLine() int32 {
+ if m != nil {
+ return m.Line
+ }
+ return 0
+}
+
+func (m *SourcePosition) GetColumn() int32 {
+ if m != nil {
+ return m.Column
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*ParsedExpr)(nil), "google.api.expr.v1alpha1.ParsedExpr")
+ proto.RegisterType((*Expr)(nil), "google.api.expr.v1alpha1.Expr")
+ proto.RegisterType((*Expr_Ident)(nil), "google.api.expr.v1alpha1.Expr.Ident")
+ proto.RegisterType((*Expr_Select)(nil), "google.api.expr.v1alpha1.Expr.Select")
+ proto.RegisterType((*Expr_Call)(nil), "google.api.expr.v1alpha1.Expr.Call")
+ proto.RegisterType((*Expr_CreateList)(nil), "google.api.expr.v1alpha1.Expr.CreateList")
+ proto.RegisterType((*Expr_CreateStruct)(nil), "google.api.expr.v1alpha1.Expr.CreateStruct")
+ proto.RegisterType((*Expr_CreateStruct_Entry)(nil), "google.api.expr.v1alpha1.Expr.CreateStruct.Entry")
+ proto.RegisterType((*Expr_Comprehension)(nil), "google.api.expr.v1alpha1.Expr.Comprehension")
+ proto.RegisterType((*Constant)(nil), "google.api.expr.v1alpha1.Constant")
+ proto.RegisterType((*SourceInfo)(nil), "google.api.expr.v1alpha1.SourceInfo")
+ proto.RegisterMapType((map[int64]int32)(nil), "google.api.expr.v1alpha1.SourceInfo.PositionsEntry")
+ proto.RegisterType((*SourcePosition)(nil), "google.api.expr.v1alpha1.SourcePosition")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/syntax.proto", fileDescriptor_d4e2be48009c83cb)
+}
+
+var fileDescriptor_d4e2be48009c83cb = []byte{
+ // 1134 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x6e, 0x1b, 0xb7,
+ 0x13, 0xd6, 0xea, 0x9f, 0xb5, 0x23, 0x5b, 0xf9, 0xfd, 0x88, 0xa2, 0x50, 0x36, 0x69, 0xe2, 0x38,
+ 0x35, 0x90, 0xa2, 0x85, 0x04, 0x3b, 0x41, 0x90, 0xc6, 0xe9, 0x45, 0xae, 0x0b, 0x19, 0x29, 0x1c,
+ 0x77, 0x5d, 0xf8, 0x50, 0xa0, 0x10, 0xe8, 0x15, 0xa5, 0x2c, 0x4c, 0x91, 0x8b, 0x25, 0xd7, 0xb0,
+ 0xce, 0x3d, 0xf4, 0xd6, 0x97, 0x69, 0x5f, 0xa0, 0xef, 0xd1, 0x07, 0xe9, 0xa5, 0x40, 0x31, 0x43,
+ 0xae, 0xfc, 0x0f, 0x86, 0xd4, 0x1b, 0x39, 0xfc, 0xbe, 0x8f, 0xc3, 0x99, 0xe1, 0x90, 0xb0, 0x3d,
+ 0xd5, 0x7a, 0x2a, 0x45, 0x9f, 0x67, 0x69, 0x5f, 0x5c, 0x66, 0x79, 0xff, 0x62, 0x87, 0xcb, 0xec,
+ 0x23, 0xdf, 0xe9, 0x9b, 0xb9, 0xb2, 0xfc, 0xb2, 0x97, 0xe5, 0xda, 0x6a, 0xd6, 0x75, 0xb0, 0x1e,
+ 0xcf, 0xd2, 0x1e, 0xc2, 0x7a, 0x25, 0x2c, 0x7a, 0xe2, 0x05, 0x08, 0x77, 0x56, 0x4c, 0xfa, 0xe3,
+ 0x22, 0xe7, 0x36, 0xd5, 0xca, 0x31, 0xa3, 0xc7, 0xb7, 0xd7, 0x8d, 0xcd, 0x8b, 0xc4, 0xfa, 0xd5,
+ 0xa7, 0xb7, 0x57, 0x6d, 0x3a, 0x13, 0xc6, 0xf2, 0x59, 0xe6, 0x00, 0x5b, 0xbf, 0x06, 0x00, 0xc7,
+ 0x3c, 0x37, 0x62, 0x7c, 0x70, 0x99, 0xe5, 0x6c, 0x17, 0xea, 0xb8, 0x7d, 0xb7, 0xba, 0x19, 0xbc,
+ 0x68, 0xef, 0x3e, 0xe9, 0xdd, 0xe7, 0x56, 0x0f, 0xd1, 0x31, 0x61, 0xd9, 0x01, 0xb4, 0x8d, 0x2e,
+ 0xf2, 0x44, 0x8c, 0x52, 0x35, 0xd1, 0xdd, 0x1a, 0x51, 0x3f, 0xbf, 0x9f, 0x7a, 0x42, 0xe0, 0x43,
+ 0x35, 0xd1, 0x31, 0x98, 0xc5, 0x78, 0xeb, 0xaf, 0x75, 0xa8, 0x93, 0x0f, 0x1d, 0xa8, 0xa6, 0x63,
+ 0xf2, 0xa0, 0x16, 0x57, 0xd3, 0x31, 0xdb, 0x07, 0x48, 0xb4, 0x32, 0x76, 0x44, 0x9e, 0x39, 0xf9,
+ 0xad, 0xfb, 0xe5, 0xf7, 0x11, 0xcb, 0x95, 0x1d, 0x56, 0xe2, 0x90, 0x78, 0x07, 0xce, 0x49, 0x48,
+ 0xc7, 0x42, 0x79, 0x91, 0xfa, 0x32, 0x1f, 0x91, 0xd3, 0x3b, 0x44, 0x02, 0xca, 0x10, 0x93, 0x64,
+ 0x86, 0xd0, 0x36, 0x42, 0x8a, 0xc4, 0xeb, 0x34, 0x48, 0x67, 0x7b, 0x89, 0xce, 0x09, 0x31, 0x86,
+ 0x95, 0x18, 0x1c, 0x97, 0x94, 0x06, 0x10, 0x26, 0x5c, 0x4a, 0xa7, 0xd3, 0x24, 0x9d, 0xe7, 0x4b,
+ 0x74, 0xf6, 0xb9, 0x94, 0xc3, 0x4a, 0xdc, 0x42, 0x9e, 0xf7, 0x26, 0x94, 0x69, 0x19, 0x98, 0x35,
+ 0xd2, 0xf8, 0x62, 0x99, 0x46, 0x2e, 0xb8, 0x15, 0xdf, 0xa7, 0x06, 0xfd, 0x69, 0x21, 0x9b, 0x94,
+ 0x8e, 0xa0, 0xed, 0xea, 0xc6, 0x69, 0xb5, 0x48, 0xeb, 0xcb, 0x95, 0xb4, 0x4e, 0x88, 0x47, 0xa7,
+ 0xa3, 0x11, 0xe9, 0xfd, 0x0c, 0x2c, 0xd1, 0xb3, 0x2c, 0x17, 0x1f, 0x85, 0x32, 0xa9, 0x56, 0x4e,
+ 0x36, 0x24, 0xd9, 0xaf, 0x96, 0xc9, 0x5e, 0x27, 0x0e, 0x2b, 0xf1, 0xff, 0x6f, 0x28, 0x21, 0x24,
+ 0x7a, 0x04, 0x0d, 0x4a, 0x0e, 0x63, 0x50, 0x57, 0x7c, 0x26, 0xba, 0xc1, 0x66, 0xf0, 0x22, 0x8c,
+ 0x69, 0x1c, 0x15, 0xd0, 0x74, 0x11, 0x67, 0x6f, 0x60, 0x4d, 0x67, 0x22, 0xe7, 0x6a, 0x4c, 0x80,
+ 0xe5, 0x05, 0x5d, 0xc2, 0xd9, 0x27, 0xd0, 0x98, 0xa4, 0x42, 0xba, 0x32, 0x0c, 0x63, 0x37, 0x61,
+ 0x8f, 0x20, 0xb4, 0xc2, 0xd8, 0x91, 0x56, 0x72, 0x4e, 0x85, 0xd8, 0x8a, 0x5b, 0x68, 0xf8, 0xa0,
+ 0xe4, 0x3c, 0xfa, 0x2d, 0x80, 0x3a, 0x66, 0x88, 0xbd, 0x86, 0xa6, 0xe5, 0xf9, 0x54, 0xd8, 0x15,
+ 0x37, 0xf5, 0x68, 0x16, 0x41, 0x6b, 0x52, 0xa8, 0x04, 0xef, 0xb6, 0xdf, 0x76, 0x31, 0xc7, 0x7b,
+ 0xc9, 0xf3, 0xa9, 0xe9, 0xd6, 0x36, 0x6b, 0xab, 0xdc, 0x4b, 0xc4, 0x46, 0x43, 0x80, 0xab, 0x6c,
+ 0xb3, 0xb7, 0xd0, 0x12, 0x52, 0xcc, 0x84, 0xb2, 0xa6, 0x1b, 0xac, 0xa4, 0xb2, 0xc0, 0x47, 0x7f,
+ 0x54, 0x61, 0xfd, 0x7a, 0xb2, 0xd9, 0x33, 0x58, 0x9f, 0x09, 0x63, 0xf8, 0x54, 0x8c, 0xae, 0x85,
+ 0xbf, 0xed, 0x6d, 0x47, 0x7c, 0x26, 0xd8, 0x7b, 0x58, 0x13, 0xca, 0xe6, 0xa9, 0x30, 0xdd, 0x2a,
+ 0x6d, 0xb7, 0xf3, 0x1f, 0xaa, 0xa9, 0x77, 0xa0, 0x6c, 0x3e, 0x8f, 0x4b, 0x85, 0xe8, 0xf7, 0x00,
+ 0x1a, 0x64, 0xf2, 0xcd, 0x21, 0x58, 0x34, 0x87, 0xcf, 0x20, 0xa4, 0xdc, 0x8c, 0xce, 0xc5, 0xdc,
+ 0x45, 0x0d, 0xeb, 0x9a, 0x4c, 0xef, 0xc5, 0x9c, 0x7d, 0x0d, 0x6b, 0x33, 0x9e, 0xd1, 0x62, 0x6d,
+ 0x95, 0x64, 0x0c, 0x2b, 0x71, 0x73, 0xc6, 0x33, 0xa4, 0xbe, 0x82, 0xc6, 0x05, 0x97, 0x85, 0xf0,
+ 0xcd, 0x62, 0x59, 0xb4, 0x1c, 0x78, 0x00, 0xd0, 0x3a, 0x17, 0xf3, 0xd1, 0x79, 0xaa, 0xc6, 0xd1,
+ 0x3f, 0x55, 0xd8, 0xb8, 0x51, 0xcc, 0xec, 0x21, 0xb4, 0x52, 0x2b, 0xf2, 0xd1, 0x05, 0xcf, 0x7d,
+ 0xcc, 0xd6, 0x70, 0x7e, 0xca, 0x73, 0xf6, 0x0d, 0x00, 0x2d, 0xe5, 0x5c, 0x4d, 0xc5, 0x8a, 0xfd,
+ 0x37, 0x44, 0x46, 0x8c, 0x04, 0x54, 0xe6, 0x49, 0x52, 0x90, 0x72, 0xcd, 0x29, 0xe3, 0x1c, 0x95,
+ 0xf7, 0x20, 0xa4, 0xa5, 0x54, 0xa5, 0x76, 0xc5, 0xc3, 0x90, 0xd6, 0xa1, 0x4a, 0x2d, 0x3b, 0x80,
+ 0x8e, 0xd4, 0x3a, 0x1b, 0x25, 0x5a, 0x8d, 0x53, 0x2a, 0xcd, 0xc6, 0x4a, 0x0a, 0x1b, 0xc8, 0xda,
+ 0x2f, 0x49, 0xe8, 0x03, 0xc9, 0x18, 0x2b, 0x32, 0xdf, 0xed, 0x96, 0xfa, 0x80, 0x84, 0x13, 0x2b,
+ 0x32, 0xbc, 0x50, 0xb9, 0x30, 0x85, 0xb4, 0xbe, 0xc7, 0x2d, 0xbd, 0x50, 0x0e, 0x3d, 0x68, 0x43,
+ 0x88, 0xab, 0x94, 0x8c, 0xad, 0x3f, 0x6b, 0xd0, 0x2a, 0x9f, 0x06, 0xb6, 0x07, 0xa0, 0x0a, 0x29,
+ 0x47, 0x2e, 0xc1, 0x98, 0x89, 0xce, 0x6e, 0x54, 0xaa, 0x96, 0x6f, 0x65, 0xef, 0xa8, 0x90, 0xf2,
+ 0x14, 0x11, 0xf8, 0x06, 0xa8, 0x72, 0xc2, 0x9e, 0x02, 0x9c, 0x69, 0x5d, 0x92, 0x31, 0x53, 0x2d,
+ 0x04, 0xa0, 0xcd, 0x01, 0x9e, 0x41, 0x3b, 0x55, 0xf6, 0xf5, 0x2b, 0x8f, 0xc0, 0x74, 0xd4, 0xb0,
+ 0x3f, 0x92, 0xd1, 0x41, 0x9e, 0xc3, 0x7a, 0x71, 0x1d, 0x83, 0x69, 0xa9, 0x0f, 0x2b, 0x71, 0xbb,
+ 0xb8, 0x09, 0x1a, 0xeb, 0xe2, 0x4c, 0x0a, 0x0f, 0xc2, 0xc8, 0x07, 0x08, 0x72, 0xd6, 0x05, 0xc8,
+ 0xd8, 0x3c, 0x55, 0x53, 0x0f, 0x6a, 0xfa, 0x3b, 0xd0, 0x76, 0xd6, 0x85, 0x47, 0x67, 0x73, 0x2b,
+ 0x8c, 0xc7, 0x60, 0x18, 0xd7, 0xd1, 0x23, 0x32, 0x3a, 0xc8, 0x77, 0xd0, 0x29, 0x7f, 0x16, 0x1e,
+ 0xe5, 0x1e, 0x81, 0x87, 0x77, 0xc2, 0xf2, 0xad, 0x87, 0x0d, 0xaa, 0x5d, 0xf4, 0x66, 0xa3, 0xa4,
+ 0x39, 0x9d, 0x43, 0x78, 0xb0, 0xf8, 0x63, 0x78, 0x21, 0xd7, 0xf6, 0xef, 0xc6, 0xf7, 0xc7, 0x12,
+ 0xe7, 0x95, 0x3a, 0x0b, 0x22, 0x49, 0x0d, 0x1e, 0xc0, 0x46, 0xe2, 0x33, 0xe6, 0x72, 0xf8, 0x4b,
+ 0x15, 0xe0, 0xea, 0xf7, 0xc0, 0xb6, 0xa1, 0xe3, 0x3e, 0x51, 0xa3, 0x0b, 0x91, 0xe3, 0xfd, 0xf2,
+ 0x77, 0x6a, 0xc3, 0x59, 0x4f, 0x9d, 0x11, 0xfb, 0xaa, 0xd4, 0x09, 0xbf, 0xde, 0x57, 0xcb, 0x39,
+ 0x36, 0x32, 0x99, 0x2a, 0x31, 0xd2, 0x93, 0x89, 0x11, 0xd6, 0xf5, 0xd7, 0x46, 0xdc, 0x46, 0xdb,
+ 0x07, 0x67, 0x62, 0x3f, 0x40, 0x98, 0x69, 0x43, 0x65, 0x6c, 0xba, 0x75, 0x6a, 0x65, 0x2f, 0x57,
+ 0xf9, 0xdc, 0xf4, 0x8e, 0x4b, 0x96, 0x6b, 0x66, 0x57, 0x2a, 0xd1, 0x3b, 0xe8, 0xdc, 0x5c, 0x64,
+ 0xff, 0x83, 0x1a, 0xf6, 0x28, 0xd7, 0xd7, 0x70, 0x88, 0x2f, 0xd0, 0x55, 0x81, 0x35, 0x7c, 0x7b,
+ 0x79, 0x5b, 0x7d, 0x13, 0x6c, 0x65, 0xd0, 0x71, 0xbb, 0x94, 0x1a, 0x37, 0x4e, 0x18, 0xdc, 0x3a,
+ 0xe1, 0xa7, 0xd0, 0x74, 0x87, 0xf3, 0x42, 0x7e, 0x86, 0x2f, 0x27, 0x9e, 0x92, 0xaa, 0xb3, 0x11,
+ 0xd3, 0x18, 0xb1, 0x89, 0x96, 0xc5, 0x4c, 0x51, 0x3d, 0x36, 0x62, 0x3f, 0x1b, 0x28, 0x78, 0x9c,
+ 0xe8, 0xd9, 0xbd, 0x87, 0x1e, 0xb4, 0x4f, 0x28, 0xe0, 0xc7, 0x98, 0xd8, 0xe3, 0xe0, 0xa7, 0x77,
+ 0x1e, 0x38, 0xd5, 0x92, 0xab, 0x69, 0x4f, 0xe7, 0xd3, 0xfe, 0x54, 0x28, 0x4a, 0x7b, 0xdf, 0x2d,
+ 0xf1, 0x2c, 0x35, 0x77, 0xbf, 0xc4, 0x7b, 0x38, 0xfb, 0x3b, 0x08, 0xce, 0x9a, 0x84, 0x7d, 0xf9,
+ 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x25, 0xe3, 0xe8, 0x3d, 0x0b, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
new file mode 100644
index 00000000000..8a95b7d7f13
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
@@ -0,0 +1,715 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/expr/v1alpha1/value.proto
+
+package expr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Represents a CEL value.
+//
+// This is similar to `google.protobuf.Value`, but can represent CEL's full
+// range of values.
+type Value struct {
+ // Required. The valid kinds of values.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24bee359d1e5798a, []int{0}
+}
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+ return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue _struct.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *any.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() _struct.NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return _struct.NullValue_NULL_VALUE
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetInt64Value() int64 {
+ if x, ok := m.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *Value) GetUint64Value() uint64 {
+ if x, ok := m.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (m *Value) GetDoubleValue() float64 {
+ if x, ok := m.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBytesValue() []byte {
+ if x, ok := m.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (m *Value) GetEnumValue() *EnumValue {
+ if x, ok := m.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (m *Value) GetObjectValue() *any.Any {
+ if x, ok := m.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (m *Value) GetMapValue() *MapValue {
+ if x, ok := m.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (m *Value) GetTypeValue() string {
+ if x, ok := m.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+}
+
+func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.NullValue))
+ case *Value_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Value_Int64Value:
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Int64Value))
+ case *Value_Uint64Value:
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Uint64Value))
+ case *Value_DoubleValue:
+ b.EncodeVarint(5<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.DoubleValue))
+ case *Value_StringValue:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *Value_BytesValue:
+ b.EncodeVarint(7<<3 | proto.WireBytes)
+ b.EncodeRawBytes(x.BytesValue)
+ case *Value_EnumValue:
+ b.EncodeVarint(9<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.EnumValue); err != nil {
+ return err
+ }
+ case *Value_ObjectValue:
+ b.EncodeVarint(10<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ObjectValue); err != nil {
+ return err
+ }
+ case *Value_MapValue:
+ b.EncodeVarint(11<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MapValue); err != nil {
+ return err
+ }
+ case *Value_ListValue:
+ b.EncodeVarint(12<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListValue); err != nil {
+ return err
+ }
+ case *Value_TypeValue:
+ b.EncodeVarint(15<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.TypeValue)
+ case nil:
+ default:
+ return fmt.Errorf("Value.Kind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Value)
+ switch tag {
+ case 1: // kind.null_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_NullValue{_struct.NullValue(x)}
+ return true, err
+ case 2: // kind.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_BoolValue{x != 0}
+ return true, err
+ case 3: // kind.int64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_Int64Value{int64(x)}
+ return true, err
+ case 4: // kind.uint64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_Uint64Value{x}
+ return true, err
+ case 5: // kind.double_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Kind = &Value_DoubleValue{math.Float64frombits(x)}
+ return true, err
+ case 6: // kind.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Kind = &Value_StringValue{x}
+ return true, err
+ case 7: // kind.bytes_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.Kind = &Value_BytesValue{x}
+ return true, err
+ case 9: // kind.enum_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(EnumValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_EnumValue{msg}
+ return true, err
+ case 10: // kind.object_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(any.Any)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_ObjectValue{msg}
+ return true, err
+ case 11: // kind.map_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(MapValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_MapValue{msg}
+ return true, err
+ case 12: // kind.list_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ListValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_ListValue{msg}
+ return true, err
+ case 15: // kind.type_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Kind = &Value_TypeValue{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Value_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.NullValue))
+ case *Value_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case *Value_Int64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Int64Value))
+ case *Value_Uint64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Uint64Value))
+ case *Value_DoubleValue:
+ n += 1 // tag and wire
+ n += 8
+ case *Value_StringValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *Value_BytesValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.BytesValue)))
+ n += len(x.BytesValue)
+ case *Value_EnumValue:
+ s := proto.Size(x.EnumValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_ObjectValue:
+ s := proto.Size(x.ObjectValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_MapValue:
+ s := proto.Size(x.MapValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_ListValue:
+ s := proto.Size(x.ListValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_TypeValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.TypeValue)))
+ n += len(x.TypeValue)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// An enum value.
+type EnumValue struct {
+ // The fully qualified name of the enum type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The value of the enum.
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EnumValue) Reset() { *m = EnumValue{} }
+func (m *EnumValue) String() string { return proto.CompactTextString(m) }
+func (*EnumValue) ProtoMessage() {}
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24bee359d1e5798a, []int{1}
+}
+
+func (m *EnumValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EnumValue.Unmarshal(m, b)
+}
+func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic)
+}
+func (m *EnumValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EnumValue.Merge(m, src)
+}
+func (m *EnumValue) XXX_Size() int {
+ return xxx_messageInfo_EnumValue.Size(m)
+}
+func (m *EnumValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_EnumValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValue proto.InternalMessageInfo
+
+func (m *EnumValue) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *EnumValue) GetValue() int32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// A list.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type ListValue struct {
+ // The ordered values in the list.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24bee359d1e5798a, []int{2}
+}
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+ return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+// A map.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type MapValue struct {
+ // The set of map entries.
+ //
+ // CEL has fewer restrictions on keys, so a protobuf map represenation
+ // cannot be used.
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MapValue) Reset() { *m = MapValue{} }
+func (m *MapValue) String() string { return proto.CompactTextString(m) }
+func (*MapValue) ProtoMessage() {}
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24bee359d1e5798a, []int{3}
+}
+
+func (m *MapValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MapValue.Unmarshal(m, b)
+}
+func (m *MapValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MapValue.Marshal(b, m, deterministic)
+}
+func (m *MapValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MapValue.Merge(m, src)
+}
+func (m *MapValue) XXX_Size() int {
+ return xxx_messageInfo_MapValue.Size(m)
+}
+func (m *MapValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_MapValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MapValue proto.InternalMessageInfo
+
+func (m *MapValue) GetEntries() []*MapValue_Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+// An entry in the map.
+type MapValue_Entry struct {
+ // The key.
+ //
+ // Must be unique with in the map.
+ // Currently only boolean, int, uint, and string values can be keys.
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value.
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MapValue_Entry) Reset() { *m = MapValue_Entry{} }
+func (m *MapValue_Entry) String() string { return proto.CompactTextString(m) }
+func (*MapValue_Entry) ProtoMessage() {}
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_24bee359d1e5798a, []int{3, 0}
+}
+
+func (m *MapValue_Entry) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MapValue_Entry.Unmarshal(m, b)
+}
+func (m *MapValue_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MapValue_Entry.Marshal(b, m, deterministic)
+}
+func (m *MapValue_Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MapValue_Entry.Merge(m, src)
+}
+func (m *MapValue_Entry) XXX_Size() int {
+ return xxx_messageInfo_MapValue_Entry.Size(m)
+}
+func (m *MapValue_Entry) XXX_DiscardUnknown() {
+ xxx_messageInfo_MapValue_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MapValue_Entry proto.InternalMessageInfo
+
+func (m *MapValue_Entry) GetKey() *Value {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MapValue_Entry) GetValue() *Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Value)(nil), "google.api.expr.v1alpha1.Value")
+ proto.RegisterType((*EnumValue)(nil), "google.api.expr.v1alpha1.EnumValue")
+ proto.RegisterType((*ListValue)(nil), "google.api.expr.v1alpha1.ListValue")
+ proto.RegisterType((*MapValue)(nil), "google.api.expr.v1alpha1.MapValue")
+ proto.RegisterType((*MapValue_Entry)(nil), "google.api.expr.v1alpha1.MapValue.Entry")
+}
+
+func init() {
+ proto.RegisterFile("google/api/expr/v1alpha1/value.proto", fileDescriptor_24bee359d1e5798a)
+}
+
+var fileDescriptor_24bee359d1e5798a = []byte{
+ // 518 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcb, 0x6e, 0xd4, 0x30,
+ 0x14, 0x86, 0x6b, 0xe6, 0xd2, 0xc9, 0x99, 0x11, 0x48, 0x56, 0x17, 0xc3, 0xa8, 0x52, 0x43, 0xca,
+ 0x22, 0xab, 0x44, 0x33, 0x50, 0x10, 0x2a, 0x9b, 0x8e, 0x5a, 0x69, 0x16, 0x80, 0xaa, 0x2c, 0x58,
+ 0xb0, 0x41, 0xce, 0xd4, 0x84, 0x50, 0xc7, 0x0e, 0x89, 0x5d, 0x91, 0xc7, 0xe3, 0x01, 0x78, 0x1f,
+ 0x96, 0xc8, 0xb7, 0x50, 0xa8, 0x46, 0xed, 0x2e, 0xe7, 0xf7, 0xf7, 0xfb, 0x5c, 0x7c, 0x14, 0x78,
+ 0x5e, 0x08, 0x51, 0x30, 0x9a, 0x92, 0xba, 0x4c, 0xe9, 0x8f, 0xba, 0x49, 0x6f, 0x96, 0x84, 0xd5,
+ 0x5f, 0xc9, 0x32, 0xbd, 0x21, 0x4c, 0xd1, 0xa4, 0x6e, 0x84, 0x14, 0x78, 0x6e, 0xa9, 0x84, 0xd4,
+ 0x65, 0xa2, 0xa9, 0xc4, 0x53, 0x8b, 0xa7, 0xce, 0x6f, 0xb8, 0x5c, 0x7d, 0x49, 0x09, 0xef, 0xac,
+ 0x69, 0x71, 0xf8, 0xff, 0x51, 0x2b, 0x1b, 0xb5, 0x95, 0xf6, 0x34, 0xfa, 0x35, 0x84, 0xd1, 0x47,
+ 0x9d, 0x02, 0x9f, 0x02, 0x70, 0xc5, 0xd8, 0x67, 0x93, 0x70, 0x8e, 0x42, 0x14, 0x3f, 0x5e, 0x2d,
+ 0x12, 0x97, 0xd1, 0x9b, 0x93, 0x0f, 0x8a, 0x31, 0xc3, 0x6f, 0xf6, 0xb2, 0x80, 0xfb, 0x00, 0x1f,
+ 0x01, 0xe4, 0x42, 0x78, 0xf3, 0xa3, 0x10, 0xc5, 0x13, 0x0d, 0x68, 0xcd, 0x02, 0xcf, 0x60, 0x5a,
+ 0x72, 0xf9, 0xea, 0xa5, 0x23, 0x06, 0x21, 0x8a, 0x07, 0x9b, 0xbd, 0x0c, 0x8c, 0x68, 0x91, 0x63,
+ 0x98, 0xa9, 0xdb, 0xcc, 0x30, 0x44, 0xf1, 0x70, 0xb3, 0x97, 0x4d, 0xd5, 0xbf, 0xd0, 0x95, 0x50,
+ 0x39, 0xa3, 0x0e, 0x1a, 0x85, 0x28, 0x46, 0x1a, 0xb2, 0x6a, 0x0f, 0xb5, 0xb2, 0x29, 0x79, 0xe1,
+ 0xa0, 0x71, 0x88, 0xe2, 0x40, 0x43, 0x56, 0xed, 0x2b, 0xca, 0x3b, 0x49, 0x5b, 0xc7, 0xec, 0x87,
+ 0x28, 0x9e, 0xe9, 0x8a, 0x8c, 0x68, 0x91, 0x73, 0x00, 0xca, 0x55, 0xe5, 0x88, 0x20, 0x44, 0xf1,
+ 0x74, 0x75, 0x9c, 0xec, 0x7a, 0x84, 0xe4, 0x82, 0xab, 0xaa, 0x9f, 0x0d, 0xf5, 0x01, 0x7e, 0x03,
+ 0x33, 0x91, 0x7f, 0xa3, 0x5b, 0xe9, 0xee, 0x01, 0x73, 0xcf, 0xc1, 0x9d, 0xd1, 0x9e, 0xf1, 0x4e,
+ 0xd7, 0x68, 0x59, 0x6b, 0x3d, 0x83, 0xa0, 0x22, 0xb5, 0xf3, 0x4d, 0x8d, 0x2f, 0xda, 0x9d, 0xff,
+ 0x3d, 0xa9, 0x7d, 0xfa, 0x49, 0xe5, 0xbe, 0x75, 0x0f, 0xac, 0x6c, 0x7d, 0xee, 0xd9, 0x7d, 0x3d,
+ 0xbc, 0x2b, 0x5b, 0xd9, 0xf7, 0xc0, 0x7c, 0xa0, 0xdf, 0x57, 0x76, 0xb5, 0x1f, 0xfa, 0x13, 0x37,
+ 0xcf, 0x40, 0x6b, 0x06, 0x58, 0x8f, 0x61, 0x78, 0x5d, 0xf2, 0xab, 0xe8, 0x04, 0x82, 0x7e, 0x0c,
+ 0x18, 0xc3, 0x50, 0x13, 0x66, 0x99, 0x82, 0xcc, 0x7c, 0xe3, 0x03, 0x18, 0xfd, 0x5d, 0x92, 0x51,
+ 0x66, 0x83, 0xe8, 0x1c, 0x82, 0x3e, 0x33, 0x7e, 0x0d, 0x63, 0xa3, 0xb6, 0x73, 0x14, 0x0e, 0xe2,
+ 0xe9, 0xea, 0x68, 0x77, 0xb9, 0xc6, 0x90, 0x39, 0x3c, 0xfa, 0x89, 0x60, 0xe2, 0x87, 0x80, 0xd7,
+ 0xb0, 0x4f, 0xb9, 0x6c, 0xca, 0xfe, 0x9a, 0xf8, 0xfe, 0xc9, 0x25, 0x17, 0x5c, 0x36, 0x5d, 0xe6,
+ 0x8d, 0x8b, 0xef, 0x30, 0x32, 0x0a, 0x5e, 0xc2, 0xe0, 0x9a, 0x76, 0xa6, 0x91, 0x07, 0xd4, 0xa3,
+ 0x59, 0x7c, 0x72, 0xbb, 0xd1, 0x07, 0x98, 0x2c, 0xbd, 0xae, 0xe0, 0x70, 0x2b, 0xaa, 0x9d, 0xf0,
+ 0x1a, 0x0c, 0x7d, 0xa9, 0x97, 0xe6, 0x12, 0x7d, 0x7a, 0xeb, 0xb8, 0x42, 0x30, 0xc2, 0x8b, 0x44,
+ 0x34, 0x45, 0x5a, 0x50, 0x6e, 0x56, 0x2a, 0xb5, 0x47, 0xa4, 0x2e, 0xdb, 0xbb, 0xbf, 0x95, 0x53,
+ 0x1d, 0xfd, 0x46, 0x28, 0x1f, 0x1b, 0xf6, 0xc5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0x53,
+ 0x8e, 0x99, 0x81, 0x04, 0x00, 0x00,
+}
From f76ad673e76f56830d8da366813c71a691a6c285 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 8 Apr 2019 15:39:23 -0700
Subject: [PATCH 03/76] Update Trigger CEL fields
Changes the CEL filter to be expressed this way:
filter:
cel:
expression: foo.bar = "baz"
---
pkg/apis/eventing/v1alpha1/trigger_types.go | 19 +++++++++++++---
.../v1alpha1/zz_generated.deepcopy.go | 22 ++++++++++++++++---
2 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go
index e14b032542c..446e717099a 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_types.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_types.go
@@ -67,10 +67,16 @@ type TriggerSpec struct {
type TriggerFilter struct {
// SourceAndType filters events based on exact matches on the type and source
// attributes.
+ //
+ // +optional
SourceAndType *TriggerFilterSourceAndType `json:"sourceAndType,omitempty"`
- // CELExpression filters events by evaluating the expression with the CEL
- // runtime.
- CELExpression *string `json:"celExpression,omitempty"`
+ // CEL filters events by evaluating the expression with the Common
+ // Expression Language runtime.
+ //
+ // +optional
+ CEL *TriggerFilterCEL `json:"cel,omitempty"`
+ // Alternate:
+ // Expression *TriggerFilterExpression{Lang, Expr}
}
// TriggerFilterSourceAndType filters events based on exact matches on the cloud event's type and
@@ -81,6 +87,13 @@ type TriggerFilterSourceAndType struct {
Source string `json:"source,omitempty"`
}
+// TriggerFilterCEL filters events by evaluating the expression with the Common
+// Expression Language runtime. An event passes the filter if the expression
+// evaluates to true.
+type TriggerFilterCEL struct {
+ Expression string `json:"expression,omitempty"`
+}
+
var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBrokerExists, TriggerConditionKubernetesService, TriggerConditionVirtualService, TriggerConditionSubscribed)
// TriggerStatus represents the current state of a Trigger.
diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index cba19e2aae6..2da3c90d8d4 100644
--- a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -552,9 +552,9 @@ func (in *TriggerFilter) DeepCopyInto(out *TriggerFilter) {
*out = new(TriggerFilterSourceAndType)
**out = **in
}
- if in.CELExpression != nil {
- in, out := &in.CELExpression, &out.CELExpression
- *out = new(string)
+ if in.CEL != nil {
+ in, out := &in.CEL, &out.CEL
+ *out = new(TriggerFilterCEL)
**out = **in
}
return
@@ -570,6 +570,22 @@ func (in *TriggerFilter) DeepCopy() *TriggerFilter {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TriggerFilterCEL) DeepCopyInto(out *TriggerFilterCEL) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerFilterCEL.
+func (in *TriggerFilterCEL) DeepCopy() *TriggerFilterCEL {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerFilterCEL)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TriggerFilterSourceAndType) DeepCopyInto(out *TriggerFilterSourceAndType) {
*out = *in
From 535401195a4a4240a4ccb46ccf02b5222dcee9f1 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 8 Apr 2019 15:40:59 -0700
Subject: [PATCH 04/76] Switch to dynamic types for CEL filters
Using a protobuf Struct eliminates the need to generate pb structs
for CloudEvent fields, and lays the groundwork for adding dynamic fields
to the expression environment.
---
Gopkg.lock | 6 +-
Gopkg.toml | 2 +-
pkg/broker/cel.go | 36 +-
pkg/broker/dev_knative/ce.pb.go | 113 --
pkg/broker/dev_knative/ce.proto | 11 -
pkg/broker/receiver.go | 2 +-
.../golang/protobuf/jsonpb/jsonpb.go | 1271 +++++++++++++++++
7 files changed, 1294 insertions(+), 147 deletions(-)
delete mode 100644 pkg/broker/dev_knative/ce.pb.go
delete mode 100644 pkg/broker/dev_knative/ce.proto
create mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
diff --git a/Gopkg.lock b/Gopkg.lock
index b0d15ad29c8..d513e4ec3f8 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -195,10 +195,11 @@
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
[[projects]]
- digest = "1:ac06172e8420ee3192527e84a3f373ada56043e6b0e27c2e765b4dd8408f2ec9"
+ digest = "1:771d03745d3f566fb910a47ab1fa959379398df1d1768e44b8c5bc3865d1407a"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
+ "jsonpb",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
@@ -1283,7 +1284,8 @@
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
"github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
- "github.com/golang/protobuf/proto",
+ "github.com/golang/protobuf/jsonpb",
+ "github.com/golang/protobuf/ptypes/struct",
"github.com/google/cel-go/cel",
"github.com/google/cel-go/checker/decls",
"github.com/google/cel-go/common/types",
diff --git a/Gopkg.toml b/Gopkg.toml
index c171d36ae4e..7eae4b3366d 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -138,4 +138,4 @@ required = [
[[constraint]]
name = "github.com/golang/protobuf"
- version = "^1.3.1"
+ version = "=1.3.0"
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 8230f9dc64a..aa49f36b92c 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -1,20 +1,23 @@
package broker
import (
+ "bytes"
+ "encoding/json"
+ "log"
+
"github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/golang/protobuf/jsonpb"
+ structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- celtypes "github.com/knative/eventing/pkg/broker/dev_knative"
)
func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
e, err := cel.NewEnv(
- cel.Types(&celtypes.CloudEventFilterMeta{}),
cel.Declarations(
- decls.NewIdent("ce", decls.NewObjectType("dev.knative.CloudEventFilterMeta"), nil),
- //decls.NewIdent("data", types.DynType, nil),
+ decls.NewIdent("ce", decls.NewObjectType("google.protobuf.Struct"), nil),
),
)
if err != nil {
@@ -22,7 +25,7 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return false
}
- p, iss := e.Parse(*ts.Filter.CELExpression)
+ p, iss := e.Parse(ts.Filter.CEL.Expr)
if iss != nil && iss.Err() != nil {
//TODO do something with error
return false
@@ -39,23 +42,18 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return false
}
- // Would it be useful to cache programs by trigger UID and resourceversion?
-
- //TODO generate the variables
- ce := &celtypes.CloudEventFilterMeta{
- Specversion: event.SpecVersion(),
- Type: event.Type(),
- Source: event.Source(),
- //TODO Is this the right way to get id? Do we even need id?
- //Id: event.Context.AsV02().ID
- //TODO should this use google.protobuf.Timestamp? Do we even need time?
- Time: event.Context.AsV02().Time.String(),
+ cloudEvent := &structpb.Struct{}
+ eventJSON, err := json.Marshal(event.Context.AsV02())
+ if err != nil {
+ //TODO do something with error
+ return false
+ }
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(eventJSON), cloudEvent); err != nil {
+ log.Fatalf("json parse error: %s\n", err)
}
out, _, err := prg.Eval(map[string]interface{}{
- // Native values are converted to CEL values under the covers.
- "ce": ce,
- //"data": data,
+ "ce": cloudEvent,
})
if err != nil {
//TODO do something with error
diff --git a/pkg/broker/dev_knative/ce.pb.go b/pkg/broker/dev_knative/ce.pb.go
deleted file mode 100644
index c67d0e5009a..00000000000
--- a/pkg/broker/dev_knative/ce.pb.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: ce.proto
-
-package dev_knative
-
-import (
- fmt "fmt"
- math "math"
-
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type CloudEventFilterMeta struct {
- Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
- Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"`
- Time string `protobuf:"bytes,5,opt,name=time,proto3" json:"time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CloudEventFilterMeta) Reset() { *m = CloudEventFilterMeta{} }
-func (m *CloudEventFilterMeta) String() string { return proto.CompactTextString(m) }
-func (*CloudEventFilterMeta) ProtoMessage() {}
-func (*CloudEventFilterMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_85884d17b902dd0c, []int{0}
-}
-
-func (m *CloudEventFilterMeta) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CloudEventFilterMeta.Unmarshal(m, b)
-}
-func (m *CloudEventFilterMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CloudEventFilterMeta.Marshal(b, m, deterministic)
-}
-func (m *CloudEventFilterMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CloudEventFilterMeta.Merge(m, src)
-}
-func (m *CloudEventFilterMeta) XXX_Size() int {
- return xxx_messageInfo_CloudEventFilterMeta.Size(m)
-}
-func (m *CloudEventFilterMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_CloudEventFilterMeta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CloudEventFilterMeta proto.InternalMessageInfo
-
-func (m *CloudEventFilterMeta) GetSpecversion() string {
- if m != nil {
- return m.Specversion
- }
- return ""
-}
-
-func (m *CloudEventFilterMeta) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *CloudEventFilterMeta) GetSource() string {
- if m != nil {
- return m.Source
- }
- return ""
-}
-
-func (m *CloudEventFilterMeta) GetId() string {
- if m != nil {
- return m.Id
- }
- return ""
-}
-
-func (m *CloudEventFilterMeta) GetTime() string {
- if m != nil {
- return m.Time
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*CloudEventFilterMeta)(nil), "dev.knative.CloudEventFilterMeta")
-}
-
-func init() { proto.RegisterFile("ce.proto", fileDescriptor_85884d17b902dd0c) }
-
-var fileDescriptor_85884d17b902dd0c = []byte{
- // 155 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4e, 0xd5, 0x2b,
- 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0x49, 0x2d, 0xd3, 0xcb, 0xce, 0x4b, 0x2c, 0xc9, 0x2c,
- 0x4b, 0x55, 0xea, 0x60, 0xe4, 0x12, 0x71, 0xce, 0xc9, 0x2f, 0x4d, 0x71, 0x2d, 0x4b, 0xcd, 0x2b,
- 0x71, 0xcb, 0xcc, 0x29, 0x49, 0x2d, 0xf2, 0x4d, 0x2d, 0x49, 0x14, 0x52, 0xe0, 0xe2, 0x2e, 0x2e,
- 0x48, 0x4d, 0x2e, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x93, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c,
- 0x42, 0x16, 0x12, 0x12, 0xe2, 0x62, 0x29, 0xa9, 0x2c, 0x48, 0x95, 0x60, 0x02, 0x4b, 0x81, 0xd9,
- 0x42, 0x62, 0x5c, 0x6c, 0xc5, 0xf9, 0xa5, 0x45, 0xc9, 0xa9, 0x12, 0xcc, 0x60, 0x51, 0x28, 0x4f,
- 0x88, 0x8f, 0x8b, 0x29, 0x33, 0x45, 0x82, 0x05, 0x2c, 0xc6, 0x94, 0x99, 0x02, 0xd6, 0x9b, 0x99,
- 0x9b, 0x2a, 0xc1, 0x0a, 0xd5, 0x9b, 0x99, 0x9b, 0x9a, 0xc4, 0x06, 0x76, 0x9e, 0x31, 0x20, 0x00,
- 0x00, 0xff, 0xff, 0x94, 0x85, 0x6f, 0x90, 0xaa, 0x00, 0x00, 0x00,
-}
diff --git a/pkg/broker/dev_knative/ce.proto b/pkg/broker/dev_knative/ce.proto
deleted file mode 100644
index 401e4e616c0..00000000000
--- a/pkg/broker/dev_knative/ce.proto
+++ /dev/null
@@ -1,11 +0,0 @@
-syntax = "proto3";
-
-package dev.knative;
-
-message CloudEventFilterMeta {
- string specversion = 1;
- string type = 2;
- string source = 3;
- string id = 4;
- string time = 5;
-}
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index cda1306f88f..5d674c2087c 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -222,7 +222,7 @@ func (r *Receiver) shouldSendMessage(ts *eventingv1alpha1.TriggerSpec, event *cl
// TODO what should happen if multiple filter types are specified? OR? AND?
// precedence rules?
- if ts.Filter.CELExpression != nil {
+ if ts.Filter.CEL != nil {
return filterEventByCEL(ts, event)
}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 00000000000..ada2b78e89d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1271 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+ // Whether to render enum values as integers, as opposed to string values.
+ EnumsAsInts bool
+
+ // Whether to render fields with zero values.
+ EmitDefaults bool
+
+ // A string to indent each level by. The presence of this field will
+ // also cause a space to appear between the field separator and
+ // value, and for newlines to be appear between fields and array
+ // elements.
+ Indent string
+
+ // Whether to use the original (.proto) name for fields.
+ OrigName bool
+
+ // A custom URL resolver to use when marshaling Any messages to JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+ // Only the part of typeUrl after the last slash is relevant.
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+ v := reflect.ValueOf(pb)
+ if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return errors.New("Marshal called with nil")
+ }
+ // Check for unset required fields first.
+ if err := checkRequiredFields(pb); err != nil {
+ return err
+ }
+ writer := &errWriter{writer: out}
+ return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if err := m.Marshal(&buf, pb); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extensions ids to ensure stable output.
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+ XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+ if jsm, ok := v.(JSONPBMarshaler); ok {
+ b, err := jsm.MarshalJSONPB(m)
+ if err != nil {
+ return err
+ }
+ if typeURL != "" {
+ // we are marshaling this object to an Any type
+ var js map[string]*json.RawMessage
+ if err = json.Unmarshal(b, &js); err != nil {
+ return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+ }
+ turl, err := json.Marshal(typeURL)
+ if err != nil {
+ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+ }
+ js["@type"] = (*json.RawMessage)(&turl)
+ if b, err = json.Marshal(js); err != nil {
+ return err
+ }
+ }
+
+ out.write(string(b))
+ return out.err
+ }
+
+ s := reflect.ValueOf(v).Elem()
+
+ // Handle well-known types.
+ if wkt, ok := v.(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ // "Wrappers use the same representation in JSON
+ // as the wrapped primitive type, ..."
+ sprop := proto.GetProperties(s.Type())
+ return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+ case "Any":
+ // Any is a bit more involved.
+ return m.marshalAny(out, v, indent)
+ case "Duration":
+ // "Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision."
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ if ns <= -secondInNanos || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+ }
+ if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+ return errors.New("signs of seconds and nanos do not match")
+ }
+ if s < 0 {
+ ns = -ns
+ }
+ x := fmt.Sprintf("%d.%09d", s, ns)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`s"`)
+ return out.err
+ case "Struct", "ListValue":
+ // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+ case "Timestamp":
+ // "RFC 3339, where generated output will always be Z-normalized
+ // and uses 0, 3, 6 or 9 fractional digits."
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ if ns < 0 || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+ }
+ t := time.Unix(s, ns).UTC()
+ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`Z"`)
+ return out.err
+ case "Value":
+ // Value has a single oneof.
+ kind := s.Field(0)
+ if kind.IsNil() {
+ // "absence of any variant indicates an error"
+ return errors.New("nil Value")
+ }
+ // oneof -> *T -> T -> T.F
+ x := kind.Elem().Elem().Field(0)
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, x, indent)
+ }
+ }
+
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+
+ firstField := true
+
+ if typeURL != "" {
+ if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ for i := 0; i < s.NumField(); i++ {
+ value := s.Field(i)
+ valueField := s.Type().Field(i)
+ if strings.HasPrefix(valueField.Name, "XXX_") {
+ continue
+ }
+
+ // IsNil will panic on most value kinds.
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface:
+ if value.IsNil() {
+ continue
+ }
+ }
+
+ if !m.EmitDefaults {
+ switch value.Kind() {
+ case reflect.Bool:
+ if !value.Bool() {
+ continue
+ }
+ case reflect.Int32, reflect.Int64:
+ if value.Int() == 0 {
+ continue
+ }
+ case reflect.Uint32, reflect.Uint64:
+ if value.Uint() == 0 {
+ continue
+ }
+ case reflect.Float32, reflect.Float64:
+ if value.Float() == 0 {
+ continue
+ }
+ case reflect.String:
+ if value.Len() == 0 {
+ continue
+ }
+ case reflect.Map, reflect.Ptr, reflect.Slice:
+ if value.IsNil() {
+ continue
+ }
+ }
+ }
+
+ // Oneof fields need special handling.
+ if valueField.Tag.Get("protobuf_oneof") != "" {
+ // value is an interface containing &T{real_value}.
+ sv := value.Elem().Elem() // interface -> *T -> T
+ value = sv.Field(0)
+ valueField = sv.Type().Field(0)
+ }
+ prop := jsonProperties(valueField, m.OrigName)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ // Handle proto2 extensions.
+ if ep, ok := v.(proto.Message); ok {
+ extensions := proto.RegisteredExtensions(v)
+ // Sort extensions for stable output.
+ ids := make([]int32, 0, len(extensions))
+ for id, desc := range extensions {
+ if !proto.HasExtension(ep, desc) {
+ continue
+ }
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ for _, id := range ids {
+ desc := extensions[id]
+ if desc == nil {
+ // unknown extension
+ continue
+ }
+ ext, extErr := proto.GetExtension(ep, desc)
+ if extErr != nil {
+ return extErr
+ }
+ value := reflect.ValueOf(ext)
+ var prop proto.Properties
+ prop.Parse(desc.Tag)
+ prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, &prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ }
+
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+ if m.Indent != "" {
+ out.write(",\n")
+ } else {
+ out.write(",")
+ }
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+ // "If the Any contains a value that has a special JSON mapping,
+ // it will be converted as follows: {"@type": xxx, "value": yyy}.
+ // Otherwise, the value will be converted into a JSON object,
+ // and the "@type" field will be inserted to indicate the actual data type."
+ v := reflect.ValueOf(any).Elem()
+ turl := v.Field(0).String()
+ val := v.Field(1).Bytes()
+
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := proto.Unmarshal(val, msg); err != nil {
+ return err
+ }
+
+ if _, ok := msg.(wkt); ok {
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+ if err := m.marshalTypeURL(out, indent, turl); err != nil {
+ return err
+ }
+ m.writeSep(out)
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(`"value": `)
+ } else {
+ out.write(`"value":`)
+ }
+ if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+ return err
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+ }
+
+ return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"@type":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ out.write(string(b))
+ return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"`)
+ out.write(prop.JSONName)
+ out.write(`":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ if err := m.marshalValue(out, prop, v, indent); err != nil {
+ return err
+ }
+ return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ var err error
+ v = reflect.Indirect(v)
+
+ // Handle nil pointer
+ if v.Kind() == reflect.Invalid {
+ out.write("null")
+ return out.err
+ }
+
+ // Handle repeated elements.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ out.write("[")
+ comma := ""
+ for i := 0; i < v.Len(); i++ {
+ sliceVal := v.Index(i)
+ out.write(comma)
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+ if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write("]")
+ return out.err
+ }
+
+ // Handle well-known types.
+ // Most are handled up in marshalObject (because 99% are messages).
+ if wkt, ok := v.Interface().(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "NullValue":
+ out.write("null")
+ return out.err
+ }
+ }
+
+ // Handle enumerations.
+ if !m.EnumsAsInts && prop.Enum != "" {
+ // Unknown enum values will are stringified by the proto library as their
+ // value. Such values should _not_ be quoted or they will be interpreted
+ // as an enum string instead of their value.
+ enumStr := v.Interface().(fmt.Stringer).String()
+ var valStr string
+ if v.Kind() == reflect.Ptr {
+ valStr = strconv.Itoa(int(v.Elem().Int()))
+ } else {
+ valStr = strconv.Itoa(int(v.Int()))
+ }
+ isKnownEnum := enumStr != valStr
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ out.write(enumStr)
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ return out.err
+ }
+
+ // Handle nested messages.
+ if v.Kind() == reflect.Struct {
+ return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+ }
+
+ // Handle maps.
+ // Since Go randomizes map iteration, we sort keys for stable output.
+ if v.Kind() == reflect.Map {
+ out.write(`{`)
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for i, k := range keys {
+ if i > 0 {
+ out.write(`,`)
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+
+ // TODO handle map key prop properly
+ b, err := json.Marshal(k.Interface())
+ if err != nil {
+ return err
+ }
+ s := string(b)
+
+ // If the JSON is not a string value, encode it again to make it one.
+ if !strings.HasPrefix(s, `"`) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ s = string(b)
+ }
+
+ out.write(s)
+ out.write(`:`)
+ if m.Indent != "" {
+ out.write(` `)
+ }
+
+ vprop := prop
+ if prop != nil && prop.MapValProp != nil {
+ vprop = prop.MapValProp
+ }
+ if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+ return err
+ }
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`}`)
+ return out.err
+ }
+
+ // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ f := v.Float()
+ var sval string
+ switch {
+ case math.IsInf(f, 1):
+ sval = `"Infinity"`
+ case math.IsInf(f, -1):
+ sval = `"-Infinity"`
+ case math.IsNaN(f):
+ sval = `"NaN"`
+ }
+ if sval != "" {
+ out.write(sval)
+ return out.err
+ }
+ }
+
+ // Default handling defers to the encoding/json library.
+ b, err := json.Marshal(v.Interface())
+ if err != nil {
+ return err
+ }
+ needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+ if needToQuote {
+ out.write(`"`)
+ }
+ out.write(string(b))
+ if needToQuote {
+ out.write(`"`)
+ }
+ return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+ // Whether to allow messages to contain unknown fields, as opposed to
+ // failing to unmarshal.
+ AllowUnknownFields bool
+
+ // A custom URL resolver to use when unmarshaling Any messages from JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any options permutations of the
+// related Marshaler.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ inputValue := json.RawMessage{}
+ if err := dec.Decode(&inputValue); err != nil {
+ return err
+ }
+ if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+ return err
+ }
+ return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+ dec := json.NewDecoder(r)
+ return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any options permutations of the
+// related Marshaler.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func UnmarshalString(str string, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+ targetType := target.Type()
+
+ // Allocate memory for pointer fields.
+ if targetType.Kind() == reflect.Ptr {
+ // If input value is "null" and target is a pointer type, then the field should be treated as not set
+ // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+ _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+ if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+ return nil
+ }
+ target.Set(reflect.New(targetType.Elem()))
+
+ return u.unmarshalValue(target.Elem(), inputValue, prop)
+ }
+
+ if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+ }
+
+ // Handle well-known types that are not pointers.
+ if w, ok := target.Addr().Interface().(wkt); ok {
+ switch w.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ return u.unmarshalValue(target.Field(0), inputValue, prop)
+ case "Any":
+ // Use json.RawMessage pointer type instead of value to support pre-1.8 version.
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
+ // https://github.com/golang/go/issues/14493
+ var jsonFields map[string]*json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ val, ok := jsonFields["@type"]
+ if !ok || val == nil {
+ return errors.New("Any JSON doesn't have '@type'")
+ }
+
+ var turl string
+ if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+ return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+ }
+ target.Field(0).SetString(turl)
+
+ var m proto.Message
+ var err error
+ if u.AnyResolver != nil {
+ m, err = u.AnyResolver.Resolve(turl)
+ } else {
+ m, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if _, ok := m.(wkt); ok {
+ val, ok := jsonFields["value"]
+ if !ok {
+ return errors.New("Any JSON doesn't have 'value'")
+ }
+
+ if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ } else {
+ delete(jsonFields, "@type")
+ nestedProto, err := json.Marshal(jsonFields)
+ if err != nil {
+ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+ }
+
+ if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ }
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+ }
+ target.Field(1).SetBytes(b)
+
+ return nil
+ case "Duration":
+ unq, err := unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ d, err := time.ParseDuration(unq)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ ns := d.Nanoseconds()
+ s := ns / 1e9
+ ns %= 1e9
+ target.Field(0).SetInt(s)
+ target.Field(1).SetInt(ns)
+ return nil
+ case "Timestamp":
+ unq, err := unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ t, err := time.Parse(time.RFC3339Nano, unq)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+
+ target.Field(0).SetInt(t.Unix())
+ target.Field(1).SetInt(int64(t.Nanosecond()))
+ return nil
+ case "Struct":
+ var m map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &m); err != nil {
+ return fmt.Errorf("bad StructValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+ for k, jv := range m {
+ pv := &stpb.Value{}
+ if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+ return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+ }
+ target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+ }
+ return nil
+ case "ListValue":
+ var s []json.RawMessage
+ if err := json.Unmarshal(inputValue, &s); err != nil {
+ return fmt.Errorf("bad ListValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+ for i, sv := range s {
+ if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+ return err
+ }
+ }
+ return nil
+ case "Value":
+ ivStr := string(inputValue)
+ if ivStr == "null" {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+ } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+ } else if v, err := unquote(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+ } else if v, err := strconv.ParseBool(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+ } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+ lv := &stpb.ListValue{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+ return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+ } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+ sv := &stpb.Struct{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+ return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+ } else {
+ return fmt.Errorf("unrecognized type for Value %q", ivStr)
+ }
+ return nil
+ }
+ }
+
+ // Handle enums, which have an underlying type of int32,
+ // and may appear as strings.
+ // The case of an enum appearing as a number is handled
+ // at the bottom of this function.
+ if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+ vmap := proto.EnumValueMap(prop.Enum)
+ // Don't need to do unquoting; valid enum names
+ // are from a limited character set.
+ s := inputValue[1 : len(inputValue)-1]
+ n, ok := vmap[string(s)]
+ if !ok {
+ return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+ }
+ if target.Kind() == reflect.Ptr { // proto2
+ target.Set(reflect.New(targetType.Elem()))
+ target = target.Elem()
+ }
+ if targetType.Kind() != reflect.Int32 {
+ return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+ }
+ target.SetInt(int64(n))
+ return nil
+ }
+
+ // Handle nested messages.
+ if targetType.Kind() == reflect.Struct {
+ var jsonFields map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+ // Be liberal in what names we accept; both orig_name and camelName are okay.
+ fieldNames := acceptedJSONFieldNames(prop)
+
+ vOrig, okOrig := jsonFields[fieldNames.orig]
+ vCamel, okCamel := jsonFields[fieldNames.camel]
+ if !okOrig && !okCamel {
+ return nil, false
+ }
+ // If, for some reason, both are present in the data, favour the camelName.
+ var raw json.RawMessage
+ if okOrig {
+ raw = vOrig
+ delete(jsonFields, fieldNames.orig)
+ }
+ if okCamel {
+ raw = vCamel
+ delete(jsonFields, fieldNames.camel)
+ }
+ return raw, true
+ }
+
+ sprops := proto.GetProperties(targetType)
+ for i := 0; i < target.NumField(); i++ {
+ ft := target.Type().Field(i)
+ if strings.HasPrefix(ft.Name, "XXX_") {
+ continue
+ }
+
+ valueForField, ok := consumeField(sprops.Prop[i])
+ if !ok {
+ continue
+ }
+
+ if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+ return err
+ }
+ }
+ // Check for any oneof fields.
+ if len(jsonFields) > 0 {
+ for _, oop := range sprops.OneofTypes {
+ raw, ok := consumeField(oop.Prop)
+ if !ok {
+ continue
+ }
+ nv := reflect.New(oop.Type.Elem())
+ target.Field(oop.Field).Set(nv)
+ if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+ return err
+ }
+ }
+ }
+ // Handle proto2 extensions.
+ if len(jsonFields) > 0 {
+ if ep, ok := target.Addr().Interface().(proto.Message); ok {
+ for _, ext := range proto.RegisteredExtensions(ep) {
+ name := fmt.Sprintf("[%s]", ext.Name)
+ raw, ok := jsonFields[name]
+ if !ok {
+ continue
+ }
+ delete(jsonFields, name)
+ nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+ if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+ return err
+ }
+ if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if !u.AllowUnknownFields && len(jsonFields) > 0 {
+ // Pick any field to be the scapegoat.
+ var f string
+ for fname := range jsonFields {
+ f = fname
+ break
+ }
+ return fmt.Errorf("unknown field %q in %v", f, targetType)
+ }
+ return nil
+ }
+
+ // Handle arrays (which aren't encoded bytes)
+ if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+ var slc []json.RawMessage
+ if err := json.Unmarshal(inputValue, &slc); err != nil {
+ return err
+ }
+ if slc != nil {
+ l := len(slc)
+ target.Set(reflect.MakeSlice(targetType, l, l))
+ for i := 0; i < l; i++ {
+ if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ // Handle maps (whose keys are always strings)
+ if targetType.Kind() == reflect.Map {
+ var mp map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &mp); err != nil {
+ return err
+ }
+ if mp != nil {
+ target.Set(reflect.MakeMap(targetType))
+ for ks, raw := range mp {
+ // Unmarshal map key. The core json library already decoded the key into a
+ // string, so we handle that specially. Other types were quoted post-serialization.
+ var k reflect.Value
+ if targetType.Key().Kind() == reflect.String {
+ k = reflect.ValueOf(ks)
+ } else {
+ k = reflect.New(targetType.Key()).Elem()
+ var kprop *proto.Properties
+ if prop != nil && prop.MapKeyProp != nil {
+ kprop = prop.MapKeyProp
+ }
+ if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+ return err
+ }
+ }
+
+ // Unmarshal map value.
+ v := reflect.New(targetType.Elem()).Elem()
+ var vprop *proto.Properties
+ if prop != nil && prop.MapValProp != nil {
+ vprop = prop.MapValProp
+ }
+ if err := u.unmarshalValue(v, raw, vprop); err != nil {
+ return err
+ }
+ target.SetMapIndex(k, v)
+ }
+ }
+ return nil
+ }
+
+ // Non-finite numbers can be encoded as strings.
+ isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+ if isFloat {
+ if num, ok := nonFinite[string(inputValue)]; ok {
+ target.SetFloat(num)
+ return nil
+ }
+ }
+
+ // integers & floats can be encoded as strings. In this case we drop
+ // the quotes and proceed as normal.
+ isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+ targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+ targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+ if isNum && strings.HasPrefix(string(inputValue), `"`) {
+ inputValue = inputValue[1 : len(inputValue)-1]
+ }
+
+ // Use the encoding/json for parsing other value types.
+ return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+func unquote(s string) (string, error) {
+ var ret string
+ err := json.Unmarshal([]byte(s), &ret)
+ return ret, err
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+ var prop proto.Properties
+ prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+ if origName || prop.JSONName == "" {
+ prop.JSONName = prop.OrigName
+ }
+ return &prop
+}
+
+type fieldNames struct {
+ orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+ opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+ if prop.JSONName != "" {
+ opts.camel = prop.JSONName
+ }
+ return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+ writer io.Writer
+ err error
+}
+
+func (w *errWriter) write(str string) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+ if k := s[i].Kind(); k == s[j].Kind() {
+ switch k {
+ case reflect.String:
+ return s[i].String() < s[j].String()
+ case reflect.Int32, reflect.Int64:
+ return s[i].Int() < s[j].Int()
+ case reflect.Uint32, reflect.Uint64:
+ return s[i].Uint() < s[j].Uint()
+ }
+ }
+ return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal. While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+ // Most well-known type messages do not contain required fields. The "Any" type may contain
+ // a message that has required fields.
+ //
+ // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
+ // field in order to transform that into JSON, and that should have returned an error if a
+ // required field is not set in the embedded message.
+ //
+ // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+ // embedded message to store the serialized message in Any.Value field, and that should have
+ // returned an error if a required field is not set.
+ if _, ok := pb.(wkt); ok {
+ return nil
+ }
+
+ v := reflect.ValueOf(pb)
+ // Skip message if it is not a struct pointer.
+ if v.Kind() != reflect.Ptr {
+ return nil
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ sfield := v.Type().Field(i)
+
+ if sfield.PkgPath != "" {
+ // blank PkgPath means the field is exported; skip if not exported
+ continue
+ }
+
+ if strings.HasPrefix(sfield.Name, "XXX_") {
+ continue
+ }
+
+ // Oneof field is an interface implemented by wrapper structs containing the actual oneof
+ // field, i.e. an interface containing &T{real_value}.
+ if sfield.Tag.Get("protobuf_oneof") != "" {
+ if field.Kind() != reflect.Interface {
+ continue
+ }
+ v := field.Elem()
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ continue
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct || v.NumField() < 1 {
+ continue
+ }
+ field = v.Field(0)
+ sfield = v.Type().Field(0)
+ }
+
+ protoTag := sfield.Tag.Get("protobuf")
+ if protoTag == "" {
+ continue
+ }
+ var prop proto.Properties
+ prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+ switch field.Kind() {
+ case reflect.Map:
+ if field.IsNil() {
+ continue
+ }
+ // Check each map value.
+ keys := field.MapKeys()
+ for _, k := range keys {
+ v := field.MapIndex(k)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Slice:
+ // Handle non-repeated type, e.g. bytes.
+ if !prop.Repeated {
+ if prop.Required && field.IsNil() {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+
+ // Handle repeated type.
+ if field.IsNil() {
+ continue
+ }
+ // Check each slice item.
+ for i := 0; i < field.Len(); i++ {
+ v := field.Index(i)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Ptr:
+ if field.IsNil() {
+ if prop.Required {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+ if err := checkRequiredFieldsInValue(field); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Handle proto2 extensions.
+ for _, ext := range proto.RegisteredExtensions(pb) {
+ if !proto.HasExtension(pb, ext) {
+ continue
+ }
+ ep, err := proto.GetExtension(pb, ext)
+ if err != nil {
+ return err
+ }
+ err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+ if pm, ok := v.Interface().(proto.Message); ok {
+ return checkRequiredFields(pm)
+ }
+ return nil
+}
From 9ea34c06d7dc9e6c29517227bb38d2a1a83149f1 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 8 Apr 2019 15:54:44 -0700
Subject: [PATCH 05/76] Add support for JSON event data
If the event has datacontenttype "application/json", the CEL environment
will have the parsed json data in the data field.
---
pkg/broker/cel.go | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index aa49f36b92c..5f33074e5c8 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -18,6 +18,7 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
e, err := cel.NewEnv(
cel.Declarations(
decls.NewIdent("ce", decls.NewObjectType("google.protobuf.Struct"), nil),
+ decls.NewIdent("data", decls.NewObjectType("google.protobuf.Struct"), nil),
),
)
if err != nil {
@@ -42,18 +43,35 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return false
}
- cloudEvent := &structpb.Struct{}
- eventJSON, err := json.Marshal(event.Context.AsV02())
+ eventContextStruct := &structpb.Struct{}
+ eventContextJSON, err := json.Marshal(event.Context.AsV02())
if err != nil {
//TODO do something with error
return false
}
- if err := jsonpb.Unmarshal(bytes.NewBuffer(eventJSON), cloudEvent); err != nil {
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(eventContextJSON), eventContextStruct); err != nil {
log.Fatalf("json parse error: %s\n", err)
}
+ eventDataStruct := &structpb.Struct{}
+ // TODO should use of dynamic data be configurable by a flag?
+ // CloudEvents SDK might have a better way to do this with data codecs
+ if event.Context.AsV02().GetDataContentType() == "application/json" {
+ eventDataJSON, err := json.Marshal(event.Data)
+ if err != nil {
+ //TODO do something with error
+ //TODO should this return? Only the data failed to parse, not the context,
+ // and the user might just be filtering on context
+ } else {
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(eventDataJSON), eventDataStruct); err != nil {
+ log.Fatalf("json parse error: %s\n", err)
+ }
+ }
+ }
+
out, _, err := prg.Eval(map[string]interface{}{
- "ce": cloudEvent,
+ "ce": eventContextStruct,
+ "data": eventDataStruct,
})
if err != nil {
//TODO do something with error
From 3f12c5e755de4a6ffde0335bd6fc758e3170d4e5 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 8 Apr 2019 16:28:37 -0700
Subject: [PATCH 06/76] Add ParseExtensions and ParseData flags
These fields must be true to enable dynamic parsing of event
extensions and data respectively in the CEL environment. If parsing
fails, the standard CE fields will still be available for filtering.
---
pkg/apis/eventing/v1alpha1/trigger_types.go | 15 ++-
pkg/broker/cel.go | 104 +++++++++++++++-----
2 files changed, 90 insertions(+), 29 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go
index 446e717099a..217ada63a47 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_types.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_types.go
@@ -75,8 +75,6 @@ type TriggerFilter struct {
//
// +optional
CEL *TriggerFilterCEL `json:"cel,omitempty"`
- // Alternate:
- // Expression *TriggerFilterExpression{Lang, Expr}
}
// TriggerFilterSourceAndType filters events based on exact matches on the cloud event's type and
@@ -91,7 +89,20 @@ type TriggerFilterSourceAndType struct {
// Expression Language runtime. An event passes the filter if the expression
// evaluates to true.
type TriggerFilterCEL struct {
+ // Expression is the CEL expression to evaluate. Required.
Expression string `json:"expression,omitempty"`
+ // ParseExtensions enables parsing of dynamic extensions attached to the event
+ // and makes the extensions available in the CEL environment. If extensions
+ // cannot be parsed they will be ignored. Defaults to false.
+ //
+ // +optional
+ ParseExtensions bool `json:"parseExtensions"`
+ // ParseData enables parsing of the event data and makes the parsed data
+ // available in the CEL environment. Currently this is only available for
+ // JSON data. Defaults to false.
+ //
+ // +optional
+ ParseData bool `json:"parseData"`
}
var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBrokerExists, TriggerConditionKubernetesService, TriggerConditionVirtualService, TriggerConditionSubscribed)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 5f33074e5c8..e8508dce2f2 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -3,7 +3,6 @@ package broker
import (
"bytes"
"encoding/json"
- "log"
"github.com/cloudevents/sdk-go/pkg/cloudevents"
"github.com/golang/protobuf/jsonpb"
@@ -26,7 +25,7 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return false
}
- p, iss := e.Parse(ts.Filter.CEL.Expr)
+ p, iss := e.Parse(ts.Filter.CEL.Expression)
if iss != nil && iss.Err() != nil {
//TODO do something with error
return false
@@ -43,36 +42,50 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return false
}
- eventContextStruct := &structpb.Struct{}
- eventContextJSON, err := json.Marshal(event.Context.AsV02())
- if err != nil {
- //TODO do something with error
- return false
- }
- if err := jsonpb.Unmarshal(bytes.NewBuffer(eventContextJSON), eventContextStruct); err != nil {
- log.Fatalf("json parse error: %s\n", err)
+ vars := map[string]interface{}{}
+
+ // Create a Struct containing all the known CloudEvents fields. This is the
+ // filtering baseline.
+ // TODO refactor this so the extra struct isn't allocated if it isn't used
+ vars["ce"] = ceContextToStruct(event.Context)
+
+ if ts.Filter.CEL.ParseExtensions {
+ func() {
+ eventContextStruct := &structpb.Struct{}
+ eventContextJSON, err := json.Marshal(event.Context.AsV02())
+ if err != nil {
+ //TODO do something with error
+ return
+ }
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(eventContextJSON), eventContextStruct); err != nil {
+ //TODO do something with error
+ return
+ }
+ // If we get here, replace the static context with the dynamic one
+ vars["ce"] = eventContextStruct
+ }()
}
- eventDataStruct := &structpb.Struct{}
- // TODO should use of dynamic data be configurable by a flag?
- // CloudEvents SDK might have a better way to do this with data codecs
- if event.Context.AsV02().GetDataContentType() == "application/json" {
- eventDataJSON, err := json.Marshal(event.Data)
- if err != nil {
- //TODO do something with error
- //TODO should this return? Only the data failed to parse, not the context,
- // and the user might just be filtering on context
- } else {
- if err := jsonpb.Unmarshal(bytes.NewBuffer(eventDataJSON), eventDataStruct); err != nil {
- log.Fatalf("json parse error: %s\n", err)
+ if ts.Filter.CEL.ParseData {
+ func() {
+ eventDataStruct := &structpb.Struct{}
+ // CloudEvents SDK might have a better way to do this with data codecs
+ if event.Context.AsV02().GetDataContentType() == "application/json" {
+ eventDataJSON, err := json.Marshal(event.Data)
+ if err != nil {
+ //TODO do something with error
+ return
+ }
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(eventDataJSON), eventDataStruct); err != nil {
+ //TODO do something with error
+ return
+ }
}
- }
+ vars["data"] = eventDataStruct
+ }()
}
- out, _, err := prg.Eval(map[string]interface{}{
- "ce": eventContextStruct,
- "data": eventDataStruct,
- })
+ out, _, err := prg.Eval(vars)
if err != nil {
//TODO do something with error
return false
@@ -80,3 +93,40 @@ func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event
return out == types.True
}
+
+func ceContextToStruct(eventCtx cloudevents.EventContext) *structpb.Struct {
+ return &structpb.Struct{
+ Fields: map[string]*structpb.Value{
+ "specversion": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetSpecVersion(),
+ },
+ },
+ "type": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetType(),
+ },
+ },
+ "source": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetSource(),
+ },
+ },
+ "schemaurl": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetSchemaURL(),
+ },
+ },
+ "datamediatype": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetDataMediaType(),
+ },
+ },
+ "datacontenttype": &structpb.Value{
+ Kind: &structpb.Value_StringValue{
+ StringValue: eventCtx.GetDataContentType(),
+ },
+ },
+ },
+ }
+}
From 10f9f2b24f75ffec1b9e825fd23568e9f8161586 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 9 Apr 2019 11:35:12 -0700
Subject: [PATCH 07/76] Log parse and eval errors
Also refactor event context parsing to be more efficient and readable.
---
pkg/broker/cel.go | 124 ++++++++++++++++++++++++-----------------
pkg/broker/receiver.go | 6 +-
2 files changed, 78 insertions(+), 52 deletions(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index e8508dce2f2..cc115d7f21f 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -11,90 +11,96 @@ import (
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "go.uber.org/zap"
)
-func filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
+const (
+ // CELVarKeyContext is the CEL variable key used for the CloudEvent event
+ // context.
+ CELVarKeyContext = "ce"
+ // CELVarKeyData is the CEL variable key used for the CloudEvent event data.
+ CELVarKeyData = "data"
+)
+
+func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
e, err := cel.NewEnv(
cel.Declarations(
- decls.NewIdent("ce", decls.NewObjectType("google.protobuf.Struct"), nil),
- decls.NewIdent("data", decls.NewObjectType("google.protobuf.Struct"), nil),
+ decls.NewIdent(CELVarKeyContext, decls.NewObjectType("google.protobuf.Struct"), nil),
+ decls.NewIdent(CELVarKeyData, decls.NewObjectType("google.protobuf.Struct"), nil),
),
)
if err != nil {
- //TODO do something with error
- return false
+ return false, err
}
p, iss := e.Parse(ts.Filter.CEL.Expression)
if iss != nil && iss.Err() != nil {
- //TODO do something with error
- return false
+ return false, iss.Err()
}
c, iss := e.Check(p)
if iss != nil && iss.Err() != nil {
- //TODO do something with error
- return false
+ return false, iss.Err()
}
+ //TODO cache these by hash of expression. Programs are thread-safe so it's
+ // ok to share them between triggers and events.
prg, err := e.Program(c)
if err != nil {
- //TODO do something with error
- return false
+ return false, err
}
vars := map[string]interface{}{}
-
- // Create a Struct containing all the known CloudEvents fields. This is the
- // filtering baseline.
- // TODO refactor this so the extra struct isn't allocated if it isn't used
- vars["ce"] = ceContextToStruct(event.Context)
-
+ // If the Trigger has requested parsing of extensions, attempt to turn them
+ // into a dynamic struct.
if ts.Filter.CEL.ParseExtensions {
- func() {
- eventContextStruct := &structpb.Struct{}
- eventContextJSON, err := json.Marshal(event.Context.AsV02())
- if err != nil {
- //TODO do something with error
- return
- }
- if err := jsonpb.Unmarshal(bytes.NewBuffer(eventContextJSON), eventContextStruct); err != nil {
- //TODO do something with error
- return
- }
- // If we get here, replace the static context with the dynamic one
- vars["ce"] = eventContextStruct
- }()
+ ctxStruct, err := ceParsedContextStruct(event.Context)
+ if err != nil {
+ r.logger.Error("Failed to parse event context for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
+ } else {
+ vars[CELVarKeyContext] = ctxStruct
+ }
}
+ // If the context var wasn't set due to trigger config or a failure to parse
+ // extensions, create a struct with the known CE fields as a filtering
+ // baseline.
+ if _, exists := vars[CELVarKeyContext]; !exists {
+ vars[CELVarKeyContext] = ceBaselineContextStruct(event.Context)
+ }
+
+ // If the Trigger has requested parsing of data, attempt to turn them into
+ // a dynamic struct.
if ts.Filter.CEL.ParseData {
- func() {
- eventDataStruct := &structpb.Struct{}
- // CloudEvents SDK might have a better way to do this with data codecs
- if event.Context.AsV02().GetDataContentType() == "application/json" {
- eventDataJSON, err := json.Marshal(event.Data)
- if err != nil {
- //TODO do something with error
- return
- }
- if err := jsonpb.Unmarshal(bytes.NewBuffer(eventDataJSON), eventDataStruct); err != nil {
- //TODO do something with error
- return
- }
- }
- vars["data"] = eventDataStruct
- }()
+ dataStruct, err := ceParsedDataStruct(event)
+ if err != nil {
+ r.logger.Error("Failed to parse event data for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
+ } else {
+ vars[CELVarKeyData] = dataStruct
+ }
}
out, _, err := prg.Eval(vars)
if err != nil {
- //TODO do something with error
- return false
+ return false, err
}
- return out == types.True
+ return out == types.True, nil
+}
+
+func ceParsedContextStruct(eventCtx cloudevents.EventContext) (*structpb.Struct, error) {
+ ctxStruct := &structpb.Struct{}
+ //TODO should this coerce to V02?
+ ctxJSON, err := json.Marshal(eventCtx.AsV02())
+ if err != nil {
+ return nil, err
+ }
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(ctxJSON), ctxStruct); err != nil {
+ return nil, err
+ }
+ return ctxStruct, nil
}
-func ceContextToStruct(eventCtx cloudevents.EventContext) *structpb.Struct {
+func ceBaselineContextStruct(eventCtx cloudevents.EventContext) *structpb.Struct {
return &structpb.Struct{
Fields: map[string]*structpb.Value{
"specversion": &structpb.Value{
@@ -130,3 +136,19 @@ func ceContextToStruct(eventCtx cloudevents.EventContext) *structpb.Struct {
},
}
}
+
+func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
+ //TODO CloudEvents SDK might have a better way to do this with data codecs
+ if event.Context.GetDataContentType() == "application/json" {
+ dataStruct := &structpb.Struct{}
+ dataJSON, err := json.Marshal(event.Data)
+ if err != nil {
+ return nil, err
+ }
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(dataJSON), dataStruct); err != nil {
+ return nil, err
+ }
+ return dataStruct, nil
+ }
+ return nil, nil
+}
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index 5d674c2087c..000408c622b 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -223,7 +223,11 @@ func (r *Receiver) shouldSendMessage(ts *eventingv1alpha1.TriggerSpec, event *cl
// TODO what should happen if multiple filter types are specified? OR? AND?
// precedence rules?
if ts.Filter.CEL != nil {
- return filterEventByCEL(ts, event)
+ pass, err := r.filterEventByCEL(ts, event)
+ if err != nil {
+ r.logger.Error("CEL filtering failure", zap.Error(err))
+ }
+ return pass
}
if ts.Filter.SourceAndType != nil {
From 983f5db7ce58ceeec91e9c919f50252fb941942f Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 9 Apr 2019 13:07:31 -0700
Subject: [PATCH 08/76] Refactor trigger receiver test to use builders
Triggers and Event fixtures are now generated by builders in an attempt
to make the content of the fixture clearer and more flexible.
---
pkg/broker/receiver_test.go | 173 +++++++++++++++++++-----------------
1 file changed, 93 insertions(+), 80 deletions(-)
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index 12db0e7529a..f578ce75551 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -61,12 +61,12 @@ func init() {
func TestReceiver(t *testing.T) {
testCases := map[string]struct {
- triggers []*eventingv1alpha1.Trigger
+ triggers []*TriggerBuilder
mocks controllertesting.Mocks
tctx *cehttp.TransportContext
- event *cloudevents.Event
+ event *EventBuilder
requestFails bool
- returnedEvent *cloudevents.Event
+ returnedEvent *EventBuilder
expectNewToFail bool
expectedErr bool
expectedDispatch bool
@@ -112,68 +112,68 @@ func TestReceiver(t *testing.T) {
expectedErr: true,
},
"Trigger doesn't have SubscriberURI": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTriggerWithoutSubscriberURI(),
+ triggers: []*TriggerBuilder{
+ Trigger(),
},
expectedErr: true,
},
"Trigger with bad SubscriberURI": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTriggerWithBadSubscriberURI(),
+ triggers: []*TriggerBuilder{
+ Trigger().BadSubscriberURI(),
},
expectedErr: true,
},
"Trigger without a Filter": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTriggerWithoutFilter(),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI(),
},
},
"No TTL": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
- event: makeEventWithoutTTL(),
+ event: Event(),
},
"Wrong type": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("some-other-type", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("some-other-type", ""),
},
},
"Wrong source": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", "some-other-source"),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", "some-other-source"),
},
},
"Dispatch failed": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
requestFails: true,
expectedErr: true,
expectedDispatch: true,
},
"Dispatch succeeded - Any": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
expectedDispatch: true,
},
"Dispatch succeeded - Specific": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger(eventType, eventSource),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType(eventType, eventSource),
},
expectedDispatch: true,
},
"Returned Cloud Event": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
expectedDispatch: true,
- returnedEvent: makeDifferentEvent(),
+ returnedEvent: Event().Type("some-other-type"),
},
"Returned Cloud Event with custom headers": {
- triggers: []*eventingv1alpha1.Trigger{
- makeTrigger("", ""),
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
tctx: &cehttp.TransportContext{
Method: "POST",
@@ -207,12 +207,11 @@ func TestReceiver(t *testing.T) {
"X-Ot-Foo": []string{"haden"},
},
expectedDispatch: true,
- returnedEvent: makeDifferentEvent(),
+ returnedEvent: Event().Type("some-other-type"),
},
}
for n, tc := range testCases {
t.Run(n, func(t *testing.T) {
-
fh := fakeHandler{
failRequest: tc.requestFails,
returnedEvent: tc.returnedEvent,
@@ -228,7 +227,7 @@ func TestReceiver(t *testing.T) {
if trig.Status.SubscriberURI == toBeReplaced {
trig.Status.SubscriberURI = s.URL
}
- correctURI = append(correctURI, trig)
+ correctURI = append(correctURI, trig.Build())
}
r, err := New(
@@ -255,9 +254,9 @@ func TestReceiver(t *testing.T) {
resp := &cloudevents.EventResponse{}
event := tc.event
if event == nil {
- event = makeEvent()
+ event = Event().TTL()
}
- err = r.serveHTTP(ctx, *event, resp)
+ err = r.serveHTTP(ctx, *event.Build(), resp)
if tc.expectedErr && err == nil {
t.Errorf("Expected an error, received nil")
@@ -283,7 +282,7 @@ func TestReceiver(t *testing.T) {
}
// The TTL will be added again.
- expectedResponseEvent := addTTLToEvent(*tc.returnedEvent)
+ expectedResponseEvent := *tc.returnedEvent.TTL().Build()
if diff := cmp.Diff(expectedResponseEvent.Context.AsV02(), resp.Event.Context.AsV02()); diff != "" {
t.Errorf("Incorrect response event context (-want +got): %s", diff)
}
@@ -298,7 +297,7 @@ type fakeHandler struct {
failRequest bool
requestReceived bool
headers http.Header
- returnedEvent *cloudevents.Event
+ returnedEvent *EventBuilder
t *testing.T
}
@@ -324,7 +323,7 @@ func (h *fakeHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
}
c := &cehttp.CodecV02{}
- m, err := c.Encode(*h.returnedEvent)
+ m, err := c.Encode(*h.returnedEvent.Build())
if err != nil {
h.t.Fatalf("Could not encode message: %v", err)
}
@@ -346,52 +345,65 @@ func getClient(initial []runtime.Object, mocks controllertesting.Mocks) *control
return controllertesting.NewMockClient(innerClient, mocks)
}
-func makeTrigger(t, s string) *eventingv1alpha1.Trigger {
- return &eventingv1alpha1.Trigger{
+type TriggerBuilder struct {
+ *eventingv1alpha1.Trigger
+}
+
+var _ controllertesting.Buildable = &TriggerBuilder{}
+
+func Trigger() *TriggerBuilder {
+ trigger := &eventingv1alpha1.Trigger{
TypeMeta: v1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
+ APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
Kind: "Trigger",
},
ObjectMeta: v1.ObjectMeta{
Namespace: testNS,
Name: triggerName,
},
- Spec: eventingv1alpha1.TriggerSpec{
- Filter: &eventingv1alpha1.TriggerFilter{
- SourceAndType: &eventingv1alpha1.TriggerFilterSourceAndType{
- Type: t,
- Source: s,
- },
- },
- },
- Status: eventingv1alpha1.TriggerStatus{
- SubscriberURI: "toBeReplaced",
+ }
+ return &TriggerBuilder{
+ Trigger: trigger,
+ }
+}
+
+func (b *TriggerBuilder) Build() runtime.Object {
+ return b.Trigger
+}
+
+func (b *TriggerBuilder) FilterSourceAndType(t, s string) *TriggerBuilder {
+ b.Spec.Filter = &eventingv1alpha1.TriggerFilter{
+ SourceAndType: &eventingv1alpha1.TriggerFilterSourceAndType{
+ Type: t,
+ Source: s,
},
}
+ return b
}
-func makeTriggerWithoutFilter() *eventingv1alpha1.Trigger {
- t := makeTrigger("", "")
- t.Spec.Filter = nil
- return t
+func (b *TriggerBuilder) SubscriberURI() *TriggerBuilder {
+ b.Status = eventingv1alpha1.TriggerStatus{
+ SubscriberURI: toBeReplaced,
+ }
+ return b
}
-func makeTriggerWithoutSubscriberURI() *eventingv1alpha1.Trigger {
- t := makeTrigger("", "")
- t.Status = eventingv1alpha1.TriggerStatus{}
- return t
+func (b *TriggerBuilder) BadSubscriberURI() *TriggerBuilder {
+ // This should fail url.Parse(). It was taken from the unit tests for
+ // url.Parse(); it violates rfc3986 3.2.3, namely that the port must be
+ // digits.
+ b.Status = eventingv1alpha1.TriggerStatus{
+ SubscriberURI: "http://[::1]:namedport",
+ }
+ return b
}
-func makeTriggerWithBadSubscriberURI() *eventingv1alpha1.Trigger {
- t := makeTrigger("", "")
- // This should fail url.Parse(). It was taken from the unit tests for url.Parse(), it violates
- // rfc3986 3.2.3, namely that the port must be digits.
- t.Status.SubscriberURI = "http://[::1]:namedport"
- return t
+type EventBuilder struct {
+ *cloudevents.Event
}
-func makeEventWithoutTTL() *cloudevents.Event {
- return &cloudevents.Event{
+func Event() *EventBuilder {
+ event := &cloudevents.Event{
Context: cloudevents.EventContextV02{
Type: eventType,
Source: types.URLRef{
@@ -402,29 +414,30 @@ func makeEventWithoutTTL() *cloudevents.Event {
ContentType: cloudevents.StringOfApplicationJSON(),
},
}
+ return &EventBuilder{
+ Event: event,
+ }
}
-func makeEvent() *cloudevents.Event {
- noTTL := makeEventWithoutTTL()
- e := addTTLToEvent(*noTTL)
- return &e
+func (b *EventBuilder) Build() *cloudevents.Event {
+ return b.Event
}
-func addTTLToEvent(e cloudevents.Event) cloudevents.Event {
- e.Context = SetTTL(e.Context, 1)
- return e
+func (b *EventBuilder) TTL() *EventBuilder {
+ b.Context = SetTTL(b.Context, 1)
+ return b
}
-func makeDifferentEvent() *cloudevents.Event {
- return &cloudevents.Event{
- Context: cloudevents.EventContextV02{
- Type: "some-other-type",
- Source: types.URLRef{
- URL: url.URL{
- Path: eventSource,
- },
+func (b *EventBuilder) Type(t string) *EventBuilder {
+ ct := b.Context.GetDataContentType()
+ b.Context = cloudevents.EventContextV02{
+ Type: t,
+ Source: types.URLRef{
+ URL: url.URL{
+ Path: b.Context.GetSource(),
},
- ContentType: cloudevents.StringOfApplicationJSON(),
},
+ ContentType: &ct,
}
+ return b
}
From b2d4cebf515b70529f78e3b86a3657679d5bacc3 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Wed, 10 Apr 2019 13:38:40 -0700
Subject: [PATCH 09/76] Refactor CEL variables and add tests
The CloudEvents SDK seems unable to return a single object with both the
standard fields and the extension fields. Until that's possible, the
parsed extensions will be prefixed with `ext.` in the CEL expression.
The parsed JSON data will be prefixed with `data.` in the CEL
expression.
The standard fields are currently un-prefixed, but that will probably
need to change because CEL reserves `type`. Currently the CloudEvents
type is called `typ` as a temporary workaround.
Tests now verify that basic expressions on type, source, parsed
extensions, and parsed data filter correctly.
---
pkg/broker/cel.go | 97 ++++++++++++-----------------
pkg/broker/receiver_test.go | 120 ++++++++++++++++++++++++++++++------
2 files changed, 141 insertions(+), 76 deletions(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index cc115d7f21f..b183a353c4d 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -15,17 +15,25 @@ import (
)
const (
- // CELVarKeyContext is the CEL variable key used for the CloudEvent event
- // context.
- CELVarKeyContext = "ce"
+ // CELVarKeyExtensions is the CEL variable key used for the CloudEvent event
+ // context extensions.
+ CELVarKeyExtensions = "ext"
// CELVarKeyData is the CEL variable key used for the CloudEvent event data.
CELVarKeyData = "data"
+ //TODO add a key that contains both the extensions and the baseline context
+ // so extensions can be future proofed
)
func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
e, err := cel.NewEnv(
cel.Declarations(
- decls.NewIdent(CELVarKeyContext, decls.NewObjectType("google.protobuf.Struct"), nil),
+ decls.NewIdent("specversion", decls.String, nil),
+ decls.NewIdent("typ", decls.String, nil),
+ decls.NewIdent("source", decls.String, nil),
+ decls.NewIdent("schemaurl", decls.String, nil),
+ decls.NewIdent("datamediatype", decls.String, nil),
+ decls.NewIdent("datacontenttype", decls.String, nil),
+ decls.NewIdent(CELVarKeyExtensions, decls.NewObjectType("google.protobuf.Struct"), nil),
decls.NewIdent(CELVarKeyData, decls.NewObjectType("google.protobuf.Struct"), nil),
),
)
@@ -50,24 +58,27 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
}
vars := map[string]interface{}{}
+ // Set baseline context fields
+ vars["specversion"] = event.Context.GetSpecVersion()
+ // TODO this doesn't work because `type` is reserved in CEL (it's a cast)
+ vars["typ"] = event.Context.GetType()
+ vars["source"] = event.Context.GetSource()
+ vars["schemaurl"] = event.Context.GetSchemaURL()
+ vars["datamediatype"] = event.Context.GetDataMediaType()
+ vars["datacontenttype"] = event.Context.GetDataContentType()
+
// If the Trigger has requested parsing of extensions, attempt to turn them
// into a dynamic struct.
if ts.Filter.CEL.ParseExtensions {
- ctxStruct, err := ceParsedContextStruct(event.Context)
+ //TODO should this coerce to V02?
+ extStruct, err := ceParsedExtensionsStruct(event.Context.AsV02().Extensions)
if err != nil {
r.logger.Error("Failed to parse event context for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
} else {
- vars[CELVarKeyContext] = ctxStruct
+ vars[CELVarKeyExtensions] = extStruct
}
}
- // If the context var wasn't set due to trigger config or a failure to parse
- // extensions, create a struct with the known CE fields as a filtering
- // baseline.
- if _, exists := vars[CELVarKeyContext]; !exists {
- vars[CELVarKeyContext] = ceBaselineContextStruct(event.Context)
- }
-
// If the Trigger has requested parsing of data, attempt to turn them into
// a dynamic struct.
if ts.Filter.CEL.ParseData {
@@ -87,64 +98,34 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
return out == types.True, nil
}
-func ceParsedContextStruct(eventCtx cloudevents.EventContext) (*structpb.Struct, error) {
- ctxStruct := &structpb.Struct{}
- //TODO should this coerce to V02?
- ctxJSON, err := json.Marshal(eventCtx.AsV02())
+func ceParsedExtensionsStruct(ext map[string]interface{}) (*structpb.Struct, error) {
+ extJSON, err := json.Marshal(ext)
if err != nil {
return nil, err
}
- if err := jsonpb.Unmarshal(bytes.NewBuffer(ctxJSON), ctxStruct); err != nil {
- return nil, err
- }
- return ctxStruct, nil
-}
-func ceBaselineContextStruct(eventCtx cloudevents.EventContext) *structpb.Struct {
- return &structpb.Struct{
- Fields: map[string]*structpb.Value{
- "specversion": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetSpecVersion(),
- },
- },
- "type": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetType(),
- },
- },
- "source": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetSource(),
- },
- },
- "schemaurl": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetSchemaURL(),
- },
- },
- "datamediatype": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetDataMediaType(),
- },
- },
- "datacontenttype": &structpb.Value{
- Kind: &structpb.Value_StringValue{
- StringValue: eventCtx.GetDataContentType(),
- },
- },
- },
+ extStruct := &structpb.Struct{}
+ if err := jsonpb.Unmarshal(bytes.NewBuffer(extJSON), extStruct); err != nil {
+ return nil, err
}
+ return extStruct, nil
}
func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
//TODO CloudEvents SDK might have a better way to do this with data codecs
if event.Context.GetDataContentType() == "application/json" {
- dataStruct := &structpb.Struct{}
- dataJSON, err := json.Marshal(event.Data)
+ var decodedData map[string]interface{}
+ err := event.DataAs(&decodedData)
+ if err != nil {
+ return nil, err
+ }
+ dataJSON, err := json.Marshal(decodedData)
if err != nil {
return nil, err
}
+
+ dataStruct := &structpb.Struct{}
+ //TODO is there a way to convert a map into a structpb.Struct?
if err := jsonpb.Unmarshal(bytes.NewBuffer(dataJSON), dataStruct); err != nil {
return nil, err
}
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index f578ce75551..efbd6250e4b 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -132,7 +132,7 @@ func TestReceiver(t *testing.T) {
triggers: []*TriggerBuilder{
Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
- event: Event(),
+ event: EventWithoutTTL(),
},
"Wrong type": {
triggers: []*TriggerBuilder{
@@ -152,18 +152,66 @@ func TestReceiver(t *testing.T) {
expectedErr: true,
expectedDispatch: true,
},
- "Dispatch succeeded - Any": {
+ "Dispatch succeeded - SourceAndType Any": {
triggers: []*TriggerBuilder{
Trigger().SubscriberURI().FilterSourceAndType("", ""),
},
expectedDispatch: true,
},
- "Dispatch succeeded - Specific": {
+ "Dispatch succeeded - SourceAndType Specific": {
triggers: []*TriggerBuilder{
Trigger().SubscriberURI().FilterSourceAndType(eventType, eventSource),
},
expectedDispatch: true,
},
+ "CEL wrong type": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`typ == "some-other-type"`),
+ },
+ },
+ "CEL wrong source": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`source == "some-other-source"`),
+ },
+ },
+ "CEL wrong parsed extensions": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`ext.foo == "baz"`).CELParseExtensions(),
+ },
+ event: Event().Extension("foo", "bar"),
+ },
+ "CEL wrong parsed data": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`data.baz == "quz"`).CELParseData(),
+ },
+ event: Event().JSONData(`{"baz":"qux"}`),
+ },
+ "Dispatch succeeded - CEL Any": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL("1 == 1"),
+ },
+ expectedDispatch: true,
+ },
+ "Dispatch succeeded - CEL Specific": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(fmt.Sprintf(`typ == "%s" && source == "%s"`, eventType, eventSource)),
+ },
+ expectedDispatch: true,
+ },
+ "Dispatch succeeded - CEL parsed extensions": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`ext.foo == "bar"`).CELParseExtensions(),
+ },
+ event: Event().Extension("foo", "bar"),
+ expectedDispatch: true,
+ },
+ "Dispatch succeeded - CEL parsed data": {
+ triggers: []*TriggerBuilder{
+ Trigger().SubscriberURI().FilterCEL(`data.baz == "qux"`).CELParseData(),
+ },
+ event: Event().JSONData(`{"baz":"qux"}`),
+ expectedDispatch: true,
+ },
"Returned Cloud Event": {
triggers: []*TriggerBuilder{
Trigger().SubscriberURI().FilterSourceAndType("", ""),
@@ -254,7 +302,7 @@ func TestReceiver(t *testing.T) {
resp := &cloudevents.EventResponse{}
event := tc.event
if event == nil {
- event = Event().TTL()
+ event = Event()
}
err = r.serveHTTP(ctx, *event.Build(), resp)
@@ -282,7 +330,7 @@ func TestReceiver(t *testing.T) {
}
// The TTL will be added again.
- expectedResponseEvent := *tc.returnedEvent.TTL().Build()
+ expectedResponseEvent := *tc.returnedEvent.Build()
if diff := cmp.Diff(expectedResponseEvent.Context.AsV02(), resp.Event.Context.AsV02()); diff != "" {
t.Errorf("Incorrect response event context (-want +got): %s", diff)
}
@@ -381,6 +429,25 @@ func (b *TriggerBuilder) FilterSourceAndType(t, s string) *TriggerBuilder {
return b
}
+func (b *TriggerBuilder) FilterCEL(expr string) *TriggerBuilder {
+ b.Spec.Filter = &eventingv1alpha1.TriggerFilter{
+ CEL: &eventingv1alpha1.TriggerFilterCEL{
+ Expression: expr,
+ },
+ }
+ return b
+}
+
+func (b *TriggerBuilder) CELParseExtensions() *TriggerBuilder {
+ b.Spec.Filter.CEL.ParseExtensions = true
+ return b
+}
+
+func (b *TriggerBuilder) CELParseData() *TriggerBuilder {
+ b.Spec.Filter.CEL.ParseData = true
+ return b
+}
+
func (b *TriggerBuilder) SubscriberURI() *TriggerBuilder {
b.Status = eventingv1alpha1.TriggerStatus{
SubscriberURI: toBeReplaced,
@@ -402,7 +469,11 @@ type EventBuilder struct {
*cloudevents.Event
}
-func Event() *EventBuilder {
+func (b *EventBuilder) Build() *cloudevents.Event {
+ return b.Event
+}
+
+func EventWithoutTTL() *EventBuilder {
event := &cloudevents.Event{
Context: cloudevents.EventContextV02{
Type: eventType,
@@ -419,8 +490,8 @@ func Event() *EventBuilder {
}
}
-func (b *EventBuilder) Build() *cloudevents.Event {
- return b.Event
+func Event() *EventBuilder {
+ return EventWithoutTTL().TTL()
}
func (b *EventBuilder) TTL() *EventBuilder {
@@ -429,15 +500,28 @@ func (b *EventBuilder) TTL() *EventBuilder {
}
func (b *EventBuilder) Type(t string) *EventBuilder {
- ct := b.Context.GetDataContentType()
- b.Context = cloudevents.EventContextV02{
- Type: t,
- Source: types.URLRef{
- URL: url.URL{
- Path: b.Context.GetSource(),
- },
- },
- ContentType: &ct,
- }
+ ctx := b.Context.AsV02()
+ ctx.Type = t
+ b.Context = ctx
+ return b
+}
+
+func (b *EventBuilder) DataContentType(t string) *EventBuilder {
+ ctx := b.Context.AsV02()
+ ctx.ContentType = &t
+ b.Context = ctx
+ return b
+}
+
+func (b *EventBuilder) Extension(k string, v interface{}) *EventBuilder {
+ ctx := b.Context.AsV02()
+ ctx.SetExtension(k, v)
+ b.Context = ctx
+ return b
+}
+
+func (b *EventBuilder) JSONData(d string) *EventBuilder {
+ b = b.DataContentType("application/json")
+ b.Data = []byte(d)
return b
}
From 076768ade973590d1fa965ff0308e4f4aefb972e Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Wed, 10 Apr 2019 13:52:28 -0700
Subject: [PATCH 10/76] Prefix CloudEvent context keys
`type` is problematic in CEL because it's a reserved word, and in
general it's probably a bad idea to risk collisions with top-level
reserved words.
This change nests all CE keys under `ce.` to avoid the `type` collision
and all future collisions. A proto is defined for these keys to avoid
issues with dynamic typing and make the definition more shareable.
Eventually the ce and ext prefixes could be combined into a single
prefix for future proofing of extensions and simpler user experience.
---
pkg/broker/cel.go | 30 +++---
pkg/broker/dev_knative/ce_context.pb.go | 122 ++++++++++++++++++++++++
pkg/broker/dev_knative/ce_context.proto | 12 +++
pkg/broker/receiver_test.go | 6 +-
4 files changed, 153 insertions(+), 17 deletions(-)
create mode 100644 pkg/broker/dev_knative/ce_context.pb.go
create mode 100644 pkg/broker/dev_knative/ce_context.proto
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index b183a353c4d..07ee078d98f 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -11,28 +11,28 @@ import (
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ celprotos "github.com/knative/eventing/pkg/broker/dev_knative"
"go.uber.org/zap"
)
const (
+ // CELVarKeyContext is the CEL variable key used for the CloudEvent event
+ // context fields defined in the spec.
+ CELVarKeyContext = "ce"
// CELVarKeyExtensions is the CEL variable key used for the CloudEvent event
// context extensions.
CELVarKeyExtensions = "ext"
// CELVarKeyData is the CEL variable key used for the CloudEvent event data.
CELVarKeyData = "data"
//TODO add a key that contains both the extensions and the baseline context
- // so extensions can be future proofed
+ // vars so extensions can be future proofed.
)
func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
e, err := cel.NewEnv(
+ cel.Types(&celprotos.CloudEventContext{}),
cel.Declarations(
- decls.NewIdent("specversion", decls.String, nil),
- decls.NewIdent("typ", decls.String, nil),
- decls.NewIdent("source", decls.String, nil),
- decls.NewIdent("schemaurl", decls.String, nil),
- decls.NewIdent("datamediatype", decls.String, nil),
- decls.NewIdent("datacontenttype", decls.String, nil),
+ decls.NewIdent(CELVarKeyContext, decls.NewObjectType("dev.knative.CloudEventContext"), nil),
decls.NewIdent(CELVarKeyExtensions, decls.NewObjectType("google.protobuf.Struct"), nil),
decls.NewIdent(CELVarKeyData, decls.NewObjectType("google.protobuf.Struct"), nil),
),
@@ -59,13 +59,15 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
vars := map[string]interface{}{}
// Set baseline context fields
- vars["specversion"] = event.Context.GetSpecVersion()
- // TODO this doesn't work because `type` is reserved in CEL (it's a cast)
- vars["typ"] = event.Context.GetType()
- vars["source"] = event.Context.GetSource()
- vars["schemaurl"] = event.Context.GetSchemaURL()
- vars["datamediatype"] = event.Context.GetDataMediaType()
- vars["datacontenttype"] = event.Context.GetDataContentType()
+ ceCtx := &celprotos.CloudEventContext{
+ Specversion: event.Context.GetSpecVersion(),
+ Type: event.Context.GetType(),
+ Source: event.Context.GetSource(),
+ Schemaurl: event.Context.GetSchemaURL(),
+ Datamediatype: event.Context.GetDataMediaType(),
+ Datacontenttype: event.Context.GetDataContentType(),
+ }
+ vars[CELVarKeyContext] = ceCtx
// If the Trigger has requested parsing of extensions, attempt to turn them
// into a dynamic struct.
diff --git a/pkg/broker/dev_knative/ce_context.pb.go b/pkg/broker/dev_knative/ce_context.pb.go
new file mode 100644
index 00000000000..f6c36fed60f
--- /dev/null
+++ b/pkg/broker/dev_knative/ce_context.pb.go
@@ -0,0 +1,122 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: ce_context.proto
+
+package dev_knative
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CloudEventContext struct {
+ Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
+ Schemaurl string `protobuf:"bytes,4,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
+ Datamediatype string `protobuf:"bytes,5,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
+ Datacontenttype string `protobuf:"bytes,6,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CloudEventContext) Reset() { *m = CloudEventContext{} }
+func (m *CloudEventContext) String() string { return proto.CompactTextString(m) }
+func (*CloudEventContext) ProtoMessage() {}
+func (*CloudEventContext) Descriptor() ([]byte, []int) {
+ return fileDescriptor_fc676048f2e074ad, []int{0}
+}
+
+func (m *CloudEventContext) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CloudEventContext.Unmarshal(m, b)
+}
+func (m *CloudEventContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CloudEventContext.Marshal(b, m, deterministic)
+}
+func (m *CloudEventContext) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudEventContext.Merge(m, src)
+}
+func (m *CloudEventContext) XXX_Size() int {
+ return xxx_messageInfo_CloudEventContext.Size(m)
+}
+func (m *CloudEventContext) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudEventContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudEventContext proto.InternalMessageInfo
+
+func (m *CloudEventContext) GetSpecversion() string {
+ if m != nil {
+ return m.Specversion
+ }
+ return ""
+}
+
+func (m *CloudEventContext) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *CloudEventContext) GetSource() string {
+ if m != nil {
+ return m.Source
+ }
+ return ""
+}
+
+func (m *CloudEventContext) GetSchemaurl() string {
+ if m != nil {
+ return m.Schemaurl
+ }
+ return ""
+}
+
+func (m *CloudEventContext) GetDatamediatype() string {
+ if m != nil {
+ return m.Datamediatype
+ }
+ return ""
+}
+
+func (m *CloudEventContext) GetDatacontenttype() string {
+ if m != nil {
+ return m.Datacontenttype
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CloudEventContext)(nil), "dev.knative.CloudEventContext")
+}
+
+func init() { proto.RegisterFile("ce_context.proto", fileDescriptor_fc676048f2e074ad) }
+
+var fileDescriptor_fc676048f2e074ad = []byte{
+ // 188 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x4e, 0x8d, 0x4f,
+ 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0x49,
+ 0x2d, 0xd3, 0xcb, 0xce, 0x4b, 0x2c, 0xc9, 0x2c, 0x4b, 0x55, 0x3a, 0xcf, 0xc8, 0x25, 0xe8, 0x9c,
+ 0x93, 0x5f, 0x9a, 0xe2, 0x5a, 0x96, 0x9a, 0x57, 0xe2, 0x0c, 0x51, 0x28, 0xa4, 0xc0, 0xc5, 0x5d,
+ 0x5c, 0x90, 0x9a, 0x5c, 0x96, 0x5a, 0x54, 0x9c, 0x99, 0x9f, 0x27, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1,
+ 0x19, 0x84, 0x2c, 0x24, 0x24, 0xc4, 0xc5, 0x52, 0x52, 0x59, 0x90, 0x2a, 0xc1, 0x04, 0x96, 0x02,
+ 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0xf3, 0x4b, 0x8b, 0x92, 0x53, 0x25, 0x98, 0xc1, 0xa2, 0x50,
+ 0x9e, 0x90, 0x0c, 0x17, 0x67, 0x71, 0x72, 0x46, 0x6a, 0x6e, 0x62, 0x69, 0x51, 0x8e, 0x04, 0x0b,
+ 0x58, 0x0a, 0x21, 0x20, 0xa4, 0xc2, 0xc5, 0x9b, 0x92, 0x58, 0x92, 0x98, 0x9b, 0x9a, 0x92, 0x99,
+ 0x08, 0x36, 0x92, 0x15, 0xac, 0x02, 0x55, 0x50, 0x48, 0x83, 0x8b, 0x1f, 0x24, 0x00, 0xf6, 0x49,
+ 0x5e, 0x09, 0x58, 0x1d, 0x1b, 0x58, 0x1d, 0xba, 0x70, 0x12, 0x1b, 0xd8, 0x97, 0xc6, 0x80, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0x9f, 0x0a, 0x80, 0x22, 0xf9, 0x00, 0x00, 0x00,
+}
diff --git a/pkg/broker/dev_knative/ce_context.proto b/pkg/broker/dev_knative/ce_context.proto
new file mode 100644
index 00000000000..b5dac07e6b0
--- /dev/null
+++ b/pkg/broker/dev_knative/ce_context.proto
@@ -0,0 +1,12 @@
+syntax = "proto3";
+
+package dev.knative;
+
+message CloudEventContext {
+ string specversion = 1;
+ string type = 2;
+ string source = 3;
+ string schemaurl = 4;
+ string datamediatype = 5;
+ string datacontenttype = 6;
+}
\ No newline at end of file
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index efbd6250e4b..26ff2a3ef59 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -166,12 +166,12 @@ func TestReceiver(t *testing.T) {
},
"CEL wrong type": {
triggers: []*TriggerBuilder{
- Trigger().SubscriberURI().FilterCEL(`typ == "some-other-type"`),
+ Trigger().SubscriberURI().FilterCEL(`ce.type == "some-other-type"`),
},
},
"CEL wrong source": {
triggers: []*TriggerBuilder{
- Trigger().SubscriberURI().FilterCEL(`source == "some-other-source"`),
+ Trigger().SubscriberURI().FilterCEL(`ce.source == "some-other-source"`),
},
},
"CEL wrong parsed extensions": {
@@ -194,7 +194,7 @@ func TestReceiver(t *testing.T) {
},
"Dispatch succeeded - CEL Specific": {
triggers: []*TriggerBuilder{
- Trigger().SubscriberURI().FilterCEL(fmt.Sprintf(`typ == "%s" && source == "%s"`, eventType, eventSource)),
+ Trigger().SubscriberURI().FilterCEL(fmt.Sprintf(`ce.type == "%s" && ce.source == "%s"`, eventType, eventSource)),
},
expectedDispatch: true,
},
From 58382f4a6a4bf0e3df2c6b7c12152947b559effa Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Thu, 11 Apr 2019 14:40:58 -0700
Subject: [PATCH 11/76] Add documentation for Brokers changing channel
provisioners. (#996)
* Add documentation for changing channel provisioners.
* lost -> lose
* Links go to knative.dev.
---
docs/broker/README.md | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/docs/broker/README.md b/docs/broker/README.md
index cd3905d7e7d..b0dc6be9ee4 100644
--- a/docs/broker/README.md
+++ b/docs/broker/README.md
@@ -51,6 +51,28 @@ spec:
## Usage
+### ClusterChannelProvisioner
+
+`Broker`'s use their `spec.channelTemplate` to create their internal `Channel`s, which dictate the durability guarantees of events sent to that `Broker`. If `spec.channelTemplate` is not specified, then the [default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/) for their namespace is used.
+
+#### Setup
+
+Have a `ClusterChannelProvisioner` installed and set as the [default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/) for the namespace you are interested in. For development, the [`in-memory` `ClusterChannelProvisioner`](https://github.com/knative/eventing/tree/master/config/provisioners/in-memory-channel#deployment-steps) is normally used.
+
+#### Changing
+
+**Note** changing the `ClusterChannelProvisioner` of a running `Broker` will lose all in-flight events.
+
+If you want to change which `ClusterChannelProvisioner` is used by a given `Broker`, then determine if the `spec.channelTemplate` is specified or not.
+
+If `spec.channelTemplate` is specified:
+1. Delete the `Broker`.
+1. Create the `Broker` with the updated `spec.channelTemplate`.
+
+If `spec.channelTemplate` is not specified:
+1. Change the [default provisioner](https://github.com/knative/docs/blob/master/docs/eventing/channels/default-channels.md#setting-the-default-channel-configuration) for the namespace that `Broker` is in.
+1. Delete and recreate the `Broker`.
+
### Broker
There are two ways to create a Broker, via [namespace annotation](#annotation)
From ec46349474472506b584b572d35344ca7407bd1f Mon Sep 17 00:00:00 2001
From: mattmoor-sockpuppet
Date: Sun, 14 Apr 2019 14:10:00 -0700
Subject: [PATCH 12/76] Format markdown (#1049)
Produced via: `prettier --write --prose-wrap=always $(find -name '*.md' | grep -v vendor | grep -v .github)`
---
docs/broker/README.md | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/docs/broker/README.md b/docs/broker/README.md
index b0dc6be9ee4..335e132b21f 100644
--- a/docs/broker/README.md
+++ b/docs/broker/README.md
@@ -53,24 +53,38 @@ spec:
### ClusterChannelProvisioner
-`Broker`'s use their `spec.channelTemplate` to create their internal `Channel`s, which dictate the durability guarantees of events sent to that `Broker`. If `spec.channelTemplate` is not specified, then the [default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/) for their namespace is used.
+`Broker`s use their `spec.channelTemplate` to create their internal `Channel`s,
+which dictate the durability guarantees of events sent to that `Broker`. If
+`spec.channelTemplate` is not specified, then the
+[default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/)
+for their namespace is used.
#### Setup
-Have a `ClusterChannelProvisioner` installed and set as the [default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/) for the namespace you are interested in. For development, the [`in-memory` `ClusterChannelProvisioner`](https://github.com/knative/eventing/tree/master/config/provisioners/in-memory-channel#deployment-steps) is normally used.
+Have a `ClusterChannelProvisioner` installed and set as the
+[default provisioner](https://www.knative.dev/docs/eventing/channels/default-channels/)
+for the namespace you are interested in. For development, the
+[`in-memory` `ClusterChannelProvisioner`](https://github.com/knative/eventing/tree/master/config/provisioners/in-memory-channel#deployment-steps)
+is normally used.
#### Changing
-**Note** changing the `ClusterChannelProvisioner` of a running `Broker` will lose all in-flight events.
+**Note** changing the `ClusterChannelProvisioner` of a running `Broker` will
+lose all in-flight events.
-If you want to change which `ClusterChannelProvisioner` is used by a given `Broker`, then determine if the `spec.channelTemplate` is specified or not.
+If you want to change which `ClusterChannelProvisioner` is used by a given
+`Broker`, then determine if the `spec.channelTemplate` is specified or not.
If `spec.channelTemplate` is specified:
+
1. Delete the `Broker`.
1. Create the `Broker` with the updated `spec.channelTemplate`.
If `spec.channelTemplate` is not specified:
-1. Change the [default provisioner](https://github.com/knative/docs/blob/master/docs/eventing/channels/default-channels.md#setting-the-default-channel-configuration) for the namespace that `Broker` is in.
+
+1. Change the
+ [default provisioner](https://github.com/knative/docs/blob/master/docs/eventing/channels/default-channels.md#setting-the-default-channel-configuration)
+ for the namespace that `Broker` is in.
1. Delete and recreate the `Broker`.
### Broker
From 9ad7999b1f35c2ab4a0a9d953ee1ba3aedcc5c55 Mon Sep 17 00:00:00 2001
From: mattmoor-sockpuppet
Date: Sun, 14 Apr 2019 14:35:00 -0700
Subject: [PATCH 13/76] Format markdown (#1051)
Produced via: `prettier --write --prose-wrap=always $(find -name '*.md' | grep -v vendor | grep -v .github)`
From c43fef40bfec05321d6d9017e75d524fb4d6d94a Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Mon, 15 Apr 2019 10:01:00 -0700
Subject: [PATCH 14/76] Update CloudEvents SDK to 0.6.0 (#1052)
---
Gopkg.lock | 6 +-
Gopkg.toml | 2 +-
pkg/broker/receiver_test.go | 4 +-
.../sdk-go/pkg/cloudevents/client/client.go | 1 +
.../pkg/cloudevents/client/defaulters.go | 45 +----
.../sdk-go/pkg/cloudevents/codec/jsoncodec.go | 68 ++++---
.../sdk-go/pkg/cloudevents/content_type.go | 1 +
.../sdk-go/pkg/cloudevents/context/logger.go | 42 +++++
.../pkg/cloudevents/data_content_encoding.go | 11 ++
.../sdk-go/pkg/cloudevents/datacodec/codec.go | 4 +
.../sdk-go/pkg/cloudevents/event.go | 138 +++-----------
.../sdk-go/pkg/cloudevents/event_data.go | 96 ++++++++++
.../sdk-go/pkg/cloudevents/event_interface.go | 72 ++++++++
.../sdk-go/pkg/cloudevents/event_reader.go | 58 ++++++
.../sdk-go/pkg/cloudevents/event_writer.go | 92 ++++++++++
.../sdk-go/pkg/cloudevents/eventcontext.go | 111 ++++++++---
.../pkg/cloudevents/eventcontext_v01.go | 122 ++++++-------
.../cloudevents/eventcontext_v01_reader.go | 86 +++++++++
.../cloudevents/eventcontext_v01_writer.go | 103 +++++++++++
.../pkg/cloudevents/eventcontext_v02.go | 145 ++++++++-------
.../cloudevents/eventcontext_v02_reader.go | 86 +++++++++
.../cloudevents/eventcontext_v02_writer.go | 103 +++++++++++
.../pkg/cloudevents/eventcontext_v03.go | 172 ++++++++++++------
.../cloudevents/eventcontext_v03_reader.go | 81 +++++++++
.../cloudevents/eventcontext_v03_writer.go | 107 +++++++++++
.../sdk-go/pkg/cloudevents/extensions.go | 13 ++
.../pkg/cloudevents/transport/http/codec.go | 2 +-
.../cloudevents/transport/http/codec_v01.go | 23 ++-
.../cloudevents/transport/http/codec_v02.go | 19 +-
.../cloudevents/transport/http/codec_v03.go | 45 +++--
.../cloudevents/transport/http/transport.go | 81 +++++----
31 files changed, 1478 insertions(+), 461 deletions(-)
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go
create mode 100644 vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 0020f129e36..7e30139f683 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -58,7 +58,7 @@
version = "v2.1.15"
[[projects]]
- digest = "1:f616892b62757b2dcd70957602981d4f05c4d75017138cea02b3845d07c18785"
+ digest = "1:fa1c3e6de410f74eb102fd927c838a66feb5b825fdf63d0e82cbbfd1a16db8a1"
name = "github.com/cloudevents/sdk-go"
packages = [
"pkg/cloudevents",
@@ -74,8 +74,8 @@
"pkg/cloudevents/types",
]
pruneopts = "NUT"
- revision = "e00e75c8a1befe895cc00def1f88adb611195159"
- version = "0.4.4"
+ revision = "51b1fb67ea02b6edb9c158592f4c996d6edd1493"
+ version = "0.6.0"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
diff --git a/Gopkg.toml b/Gopkg.toml
index b8c26f78819..684a7b0e669 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -130,4 +130,4 @@ required = [
[[constraint]]
name = "github.com/cloudevents/sdk-go"
- version = "=0.4.4"
+ version = "=0.6.0"
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index 12db0e7529a..cfa26f49a32 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -400,7 +400,7 @@ func makeEventWithoutTTL() *cloudevents.Event {
},
},
ContentType: cloudevents.StringOfApplicationJSON(),
- },
+ }.AsV02(),
}
}
@@ -425,6 +425,6 @@ func makeDifferentEvent() *cloudevents.Event {
},
},
ContentType: cloudevents.StringOfApplicationJSON(),
- },
+ }.AsV02(),
}
}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go
index 2a2ee3758a1..9eefa27c8ec 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/client.go
@@ -102,6 +102,7 @@ func (c *ceClient) obsSend(ctx context.Context, event cloudevents.Event) (*cloud
event = fn(event)
}
}
+
// Validate the event conforms to the CloudEvents Spec.
if err := event.Validate(); err != nil {
return nil, err
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go
index b1653264427..a5cd591c740 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go
@@ -2,7 +2,6 @@ package client
import (
"github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
"github.com/google/uuid"
"time"
)
@@ -15,25 +14,9 @@ type EventDefaulter func(event cloudevents.Event) cloudevents.Event
// context.ID if it is found to be empty.
func DefaultIDToUUIDIfNotSet(event cloudevents.Event) cloudevents.Event {
if event.Context != nil {
- switch event.Context.GetSpecVersion() {
- case cloudevents.CloudEventsVersionV01:
- ec := event.Context.AsV01()
- if ec.EventID == "" {
- ec.EventID = uuid.New().String()
- event.Context = ec
- }
- case cloudevents.CloudEventsVersionV02:
- ec := event.Context.AsV02()
- if ec.ID == "" {
- ec.ID = uuid.New().String()
- event.Context = ec
- }
- case cloudevents.CloudEventsVersionV03:
- ec := event.Context.AsV03()
- if ec.ID == "" {
- ec.ID = uuid.New().String()
- event.Context = ec
- }
+ if event.ID() == "" {
+ event.Context = event.Context.Clone()
+ event.SetID(uuid.New().String())
}
}
return event
@@ -43,25 +26,9 @@ func DefaultIDToUUIDIfNotSet(event cloudevents.Event) cloudevents.Event {
// Timestamp to context.Time if it is found to be nil or zero.
func DefaultTimeToNowIfNotSet(event cloudevents.Event) cloudevents.Event {
if event.Context != nil {
- switch event.Context.GetSpecVersion() {
- case cloudevents.CloudEventsVersionV01:
- ec := event.Context.AsV01()
- if ec.EventTime == nil || ec.EventTime.IsZero() {
- ec.EventTime = &types.Timestamp{Time: time.Now()}
- event.Context = ec
- }
- case cloudevents.CloudEventsVersionV02:
- ec := event.Context.AsV02()
- if ec.Time == nil || ec.Time.IsZero() {
- ec.Time = &types.Timestamp{Time: time.Now()}
- event.Context = ec
- }
- case cloudevents.CloudEventsVersionV03:
- ec := event.Context.AsV03()
- if ec.Time == nil || ec.Time.IsZero() {
- ec.Time = &types.Timestamp{Time: time.Now()}
- event.Context = ec
- }
+ if event.Time().IsZero() {
+ event.Context = event.Context.Clone()
+ event.SetTime(time.Now())
}
}
return event
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/codec/jsoncodec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/codec/jsoncodec.go
index 78d6765f890..ea607f3f9f1 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/codec/jsoncodec.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/codec/jsoncodec.go
@@ -6,7 +6,6 @@ import (
"github.com/cloudevents/sdk-go/pkg/cloudevents"
"github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec"
"github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
- "log"
"strconv"
)
@@ -28,7 +27,11 @@ func obsJsonEncodeV01(e cloudevents.Event) ([]byte, error) {
if ctx.ContentType == nil {
ctx.ContentType = cloudevents.StringOfApplicationJSON()
}
- return jsonEncode(ctx, e.Data)
+ data, err := e.DataBytes()
+ if err != nil {
+ return nil, err
+ }
+ return jsonEncode(ctx, data)
}
// JsonEncodeV02 takes in a cloudevent.Event and outputs the byte representation of that event using CloudEvents
@@ -49,7 +52,11 @@ func obsJsonEncodeV02(e cloudevents.Event) ([]byte, error) {
if ctx.ContentType == nil {
ctx.ContentType = cloudevents.StringOfApplicationJSON()
}
- return jsonEncode(ctx, e.Data)
+ data, err := e.DataBytes()
+ if err != nil {
+ return nil, err
+ }
+ return jsonEncode(ctx, data)
}
// JsonEncodeV03 takes in a cloudevent.Event and outputs the byte representation of that event using CloudEvents
@@ -70,10 +77,15 @@ func obsJsonEncodeV03(e cloudevents.Event) ([]byte, error) {
if ctx.DataContentType == nil {
ctx.DataContentType = cloudevents.StringOfApplicationJSON()
}
- return jsonEncode(ctx, e.Data)
+
+ data, err := e.DataBytes()
+ if err != nil {
+ return nil, err
+ }
+ return jsonEncode(ctx, data)
}
-func jsonEncode(ctx cloudevents.EventContext, data interface{}) ([]byte, error) {
+func jsonEncode(ctx cloudevents.EventContextReader, data []byte) ([]byte, error) {
ctxb, err := marshalEvent(ctx)
if err != nil {
return nil, err
@@ -86,19 +98,26 @@ func jsonEncode(ctx cloudevents.EventContext, data interface{}) ([]byte, error)
return nil, err
}
- mediaType := ctx.GetDataMediaType()
- datab, err := marshalEventData(mediaType, data)
- if err != nil {
- return nil, err
- }
if data != nil {
- if mediaType == "" || mediaType == cloudevents.ApplicationJSON {
- b["data"] = datab
- } else if datab[0] != byte('"') {
- b["data"] = []byte(strconv.QuoteToASCII(string(datab)))
+ // data is passed in as an encoded []byte. That slice might be any
+ // number of things but for json encoding of the envelope all we care
+ // is if the payload is either a string or a json object. If it is a
+ // json object, it can be inserted into the body without modification.
+ // Otherwise we need to quote it if not already quoted.
+ mediaType, err := ctx.GetDataMediaType()
+ if err != nil {
+ return nil, err
+ }
+ isBase64 := ctx.GetDataContentEncoding() == cloudevents.Base64
+ isJson := mediaType == "" || mediaType == cloudevents.ApplicationJSON || mediaType == cloudevents.TextJSON
+ // TODO(#60): we do not support json values at the moment, only objects and lists.
+ if isJson && !isBase64 {
+ b["data"] = data
+ } else if data[0] != byte('"') {
+ b["data"] = []byte(strconv.QuoteToASCII(string(data)))
} else {
// already quoted
- b["data"] = datab
+ b["data"] = data
}
}
@@ -140,8 +159,9 @@ func obsJsonDecodeV01(body []byte) (*cloudevents.Event, error) {
}
return &cloudevents.Event{
- Context: ec,
- Data: data,
+ Context: &ec,
+ Data: data,
+ DataEncoded: true,
}, nil
}
@@ -175,8 +195,9 @@ func obsJsonDecodeV02(body []byte) (*cloudevents.Event, error) {
}
return &cloudevents.Event{
- Context: ec,
- Data: data,
+ Context: &ec,
+ Data: data,
+ DataEncoded: true,
}, nil
}
@@ -210,16 +231,13 @@ func obsJsonDecodeV03(body []byte) (*cloudevents.Event, error) {
}
return &cloudevents.Event{
- Context: ec,
- Data: data,
+ Context: &ec,
+ Data: data,
+ DataEncoded: true,
}, nil
}
func marshalEvent(event interface{}) ([]byte, error) {
- if b, ok := event.([]byte); ok {
- log.Printf("json.marshalEvent asked to encode bytes... wrong? %s", string(b))
- }
-
b, err := json.Marshal(event)
if err != nil {
return nil, err
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go
index 077ff2bd970..e4e0e17f2b7 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go
@@ -1,6 +1,7 @@
package cloudevents
const (
+ TextJSON = "text/json"
ApplicationJSON = "application/json"
ApplicationXML = "application/xml"
ApplicationCloudEventsJSON = "application/cloudevents+json"
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go
new file mode 100644
index 00000000000..38d46069664
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go
@@ -0,0 +1,42 @@
+package context
+
+import (
+ "context"
+ "go.uber.org/zap"
+)
+
+// Opaque key type used to store logger
+type loggerKeyType struct{}
+
+var loggerKey = loggerKeyType{}
+
+// fallbackLogger is the logger is used when there is no logger attached to the context.
+var fallbackLogger *zap.SugaredLogger
+
+func init() {
+ if logger, err := zap.NewProduction(); err != nil {
+ // We failed to create a fallback logger.
+ fallbackLogger = zap.NewNop().Sugar()
+ } else {
+ fallbackLogger = logger.Named("fallback").Sugar()
+ }
+}
+
+// WithLogger returns a new context with the logger injected into the given context.
+func WithLogger(ctx context.Context, logger *zap.SugaredLogger) context.Context {
+ if logger == nil {
+ return context.WithValue(ctx, loggerKey, fallbackLogger)
+ }
+ return context.WithValue(ctx, loggerKey, logger)
+}
+
+// LoggerFrom returns the logger stored in context.
+func LoggerFrom(ctx context.Context) *zap.SugaredLogger {
+ l := ctx.Value(loggerKey)
+ if l != nil {
+ if logger, ok := l.(*zap.SugaredLogger); ok {
+ return logger
+ }
+ }
+ return fallbackLogger
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go
new file mode 100644
index 00000000000..180102ee3fa
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go
@@ -0,0 +1,11 @@
+package cloudevents
+
+const (
+ Base64 = "base64"
+)
+
+// StringOfBase64 returns a string pointer to "Base64"
+func StringOfBase64() *string {
+ a := Base64
+ return &a
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go
index 8deeb9d6674..7b88e627ccd 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go
@@ -26,11 +26,15 @@ func init() {
AddDecoder("", json.Decode)
AddDecoder("application/json", json.Decode)
+ AddDecoder("text/json", json.Decode)
AddDecoder("application/xml", xml.Decode)
+ AddDecoder("text/xml", xml.Decode)
AddEncoder("", json.Encode)
AddEncoder("application/json", json.Encode)
+ AddEncoder("text/json", json.Encode)
AddEncoder("application/xml", xml.Encode)
+ AddEncoder("text/xml", xml.Encode)
}
// AddDecoder registers a decoder for a given content type. The codecs will use
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go
index 367c333b70d..0f81258d209 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go
@@ -4,41 +4,30 @@ import (
"bytes"
"encoding/json"
"fmt"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec"
- "sort"
"strings"
)
// Event represents the canonical representation of a CloudEvent.
type Event struct {
- Context EventContext
- Data interface{}
+ Context EventContext
+ Data interface{}
+ DataEncoded bool
}
-// DataAs attempts to populate the provided data object with the event payload.
-// data should be a pointer type.
-func (e Event) DataAs(data interface{}) error {
- return datacodec.Decode(e.Context.GetDataMediaType(), e.Data, data)
-}
-
-// SpecVersion returns Context.GetSpecVersion()
-func (e Event) SpecVersion() string {
- return e.Context.GetSpecVersion()
-}
-
-// Type returns Context.GetType()
-func (e Event) Type() string {
- return e.Context.GetType()
-}
-
-// Source returns Context.GetSource()
-func (e Event) Source() string {
- return e.Context.GetSource()
-}
+const (
+ defaultEventVersion = CloudEventsVersionV02
+)
-// SchemaURL returns Context.GetSchemaURL()
-func (e Event) SchemaURL() string {
- return e.Context.GetSchemaURL()
+// New returns a new Event, an optional version can be passed to change the
+// default spec version from 0.2 to the provided version.
+func New(version ...string) Event {
+ specVersion := defaultEventVersion // TODO: should there be a default? or set a default?
+ if len(version) >= 1 {
+ specVersion = version[0]
+ }
+ e := &Event{}
+ e.SetSpecVersion(specVersion)
+ return *e
}
// ExtensionAs returns Context.ExtensionAs(name, obj)
@@ -46,11 +35,6 @@ func (e Event) ExtensionAs(name string, obj interface{}) error {
return e.Context.ExtensionAs(name, obj)
}
-// DataContentType returns Context.getDataContentType()
-func (e Event) DataContentType() string {
- return e.Context.GetDataContentType()
-}
-
// Validate performs a spec based validation on this event.
// Validation is dependent on the spec version specified in the event context.
func (e Event) Validate() error {
@@ -83,90 +67,22 @@ func (e Event) String() string {
b.WriteString(fmt.Sprintf("Validation Error: \n%s\n", valid.Error()))
}
- b.WriteString("Context Attributes,\n")
-
- var extensions map[string]interface{}
-
- // TODO: This impl detail should be pushed into the impl structs.
-
- switch e.SpecVersion() {
- case CloudEventsVersionV01:
- if ec, ok := e.Context.(EventContextV01); ok {
- b.WriteString(" cloudEventsVersion: " + ec.CloudEventsVersion + "\n")
- b.WriteString(" eventType: " + ec.EventType + "\n")
- if ec.EventTypeVersion != nil {
- b.WriteString(" eventTypeVersion: " + *ec.EventTypeVersion + "\n")
- }
- b.WriteString(" source: " + ec.Source.String() + "\n")
- b.WriteString(" eventID: " + ec.EventID + "\n")
- if ec.EventTime != nil {
- b.WriteString(" eventTime: " + ec.EventTime.String() + "\n")
- }
- if ec.SchemaURL != nil {
- b.WriteString(" schemaURL: " + ec.SchemaURL.String() + "\n")
- }
- if ec.ContentType != nil {
- b.WriteString(" contentType: " + *ec.ContentType + "\n")
- }
- extensions = ec.Extensions
- }
-
- case CloudEventsVersionV02:
- if ec, ok := e.Context.(EventContextV02); ok {
- b.WriteString(" specversion: " + ec.SpecVersion + "\n")
- b.WriteString(" type: " + ec.Type + "\n")
- b.WriteString(" source: " + ec.Source.String() + "\n")
- b.WriteString(" id: " + ec.ID + "\n")
- if ec.Time != nil {
- b.WriteString(" time: " + ec.Time.String() + "\n")
- }
- if ec.SchemaURL != nil {
- b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n")
- }
- if ec.ContentType != nil {
- b.WriteString(" contenttype: " + *ec.ContentType + "\n")
- }
- extensions = ec.Extensions
- }
-
- case CloudEventsVersionV03:
- if ec, ok := e.Context.(EventContextV03); ok {
- b.WriteString(" specversion: " + ec.SpecVersion + "\n")
- b.WriteString(" type: " + ec.Type + "\n")
- b.WriteString(" source: " + ec.Source.String() + "\n")
- b.WriteString(" id: " + ec.ID + "\n")
- if ec.Time != nil {
- b.WriteString(" time: " + ec.Time.String() + "\n")
- }
- if ec.SchemaURL != nil {
- b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n")
- }
- if ec.DataContentType != nil {
- b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n")
- }
- extensions = ec.Extensions
- }
- default:
- b.WriteString(e.String() + "\n")
- }
-
- if extensions != nil && len(extensions) > 0 {
- b.WriteString("Extensions,\n")
- keys := make([]string, 0, len(extensions))
- for k := range extensions {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, key := range keys {
- b.WriteString(fmt.Sprintf(" %s: %v\n", key, extensions[key]))
- }
- }
+ b.WriteString(e.Context.String())
if e.Data != nil {
b.WriteString("Data,\n ")
if strings.HasPrefix(e.DataContentType(), "application/json") {
var prettyJSON bytes.Buffer
- err := json.Indent(&prettyJSON, e.Data.([]byte), " ", " ")
+
+ data, ok := e.Data.([]byte)
+ if !ok {
+ var err error
+ data, err = json.Marshal(e.Data)
+ if err != nil {
+ data = []byte(err.Error())
+ }
+ }
+ err := json.Indent(&prettyJSON, data, " ", " ")
if err != nil {
b.Write(e.Data.([]byte))
} else {
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go
new file mode 100644
index 00000000000..d8929ccde15
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go
@@ -0,0 +1,96 @@
+package cloudevents
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec"
+ "strconv"
+)
+
+// Data is special. Break it out into it's own file.
+
+// SetData implements EventWriter.SetData
+func (e *Event) SetData(obj interface{}) error {
+ data, err := datacodec.Encode(e.DataMediaType(), obj)
+ if err != nil {
+ return err
+ }
+ if e.DataContentEncoding() == Base64 {
+ buf := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
+ base64.StdEncoding.Encode(buf, data)
+ e.Data = string(buf)
+ } else {
+ e.Data = data
+ }
+ e.DataEncoded = true
+ return nil
+}
+
+func (e *Event) DataBytes() ([]byte, error) {
+ if !e.DataEncoded {
+ if err := e.SetData(e.Data); err != nil {
+ return nil, err
+ }
+ }
+
+ b, ok := e.Data.([]byte)
+ if !ok {
+ if s, ok := e.Data.(string); ok {
+ b = []byte(s)
+ } else {
+ return nil, errors.New("data was not a byte slice or string")
+ }
+ }
+ return b, nil
+}
+
+const (
+ quotes = `"'`
+)
+
+// DataAs attempts to populate the provided data object with the event payload.
+// data should be a pointer type.
+func (e Event) DataAs(data interface{}) error { // TODO: Clean this function up
+ if e.Data == nil {
+ return nil
+ }
+ obj, ok := e.Data.([]byte)
+ if !ok {
+ if s, ok := e.Data.(string); ok {
+ obj = []byte(s)
+ } else {
+ return errors.New("data was not a byte slice or string")
+ }
+ }
+ if len(obj) == 0 {
+ // no data.
+ return nil
+ }
+ if e.Context.GetDataContentEncoding() == Base64 {
+ var bs []byte
+ // test to see if we need to unquote the data.
+ if obj[0] == quotes[0] || obj[0] == quotes[1] {
+ str, err := strconv.Unquote(string(obj))
+ if err != nil {
+ return err
+ }
+ bs = []byte(str)
+ } else {
+ bs = obj
+ }
+
+ buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs)))
+ n, err := base64.StdEncoding.Decode(buf, bs)
+ if err != nil {
+ return fmt.Errorf("failed to decode data from base64: %s", err.Error())
+ }
+ obj = buf[:n]
+ }
+
+ mediaType, err := e.Context.GetDataMediaType()
+ if err != nil {
+ return err
+ }
+ return datacodec.Decode(mediaType, obj, data)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go
new file mode 100644
index 00000000000..da3890e9399
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go
@@ -0,0 +1,72 @@
+package cloudevents
+
+import (
+ "time"
+)
+
+// EventWriter is the interface for reading through an event from attributes.
+type EventReader interface {
+ // SpecVersion returns event.Context.GetSpecVersion().
+ SpecVersion() string
+ // Type returns event.Context.GetType().
+ Type() string
+ // Source returns event.Context.GetSource().
+ Source() string
+ // Subject returns event.Context.GetSubject().
+ Subject() string
+ // ID returns event.Context.GetID().
+ ID() string
+ // Time returns event.Context.GetTime().
+ Time() time.Time
+ // SchemaURL returns event.Context.GetSchemaURL().
+ SchemaURL() string
+ // DataContentType returns event.Context.GetDataContentType().
+ DataContentType() string
+ // DataMediaType returns event.Context.GetDataMediaType().
+ DataMediaType() string
+ // DataContentEncoding returns event.Context.GetDataContentEncoding().
+ DataContentEncoding() string
+
+ // Extension Attributes
+
+ // ExtensionAs returns event.Context.ExtensionAs(name, obj).
+ ExtensionAs(string, interface{}) error
+
+ // Data Attribute
+
+ // ExtensionAs returns event.Context.ExtensionAs(name, obj).
+ DataAs(interface{}) error
+}
+
+// EventWriter is the interface for writing through an event onto attributes.
+// If an error is thrown by a sub-component, EventWriter panics.
+type EventWriter interface {
+ // Context Attributes
+
+ // SetSpecVersion performs event.Context.SetSpecVersion.
+ SetSpecVersion(string)
+ // SetType performs event.Context.SetType.
+ SetType(string)
+ // SetSource performs event.Context.SetSource.
+ SetSource(string)
+ // SetSubject( performs event.Context.SetSubject.
+ SetSubject(string)
+ // SetID performs event.Context.SetID.
+ SetID(string)
+ // SetTime performs event.Context.SetTime.
+ SetTime(time.Time)
+ // SetSchemaURL performs event.Context.SetSchemaURL.
+ SetSchemaURL(string)
+ // SetDataContentType performs event.Context.SetDataContentType.
+ SetDataContentType(string)
+ // SetDataContentEncoding performs event.Context.SetDataContentEncoding.
+ SetDataContentEncoding(string)
+
+ // Extension Attributes
+
+ // SetExtension performs event.Context.SetExtension.
+ SetExtension(string, interface{})
+
+ // SetData encodes the given payload with the current encoding settings.
+ SetData(interface{}) error
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go
new file mode 100644
index 00000000000..73beb626304
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go
@@ -0,0 +1,58 @@
+package cloudevents
+
+import (
+ "time"
+)
+
+var _ EventReader = (*Event)(nil)
+
+// SpecVersion implements EventReader.SpecVersion
+func (e Event) SpecVersion() string {
+ return e.Context.GetSpecVersion()
+}
+
+// Type implements EventReader.Type
+func (e Event) Type() string {
+ return e.Context.GetType()
+}
+
+// Source implements EventReader.Source
+func (e Event) Source() string {
+ return e.Context.GetSource()
+}
+
+// Subject implements EventReader.Subject
+func (e Event) Subject() string {
+ return e.Context.GetSubject()
+}
+
+// ID implements EventReader.ID
+func (e Event) ID() string {
+ return e.Context.GetID()
+}
+
+// Time implements EventReader.Time
+func (e Event) Time() time.Time {
+ return e.Context.GetTime()
+}
+
+// SchemaURL implements EventReader.SchemaURL
+func (e Event) SchemaURL() string {
+ return e.Context.GetSchemaURL()
+}
+
+// DataContentType implements EventReader.DataContentType
+func (e Event) DataContentType() string {
+ return e.Context.GetDataContentType()
+}
+
+// DataMediaType implements EventReader.DataMediaType
+func (e Event) DataMediaType() string {
+ mediaType, _ := e.Context.GetDataMediaType()
+ return mediaType
+}
+
+// DataContentEncoding implements EventReader.DataContentEncoding
+func (e Event) DataContentEncoding() string {
+ return e.Context.GetDataContentEncoding()
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go
new file mode 100644
index 00000000000..ce5b3e87673
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go
@@ -0,0 +1,92 @@
+package cloudevents
+
+import (
+ "fmt"
+ "time"
+)
+
+var _ EventWriter = (*Event)(nil)
+
+// SetSpecVersion implements EventWriter.SetSpecVersion
+func (e *Event) SetSpecVersion(v string) {
+ if e.Context == nil {
+ switch v {
+ case CloudEventsVersionV01:
+ e.Context = EventContextV01{}.AsV01()
+ case CloudEventsVersionV02:
+ e.Context = EventContextV02{}.AsV02()
+ case CloudEventsVersionV03:
+ e.Context = EventContextV03{}.AsV03()
+ default:
+ panic(fmt.Errorf("a valid spec version is required: [%s, %s, %s]",
+ CloudEventsVersionV01, CloudEventsVersionV02, CloudEventsVersionV03))
+ }
+ return
+ }
+ if err := e.Context.SetSpecVersion(v); err != nil {
+ panic(err)
+ }
+}
+
+// SetType implements EventWriter.SetType
+func (e *Event) SetType(t string) {
+ if err := e.Context.SetType(t); err != nil {
+ panic(err)
+ }
+}
+
+// SetSource implements EventWriter.SetSource
+func (e *Event) SetSource(s string) {
+ if err := e.Context.SetSource(s); err != nil {
+ panic(err)
+ }
+}
+
+// SetSubject implements EventWriter.SetSubject
+func (e *Event) SetSubject(s string) {
+ if err := e.Context.SetSubject(s); err != nil {
+ panic(err)
+ }
+}
+
+// SetID implements EventWriter.SetID
+func (e *Event) SetID(id string) {
+ if err := e.Context.SetID(id); err != nil {
+ panic(err)
+ }
+}
+
+// SetTime implements EventWriter.SetTime
+func (e *Event) SetTime(t time.Time) {
+ if err := e.Context.SetTime(t); err != nil {
+ panic(err)
+ }
+}
+
+// SetSchemaURL implements EventWriter.SetSchemaURL
+func (e *Event) SetSchemaURL(s string) {
+ if err := e.Context.SetSchemaURL(s); err != nil {
+ panic(err)
+ }
+}
+
+// SetDataContentType implements EventWriter.SetDataContentType
+func (e *Event) SetDataContentType(ct string) {
+ if err := e.Context.SetDataContentType(ct); err != nil {
+ panic(err)
+ }
+}
+
+// SetDataContentEncoding implements EventWriter.SetDataContentEncoding
+func (e *Event) SetDataContentEncoding(enc string) {
+ if err := e.Context.SetDataContentEncoding(enc); err != nil {
+ panic(err)
+ }
+}
+
+// SetDataContentEncoding implements EventWriter.SetDataContentEncoding
+func (e *Event) SetExtension(name string, obj interface{}) {
+ if err := e.Context.SetExtension(name, obj); err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go
index b437bf79c9a..7354d25b1f3 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go
@@ -1,48 +1,105 @@
package cloudevents
-// EventContext is conical interface for a CloudEvents Context.
-type EventContext interface {
+import "time"
+
+// EventContextReader are the methods required to be a reader of context
+// attributes.
+type EventContextReader interface {
+ // GetSpecVersion returns the native CloudEvents Spec version of the event
+ // context.
+ GetSpecVersion() string
+ // GetType returns the CloudEvents type from the context.
+ GetType() string
+ // GetSource returns the CloudEvents source from the context.
+ GetSource() string
+ // GetSubject returns the CloudEvents subject from the context.
+ GetSubject() string
+ // GetID returns the CloudEvents ID from the context.
+ GetID() string
+ // GetTime returns the CloudEvents creation time from the context.
+ GetTime() time.Time
+ // GetSchemaURL returns the CloudEvents schema URL (if any) from the
+ // context.
+ GetSchemaURL() string
+ // GetDataContentType returns content type on the context.
+ GetDataContentType() string
+ // GetDataContentEncoding returns content encoding on the context.
+ GetDataContentEncoding() string
+
+ // GetDataMediaType returns the MIME media type for encoded data, which is
+ // needed by both encoding and decoding. This is a processed form of
+ // GetDataContentType and it may return an error.
+ GetDataMediaType() (string, error)
+
+ // ExtensionAs populates the given interface with the CloudEvents extension
+ // of the given name from the extension attributes. It returns an error if
+ // the extension does not exist, the extension's type does not match the
+ // provided type, or if the type is not a supported.
+ ExtensionAs(string, interface{}) error
+}
+
+// EventContextWriter are the methods required to be a writer of context
+// attributes.
+type EventContextWriter interface {
+ // SetSpecVersion sets the spec version of the context.
+ SetSpecVersion(string) error
+ // SetType sets the type of the context.
+ SetType(string) error
+ // SetSource sets the source of the context.
+ SetSource(string) error
+ // SetSubject sets the subject of the context.
+ SetSubject(string) error
+ // SetID sets the ID of the context.
+ SetID(string) error
+ // SetTime sets the time of the context.
+ SetTime(time time.Time) error
+ // SetSchemaURL sets the schema url of the context.
+ SetSchemaURL(string) error
+ // SetDataContentType sets the data content type of the context.
+ SetDataContentType(string) error
+ // SetDataContentEncoding sets the data context encoding of the context.
+ SetDataContentEncoding(string) error
+
+ // SetExtension sets the given interface onto the extension attributes
+ // determined by the provided name.
+ SetExtension(string, interface{}) error
+}
+
+type EventContextConverter interface {
// AsV01 provides a translation from whatever the "native" encoding of the
// CloudEvent was to the equivalent in v0.1 field names, moving fields to or
// from extensions as necessary.
- AsV01() EventContextV01
+ AsV01() *EventContextV01
// AsV02 provides a translation from whatever the "native" encoding of the
// CloudEvent was to the equivalent in v0.2 field names, moving fields to or
// from extensions as necessary.
- AsV02() EventContextV02
+ AsV02() *EventContextV02
// AsV03 provides a translation from whatever the "native" encoding of the
// CloudEvent was to the equivalent in v0.3 field names, moving fields to or
// from extensions as necessary.
- AsV03() EventContextV03
-
- // GetDataContentType returns content type on the context.
- GetDataContentType() string
-
- // GetDataMediaType returns the MIME media type for encoded data, which is
- // needed by both encoding and decoding.
- GetDataMediaType() string
-
- // GetSpecVersion returns the native CloudEvents Spec version of the event
- // context.
- GetSpecVersion() string
-
- // GetType returns the CloudEvents type from the context.
- GetType() string
+ AsV03() *EventContextV03
+}
- // GetSource returns the CloudEvents source from the context.
- GetSource() string
+// EventContext is conical interface for a CloudEvents Context.
+type EventContext interface {
+ // EventContextConverter allows for conversion between versions.
+ EventContextConverter
- // GetSchemaURL returns the CloudEvents schema URL (if any) from the context.
- GetSchemaURL() string
+ // EventContextReader adds methods for reading context.
+ EventContextReader
- // ExtensionAs populates 'obj' with the CloudEvents extension 'name' from the context.
- // It returns an error if the extension 'name' does not exist, the extension's type
- // does not match the 'obj' type, or if the 'obj' type is not a supported.
- ExtensionAs(name string, obj interface{}) error
+ // EventContextWriter adds methods for writing to context.
+ EventContextWriter
// Validate the event based on the specifics of the CloudEvents spec version
// represented by this event context.
Validate() error
+
+ // Clone clones the event context.
+ Clone() EventContext
+
+ // String returns a pretty-printed representation of the EventContext.
+ String() string
}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go
index e4293cfb1b0..b8f9ce570d5 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go
@@ -3,8 +3,7 @@ package cloudevents
import (
"fmt"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
- "mime"
+ "sort"
"strings"
)
@@ -41,54 +40,7 @@ type EventContextV01 struct {
// Adhere to EventContext
var _ EventContext = (*EventContextV01)(nil)
-// GetSpecVersion implements EventContext.GetSpecVersion
-func (ec EventContextV01) GetSpecVersion() string {
- if ec.CloudEventsVersion != "" {
- return ec.CloudEventsVersion
- }
- return CloudEventsVersionV01
-}
-
-// GetDataContentType implements EventContext.GetDataContentType
-func (ec EventContextV01) GetDataContentType() string {
- if ec.ContentType != nil {
- return *ec.ContentType
- }
- return ""
-}
-
-// GetDataMediaType implements EventContext.GetDataMediaType
-func (ec EventContextV01) GetDataMediaType() string {
- if ec.ContentType != nil {
- mediaType, _, err := mime.ParseMediaType(*ec.ContentType)
- if err != nil {
- log.Printf("failed to parse media type from ContentType: %s", err)
- return ""
- }
- return mediaType
- }
- return ""
-}
-
-// GetType implements EventContext.GetType
-func (ec EventContextV01) GetType() string {
- return ec.EventType
-}
-
-// GetSource implements EventContext.GetSource
-func (ec EventContextV01) GetSource() string {
- return ec.Source.String()
-}
-
-// GetSchemaURL implements EventContext.GetSchemaURL
-func (ec EventContextV01) GetSchemaURL() string {
- if ec.SchemaURL != nil {
- return ec.SchemaURL.String()
- }
- return ""
-}
-
-// ExtensionAs implements EventContext.ExtensionAs
+// ExtensionAs implements EventContextReader.ExtensionAs
func (ec EventContextV01) ExtensionAs(name string, obj interface{}) error {
value, ok := ec.Extensions[name]
if !ok {
@@ -109,21 +61,31 @@ func (ec EventContextV01) ExtensionAs(name string, obj interface{}) error {
}
// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context.
-func (ec *EventContextV01) SetExtension(name string, value interface{}) {
+func (ec *EventContextV01) SetExtension(name string, value interface{}) error {
if ec.Extensions == nil {
ec.Extensions = make(map[string]interface{})
}
- ec.Extensions[name] = value
+ if value == nil {
+ delete(ec.Extensions, name)
+ } else {
+ ec.Extensions[name] = value
+ }
+ return nil
}
-// AsV01 implements EventContext.AsV01
-func (ec EventContextV01) AsV01() EventContextV01 {
+// Clone implements EventContextConverter.Clone
+func (ec EventContextV01) Clone() EventContext {
+ return ec.AsV01()
+}
+
+// AsV01 implements EventContextConverter.AsV01
+func (ec EventContextV01) AsV01() *EventContextV01 {
ec.CloudEventsVersion = CloudEventsVersionV01
- return ec
+ return &ec
}
-// AsV02 implements EventContext.AsV02
-func (ec EventContextV01) AsV02() EventContextV02 {
+// AsV02 implements EventContextConverter.AsV02
+func (ec EventContextV01) AsV02() *EventContextV02 {
ret := EventContextV02{
SpecVersion: CloudEventsVersionV02,
Type: ec.EventType,
@@ -137,7 +99,7 @@ func (ec EventContextV01) AsV02() EventContextV02 {
// eventTypeVersion was retired in v0.2, so put it in an extension.
if ec.EventTypeVersion != nil {
- ret.Extensions["eventTypeVersion"] = *ec.EventTypeVersion
+ ret.SetExtension(EventTypeVersionKey, *ec.EventTypeVersion)
}
if ec.Extensions != nil {
for k, v := range ec.Extensions {
@@ -147,11 +109,11 @@ func (ec EventContextV01) AsV02() EventContextV02 {
if len(ret.Extensions) == 0 {
ret.Extensions = nil
}
- return ret
+ return &ret
}
-// AsV03 implements EventContext.AsV03
-func (ec EventContextV01) AsV03() EventContextV03 {
+// AsV03 implements EventContextConverter.AsV03
+func (ec EventContextV01) AsV03() *EventContextV03 {
ecv2 := ec.AsV02()
return ecv2.AsV03()
}
@@ -265,3 +227,41 @@ func (ec EventContextV01) Validate() error {
}
return nil
}
+
+// String returns a pretty-printed representation of the EventContext.
+func (ec EventContextV01) String() string {
+ b := strings.Builder{}
+
+ b.WriteString("Context Attributes,\n")
+
+ b.WriteString(" cloudEventsVersion: " + ec.CloudEventsVersion + "\n")
+ b.WriteString(" eventType: " + ec.EventType + "\n")
+ if ec.EventTypeVersion != nil {
+ b.WriteString(" eventTypeVersion: " + *ec.EventTypeVersion + "\n")
+ }
+ b.WriteString(" source: " + ec.Source.String() + "\n")
+ b.WriteString(" eventID: " + ec.EventID + "\n")
+ if ec.EventTime != nil {
+ b.WriteString(" eventTime: " + ec.EventTime.String() + "\n")
+ }
+ if ec.SchemaURL != nil {
+ b.WriteString(" schemaURL: " + ec.SchemaURL.String() + "\n")
+ }
+ if ec.ContentType != nil {
+ b.WriteString(" contentType: " + *ec.ContentType + "\n")
+ }
+
+ if ec.Extensions != nil && len(ec.Extensions) > 0 {
+ b.WriteString("Extensions,\n")
+ keys := make([]string, 0, len(ec.Extensions))
+ for k := range ec.Extensions {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key]))
+ }
+ }
+
+ return b.String()
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go
new file mode 100644
index 00000000000..f355c3677be
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go
@@ -0,0 +1,86 @@
+package cloudevents
+
+import (
+ "mime"
+ "time"
+)
+
+// Adhere to EventContextReader
+var _ EventContextReader = (*EventContextV01)(nil)
+
+// GetSpecVersion implements EventContextReader.GetSpecVersion
+func (ec EventContextV01) GetSpecVersion() string {
+ if ec.CloudEventsVersion != "" {
+ return ec.CloudEventsVersion
+ }
+ return CloudEventsVersionV01
+}
+
+// GetDataContentType implements EventContextReader.GetDataContentType
+func (ec EventContextV01) GetDataContentType() string {
+ if ec.ContentType != nil {
+ return *ec.ContentType
+ }
+ return ""
+}
+
+// GetDataMediaType implements EventContextReader.GetDataMediaType
+func (ec EventContextV01) GetDataMediaType() (string, error) {
+ if ec.ContentType != nil {
+ mediaType, _, err := mime.ParseMediaType(*ec.ContentType)
+ if err != nil {
+ return "", err
+ }
+ return mediaType, nil
+ }
+ return "", nil
+}
+
+// GetType implements EventContextReader.GetType
+func (ec EventContextV01) GetType() string {
+ return ec.EventType
+}
+
+// GetSource implements EventContextReader.GetSource
+func (ec EventContextV01) GetSource() string {
+ return ec.Source.String()
+}
+
+// GetSubject implements EventContextReader.GetSubject
+func (ec EventContextV01) GetSubject() string {
+ var sub string
+ if err := ec.ExtensionAs(SubjectKey, &sub); err != nil {
+ return ""
+ }
+ return sub
+}
+
+// GetID implements EventContextReader.GetID
+func (ec EventContextV01) GetID() string {
+ return ec.EventID
+}
+
+// GetTime implements EventContextReader.GetTime
+func (ec EventContextV01) GetTime() time.Time {
+ if ec.EventTime != nil {
+ return ec.EventTime.Time
+ }
+ return time.Time{}
+}
+
+// GetSchemaURL implements EventContextReader.GetSchemaURL
+func (ec EventContextV01) GetSchemaURL() string {
+ if ec.SchemaURL != nil {
+ return ec.SchemaURL.String()
+ }
+ return ""
+}
+
+// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding
+func (ec EventContextV01) GetDataContentEncoding() string {
+ var enc string
+ if err := ec.ExtensionAs(DataContentEncodingKey, &enc); err != nil {
+ return ""
+ }
+ return enc
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go
new file mode 100644
index 00000000000..f3594815614
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go
@@ -0,0 +1,103 @@
+package cloudevents
+
+import (
+ "errors"
+ "fmt"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// Adhere to EventContextWriter
+var _ EventContextWriter = (*EventContextV01)(nil)
+
+// SetSpecVersion implements EventContextWriter.SetSpecVersion
+func (ec *EventContextV01) SetSpecVersion(v string) error {
+ if v != CloudEventsVersionV01 {
+ return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV01)
+ }
+ ec.CloudEventsVersion = CloudEventsVersionV01
+ return nil
+}
+
+// SetDataContentType implements EventContextWriter.SetDataContentType
+func (ec *EventContextV01) SetDataContentType(ct string) error {
+ ct = strings.TrimSpace(ct)
+ if ct == "" {
+ ec.ContentType = nil
+ } else {
+ ec.ContentType = &ct
+ }
+ return nil
+}
+
+// SetType implements EventContextWriter.SetType
+func (ec *EventContextV01) SetType(t string) error {
+ t = strings.TrimSpace(t)
+ ec.EventType = t
+ return nil
+}
+
+// SetSource implements EventContextWriter.SetSource
+func (ec *EventContextV01) SetSource(u string) error {
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.Source = types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetSubject implements EventContextWriter.SetSubject
+func (ec *EventContextV01) SetSubject(s string) error {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ return ec.SetExtension(SubjectKey, nil)
+ }
+ return ec.SetExtension(SubjectKey, s)
+}
+
+// SetID implements EventContextWriter.SetID
+func (ec *EventContextV01) SetID(id string) error {
+ id = strings.TrimSpace(id)
+ if id == "" {
+ return errors.New("event id is required to be a non-empty string")
+ }
+ ec.EventID = id
+ return nil
+}
+
+// SetTime implements EventContextWriter.SetTime
+func (ec *EventContextV01) SetTime(t time.Time) error {
+ if t.IsZero() {
+ ec.EventTime = nil
+ } else {
+ ec.EventTime = &types.Timestamp{Time: t}
+ }
+ return nil
+}
+
+// SetSchemaURL implements EventContextWriter.SetSchemaURL
+func (ec *EventContextV01) SetSchemaURL(u string) error {
+ u = strings.TrimSpace(u)
+ if u == "" {
+ ec.SchemaURL = nil
+ return nil
+ }
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.SchemaURL = &types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding
+func (ec *EventContextV01) SetDataContentEncoding(e string) error {
+ e = strings.ToLower(strings.TrimSpace(e))
+ if e == "" {
+ return ec.SetExtension(DataContentEncodingKey, nil)
+ }
+ return ec.SetExtension(DataContentEncodingKey, e)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go
index 0aa961813f2..332dbff498c 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go
@@ -3,8 +3,7 @@ package cloudevents
import (
"fmt"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
- "mime"
+ "sort"
"strings"
)
@@ -38,53 +37,6 @@ type EventContextV02 struct {
// Adhere to EventContext
var _ EventContext = (*EventContextV02)(nil)
-// GetSpecVersion implements EventContext.GetSpecVersion
-func (ec EventContextV02) GetSpecVersion() string {
- if ec.SpecVersion != "" {
- return ec.SpecVersion
- }
- return CloudEventsVersionV02
-}
-
-// GetDataContentType implements EventContext.GetDataContentType
-func (ec EventContextV02) GetDataContentType() string {
- if ec.ContentType != nil {
- return *ec.ContentType
- }
- return ""
-}
-
-// GetDataMediaType implements EventContext.GetDataMediaType
-func (ec EventContextV02) GetDataMediaType() string {
- if ec.ContentType != nil {
- mediaType, _, err := mime.ParseMediaType(*ec.ContentType)
- if err != nil {
- log.Printf("failed to parse media type from ContentType: %s", err)
- return ""
- }
- return mediaType
- }
- return ""
-}
-
-// GetType implements EventContext.GetType
-func (ec EventContextV02) GetType() string {
- return ec.Type
-}
-
-// GetSource implements EventContext.GetSource
-func (ec EventContextV02) GetSource() string {
- return ec.Source.String()
-}
-
-// GetSchemaURL implements EventContext.GetSchemaURL
-func (ec EventContextV02) GetSchemaURL() string {
- if ec.SchemaURL != nil {
- return ec.SchemaURL.String()
- }
- return ""
-}
-
// ExtensionAs implements EventContext.ExtensionAs
func (ec EventContextV02) ExtensionAs(name string, obj interface{}) error {
value, ok := ec.Extensions[name]
@@ -106,15 +58,25 @@ func (ec EventContextV02) ExtensionAs(name string, obj interface{}) error {
}
// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context.
-func (ec *EventContextV02) SetExtension(name string, value interface{}) {
+func (ec *EventContextV02) SetExtension(name string, value interface{}) error {
if ec.Extensions == nil {
ec.Extensions = make(map[string]interface{})
}
- ec.Extensions[name] = value
+ if value == nil {
+ delete(ec.Extensions, name)
+ } else {
+ ec.Extensions[name] = value
+ }
+ return nil
+}
+
+// Clone implements EventContextConverter.Clone
+func (ec EventContextV02) Clone() EventContext {
+ return ec.AsV02()
}
-// AsV01 implements EventContext.AsV01
-func (ec EventContextV02) AsV01() EventContextV01 {
+// AsV01 implements EventContextConverter.AsV01
+func (ec EventContextV02) AsV01() *EventContextV01 {
ret := EventContextV01{
CloudEventsVersion: CloudEventsVersionV01,
EventID: ec.ID,
@@ -128,7 +90,7 @@ func (ec EventContextV02) AsV01() EventContextV01 {
for k, v := range ec.Extensions {
// eventTypeVersion was retired in v0.2
- if strings.EqualFold(k, "eventTypeVersion") {
+ if strings.EqualFold(k, EventTypeVersionKey) {
etv, ok := v.(string)
if ok && etv != "" {
ret.EventTypeVersion = &etv
@@ -140,17 +102,17 @@ func (ec EventContextV02) AsV01() EventContextV01 {
if len(ret.Extensions) == 0 {
ret.Extensions = nil
}
- return ret
+ return &ret
}
-// AsV02 implements EventContext.AsV02
-func (ec EventContextV02) AsV02() EventContextV02 {
+// AsV02 implements EventContextConverter.AsV02
+func (ec EventContextV02) AsV02() *EventContextV02 {
ec.SpecVersion = CloudEventsVersionV02
- return ec
+ return &ec
}
-// AsV03 implements EventContext.AsV03
-func (ec EventContextV02) AsV03() EventContextV03 {
+// AsV03 implements EventContextConverter.AsV03
+func (ec EventContextV02) AsV03() *EventContextV03 {
ret := EventContextV03{
SpecVersion: CloudEventsVersionV03,
ID: ec.ID,
@@ -159,9 +121,33 @@ func (ec EventContextV02) AsV03() EventContextV03 {
SchemaURL: ec.SchemaURL,
DataContentType: ec.ContentType,
Source: ec.Source,
- Extensions: ec.Extensions,
+ Extensions: make(map[string]interface{}),
+ }
+
+ for k, v := range ec.Extensions {
+ // Subject was introduced in 0.3
+ if strings.EqualFold(k, SubjectKey) {
+ sub, ok := v.(string)
+ if ok && sub != "" {
+ ret.Subject = &sub
+ }
+ continue
+ }
+ // DataContentEncoding was introduced in 0.3
+ if strings.EqualFold(k, DataContentEncodingKey) {
+ etv, ok := v.(string)
+ if ok && etv != "" {
+ ret.DataContentEncoding = &etv
+ }
+ continue
+ }
+ ret.Extensions[k] = v
}
- return ret
+ if len(ret.Extensions) == 0 {
+ ret.Extensions = nil
+ }
+
+ return &ret
}
// Validate returns errors based on requirements from the CloudEvents spec.
@@ -250,3 +236,38 @@ func (ec EventContextV02) Validate() error {
}
return nil
}
+
+// String returns a pretty-printed representation of the EventContext.
+func (ec EventContextV02) String() string {
+ b := strings.Builder{}
+
+ b.WriteString("Context Attributes,\n")
+
+ b.WriteString(" specversion: " + ec.SpecVersion + "\n")
+ b.WriteString(" type: " + ec.Type + "\n")
+ b.WriteString(" source: " + ec.Source.String() + "\n")
+ b.WriteString(" id: " + ec.ID + "\n")
+ if ec.Time != nil {
+ b.WriteString(" time: " + ec.Time.String() + "\n")
+ }
+ if ec.SchemaURL != nil {
+ b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n")
+ }
+ if ec.ContentType != nil {
+ b.WriteString(" contenttype: " + *ec.ContentType + "\n")
+ }
+
+ if ec.Extensions != nil && len(ec.Extensions) > 0 {
+ b.WriteString("Extensions,\n")
+ keys := make([]string, 0, len(ec.Extensions))
+ for k := range ec.Extensions {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key]))
+ }
+ }
+
+ return b.String()
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go
new file mode 100644
index 00000000000..72772eb5307
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go
@@ -0,0 +1,86 @@
+package cloudevents
+
+import (
+ "mime"
+ "time"
+)
+
+// Adhere to EventContextReader
+var _ EventContextReader = (*EventContextV02)(nil)
+
+// GetSpecVersion implements EventContextReader.GetSpecVersion
+func (ec EventContextV02) GetSpecVersion() string {
+ if ec.SpecVersion != "" {
+ return ec.SpecVersion
+ }
+ return CloudEventsVersionV02
+}
+
+// GetType implements EventContextReader.GetType
+func (ec EventContextV02) GetType() string {
+ return ec.Type
+}
+
+// GetSource implements EventContextReader.GetSource
+func (ec EventContextV02) GetSource() string {
+ return ec.Source.String()
+}
+
+// GetSubject implements EventContextReader.GetSubject
+func (ec EventContextV02) GetSubject() string {
+ var sub string
+ if err := ec.ExtensionAs(SubjectKey, &sub); err != nil {
+ return ""
+ }
+ return sub
+}
+
+// GetID implements EventContextReader.GetID
+func (ec EventContextV02) GetID() string {
+ return ec.ID
+}
+
+// GetTime implements EventContextReader.GetTime
+func (ec EventContextV02) GetTime() time.Time {
+ if ec.Time != nil {
+ return ec.Time.Time
+ }
+ return time.Time{}
+}
+
+// GetSchemaURL implements EventContextReader.GetSchemaURL
+func (ec EventContextV02) GetSchemaURL() string {
+ if ec.SchemaURL != nil {
+ return ec.SchemaURL.String()
+ }
+ return ""
+}
+
+// GetDataContentType implements EventContextReader.GetDataContentType
+func (ec EventContextV02) GetDataContentType() string {
+ if ec.ContentType != nil {
+ return *ec.ContentType
+ }
+ return ""
+}
+
+// GetDataMediaType implements EventContextReader.GetDataMediaType
+func (ec EventContextV02) GetDataMediaType() (string, error) {
+ if ec.ContentType != nil {
+ mediaType, _, err := mime.ParseMediaType(*ec.ContentType)
+ if err != nil {
+ return "", err
+ }
+ return mediaType, nil
+ }
+ return "", nil
+}
+
+// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding
+func (ec EventContextV02) GetDataContentEncoding() string {
+ var enc string
+ if err := ec.ExtensionAs(DataContentEncodingKey, &enc); err != nil {
+ return ""
+ }
+ return enc
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go
new file mode 100644
index 00000000000..a67cff01e66
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go
@@ -0,0 +1,103 @@
+package cloudevents
+
+import (
+ "errors"
+ "fmt"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// Adhere to EventContextWriter
+var _ EventContextWriter = (*EventContextV02)(nil)
+
+// SetSpecVersion implements EventContextWriter.SetSpecVersion
+func (ec *EventContextV02) SetSpecVersion(v string) error {
+ if v != CloudEventsVersionV02 {
+ return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV02)
+ }
+ ec.SpecVersion = CloudEventsVersionV02
+ return nil
+}
+
+// SetDataContentType implements EventContextWriter.SetDataContentType
+func (ec *EventContextV02) SetDataContentType(ct string) error {
+ ct = strings.TrimSpace(ct)
+ if ct == "" {
+ ec.ContentType = nil
+ } else {
+ ec.ContentType = &ct
+ }
+ return nil
+}
+
+// SetType implements EventContextWriter.SetType
+func (ec *EventContextV02) SetType(t string) error {
+ t = strings.TrimSpace(t)
+ ec.Type = t
+ return nil
+}
+
+// SetSource implements EventContextWriter.SetSource
+func (ec *EventContextV02) SetSource(u string) error {
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.Source = types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetSubject implements EventContextWriter.SetSubject
+func (ec *EventContextV02) SetSubject(s string) error {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ return ec.SetExtension(SubjectKey, nil)
+ }
+ return ec.SetExtension(SubjectKey, s)
+}
+
+// SetID implements EventContextWriter.SetID
+func (ec *EventContextV02) SetID(id string) error {
+ id = strings.TrimSpace(id)
+ if id == "" {
+ return errors.New("id is required to be a non-empty string")
+ }
+ ec.ID = id
+ return nil
+}
+
+// SetTime implements EventContextWriter.SetTime
+func (ec *EventContextV02) SetTime(t time.Time) error {
+ if t.IsZero() {
+ ec.Time = nil
+ } else {
+ ec.Time = &types.Timestamp{Time: t}
+ }
+ return nil
+}
+
+// SetSchemaURL implements EventContextWriter.SetSchemaURL
+func (ec *EventContextV02) SetSchemaURL(u string) error {
+ u = strings.TrimSpace(u)
+ if u == "" {
+ ec.SchemaURL = nil
+ return nil
+ }
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.SchemaURL = &types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding
+func (ec *EventContextV02) SetDataContentEncoding(e string) error {
+ e = strings.ToLower(strings.TrimSpace(e))
+ if e == "" {
+ return ec.SetExtension(DataContentEncodingKey, nil)
+ }
+ return ec.SetExtension(DataContentEncodingKey, e)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go
index cbb923d1dcd..67af3a00e7a 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go
@@ -3,8 +3,7 @@ package cloudevents
import (
"fmt"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
- "mime"
+ "sort"
"strings"
)
@@ -24,6 +23,9 @@ type EventContextV03 struct {
Type string `json:"type"`
// Source - A URI describing the event producer.
Source types.URLRef `json:"source"`
+ // Subject - The subject of the event in the context of the event producer
+ // (identified by `source`).
+ Subject *string `json:"subject,omitempty"`
// ID of the event; must be non-empty and unique within the scope of the producer.
ID string `json:"id"`
// Time - A Timestamp when the event happened.
@@ -33,6 +35,8 @@ type EventContextV03 struct {
// GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`.
// TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content?
DataContentType *string `json:"datacontenttype,omitempty"`
+ // DataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`.
+ DataContentEncoding *string `json:"datacontentencoding,omitempty"`
// Extensions - Additional extension metadata beyond the base spec.
Extensions map[string]interface{} `json:"-,omitempty"` // TODO: decide how we want extensions to be inserted
}
@@ -40,53 +44,6 @@ type EventContextV03 struct {
// Adhere to EventContext
var _ EventContext = (*EventContextV03)(nil)
-// GetSpecVersion implements EventContext.GetSpecVersion
-func (ec EventContextV03) GetSpecVersion() string {
- if ec.SpecVersion != "" {
- return ec.SpecVersion
- }
- return CloudEventsVersionV03
-}
-
-// GetDataContentType implements EventContext.GetDataContentType
-func (ec EventContextV03) GetDataContentType() string {
- if ec.DataContentType != nil {
- return *ec.DataContentType
- }
- return ""
-}
-
-// GetDataMediaType implements EventContext.GetDataMediaType
-func (ec EventContextV03) GetDataMediaType() string {
- if ec.DataContentType != nil {
- mediaType, _, err := mime.ParseMediaType(*ec.DataContentType)
- if err != nil {
- log.Printf("failed to parse media type from DataContentType: %s", err)
- return ""
- }
- return mediaType
- }
- return ""
-}
-
-// GetType implements EventContext.GetType
-func (ec EventContextV03) GetType() string {
- return ec.Type
-}
-
-// GetSource implements EventContext.GetSource
-func (ec EventContextV03) GetSource() string {
- return ec.Source.String()
-}
-
-// GetSchemaURL implements EventContext.GetSchemaURL
-func (ec EventContextV03) GetSchemaURL() string {
- if ec.SchemaURL != nil {
- return ec.SchemaURL.String()
- }
- return ""
-}
-
// ExtensionAs implements EventContext.ExtensionAs
func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error {
value, ok := ec.Extensions[name]
@@ -108,21 +65,31 @@ func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error {
}
// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context.
-func (ec *EventContextV03) SetExtension(name string, value interface{}) {
+func (ec *EventContextV03) SetExtension(name string, value interface{}) error {
if ec.Extensions == nil {
ec.Extensions = make(map[string]interface{})
}
- ec.Extensions[name] = value
+ if value == nil {
+ delete(ec.Extensions, name)
+ } else {
+ ec.Extensions[name] = value
+ }
+ return nil
}
-// AsV01 implements EventContext.AsV01
-func (ec EventContextV03) AsV01() EventContextV01 {
+// Clone implements EventContextConverter.Clone
+func (ec EventContextV03) Clone() EventContext {
+ return ec.AsV03()
+}
+
+// AsV01 implements EventContextConverter.AsV01
+func (ec EventContextV03) AsV01() *EventContextV01 {
ecv2 := ec.AsV02()
return ecv2.AsV01()
}
-// AsV02 implements EventContext.AsV02
-func (ec EventContextV03) AsV02() EventContextV02 {
+// AsV02 implements EventContextConverter.AsV02
+func (ec EventContextV03) AsV02() *EventContextV02 {
ret := EventContextV02{
SpecVersion: CloudEventsVersionV02,
ID: ec.ID,
@@ -131,20 +98,38 @@ func (ec EventContextV03) AsV02() EventContextV02 {
SchemaURL: ec.SchemaURL,
ContentType: ec.DataContentType,
Source: ec.Source,
- Extensions: ec.Extensions,
+ Extensions: make(map[string]interface{}),
+ }
+ // Subject was introduced in 0.3, so put it in an extension for 0.2.
+ if ec.Subject != nil {
+ ret.SetExtension(SubjectKey, *ec.Subject)
+ }
+ // DataContentEncoding was introduced in 0.3, so put it in an extension for 0.2.
+ if ec.DataContentEncoding != nil {
+ ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding)
+ }
+ if ec.Extensions != nil {
+ for k, v := range ec.Extensions {
+ ret.Extensions[k] = v
+ }
+ }
+ if len(ret.Extensions) == 0 {
+ ret.Extensions = nil
}
- return ret
+ return &ret
}
-// AsV03 implements EventContext.AsV03
-func (ec EventContextV03) AsV03() EventContextV03 {
+// AsV03 implements EventContextConverter.AsV03
+func (ec EventContextV03) AsV03() *EventContextV03 {
ec.SpecVersion = CloudEventsVersionV03
- return ec
+ return &ec
}
// Validate returns errors based on requirements from the CloudEvents spec.
// For more details, see https://github.com/cloudevents/spec/blob/master/spec.md
// As of Feb 26, 2019, commit 17c32ea26baf7714ad027d9917d03d2fff79fc7e
+// + https://github.com/cloudevents/spec/pull/387 -> datacontentencoding
+// + https://github.com/cloudevents/spec/pull/406 -> subject
func (ec EventContextV03) Validate() error {
errors := []string(nil)
@@ -178,6 +163,18 @@ func (ec EventContextV03) Validate() error {
errors = append(errors, "source: REQUIRED")
}
+ // subject
+ // Type: String
+ // Constraints:
+ // OPTIONAL
+ // MUST be a non-empty string
+ if ec.Subject != nil {
+ subject := strings.TrimSpace(*ec.Subject)
+ if subject == "" {
+ errors = append(errors, "subject: if present, MUST be a non-empty string")
+ }
+ }
+
// id
// Type: String
// Constraints:
@@ -224,8 +221,63 @@ func (ec EventContextV03) Validate() error {
}
}
+ // datacontentencoding
+ // Type: String per RFC 2045 Section 6.1
+ // Constraints:
+ // The attribute MUST be set if the data attribute contains string-encoded binary data.
+ // Otherwise the attribute MUST NOT be set.
+ // If present, MUST adhere to RFC 2045 Section 6.1
+ if ec.DataContentEncoding != nil {
+ dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding))
+ if dataContentEncoding != Base64 {
+ // TODO: need to test for RFC 2046
+ errors = append(errors, "datacontentencoding: if present, MUST adhere to RFC 2045 Section 6.1")
+ }
+ }
+
if len(errors) > 0 {
return fmt.Errorf(strings.Join(errors, "\n"))
}
return nil
}
+
+// String returns a pretty-printed representation of the EventContext.
+func (ec EventContextV03) String() string {
+ b := strings.Builder{}
+
+ b.WriteString("Context Attributes,\n")
+
+ b.WriteString(" specversion: " + ec.SpecVersion + "\n")
+ b.WriteString(" type: " + ec.Type + "\n")
+ b.WriteString(" source: " + ec.Source.String() + "\n")
+ if ec.Subject != nil {
+ b.WriteString(" subject: " + *ec.Subject + "\n")
+ }
+ b.WriteString(" id: " + ec.ID + "\n")
+ if ec.Time != nil {
+ b.WriteString(" time: " + ec.Time.String() + "\n")
+ }
+ if ec.SchemaURL != nil {
+ b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n")
+ }
+ if ec.DataContentType != nil {
+ b.WriteString(" datacontenttype: " + *ec.DataContentType + "\n")
+ }
+ if ec.DataContentEncoding != nil {
+ b.WriteString(" datacontentencoding: " + *ec.DataContentEncoding + "\n")
+ }
+
+ if ec.Extensions != nil && len(ec.Extensions) > 0 {
+ b.WriteString("Extensions,\n")
+ keys := make([]string, 0, len(ec.Extensions))
+ for k := range ec.Extensions {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, key := range keys {
+ b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key]))
+ }
+ }
+
+ return b.String()
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go
new file mode 100644
index 00000000000..0aa8d74a384
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go
@@ -0,0 +1,81 @@
+package cloudevents
+
+import (
+ "mime"
+ "time"
+)
+
+// GetSpecVersion implements EventContextReader.GetSpecVersion
+func (ec EventContextV03) GetSpecVersion() string {
+ if ec.SpecVersion != "" {
+ return ec.SpecVersion
+ }
+ return CloudEventsVersionV03
+}
+
+// GetDataContentType implements EventContextReader.GetDataContentType
+func (ec EventContextV03) GetDataContentType() string {
+ if ec.DataContentType != nil {
+ return *ec.DataContentType
+ }
+ return ""
+}
+
+// GetDataMediaType implements EventContextReader.GetDataMediaType
+func (ec EventContextV03) GetDataMediaType() (string, error) {
+ if ec.DataContentType != nil {
+ mediaType, _, err := mime.ParseMediaType(*ec.DataContentType)
+ if err != nil {
+ return "", err
+ }
+ return mediaType, nil
+ }
+ return "", nil
+}
+
+// GetType implements EventContextReader.GetType
+func (ec EventContextV03) GetType() string {
+ return ec.Type
+}
+
+// GetSource implements EventContextReader.GetSource
+func (ec EventContextV03) GetSource() string {
+ return ec.Source.String()
+}
+
+// GetSubject implements EventContextReader.GetSubject
+func (ec EventContextV03) GetSubject() string {
+ if ec.Subject != nil {
+ return *ec.Subject
+ }
+ return ""
+}
+
+// GetTime implements EventContextReader.GetTime
+func (ec EventContextV03) GetTime() time.Time {
+ if ec.Time != nil {
+ return ec.Time.Time
+ }
+ return time.Time{}
+}
+
+// GetID implements EventContextReader.GetID
+func (ec EventContextV03) GetID() string {
+ return ec.ID
+}
+
+// GetSchemaURL implements EventContextReader.GetSchemaURL
+func (ec EventContextV03) GetSchemaURL() string {
+ if ec.SchemaURL != nil {
+ return ec.SchemaURL.String()
+ }
+ return ""
+}
+
+// GetDataContentEncoding implements EventContextReader.GetDataContentEncoding
+func (ec EventContextV03) GetDataContentEncoding() string {
+ if ec.DataContentEncoding != nil {
+ return *ec.DataContentEncoding
+ }
+ return ""
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go
new file mode 100644
index 00000000000..16a06803043
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go
@@ -0,0 +1,107 @@
+package cloudevents
+
+import (
+ "errors"
+ "fmt"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// Adhere to EventContextWriter
+var _ EventContextWriter = (*EventContextV03)(nil)
+
+// SetSpecVersion implements EventContextWriter.SetSpecVersion
+func (ec *EventContextV03) SetSpecVersion(v string) error {
+ if v != CloudEventsVersionV03 {
+ return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV03)
+ }
+ ec.SpecVersion = CloudEventsVersionV03
+ return nil
+}
+
+// SetDataContentType implements EventContextWriter.SetDataContentType
+func (ec *EventContextV03) SetDataContentType(ct string) error {
+ ct = strings.TrimSpace(ct)
+ if ct == "" {
+ ec.DataContentType = nil
+ } else {
+ ec.DataContentType = &ct
+ }
+ return nil
+}
+
+// SetType implements EventContextWriter.SetType
+func (ec *EventContextV03) SetType(t string) error {
+ t = strings.TrimSpace(t)
+ ec.Type = t
+ return nil
+}
+
+// SetSource implements EventContextWriter.SetSource
+func (ec *EventContextV03) SetSource(u string) error {
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.Source = types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetSubject implements EventContextWriter.SetSubject
+func (ec *EventContextV03) SetSubject(s string) error {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ ec.Subject = nil
+ } else {
+ ec.Subject = &s
+ }
+ return nil
+}
+
+// SetID implements EventContextWriter.SetID
+func (ec *EventContextV03) SetID(id string) error {
+ id = strings.TrimSpace(id)
+ if id == "" {
+ return errors.New("id is required to be a non-empty string")
+ }
+ ec.ID = id
+ return nil
+}
+
+// SetTime implements EventContextWriter.SetTime
+func (ec *EventContextV03) SetTime(t time.Time) error {
+ if t.IsZero() {
+ ec.Time = nil
+ } else {
+ ec.Time = &types.Timestamp{Time: t}
+ }
+ return nil
+}
+
+// SetSchemaURL implements EventContextWriter.SetSchemaURL
+func (ec *EventContextV03) SetSchemaURL(u string) error {
+ u = strings.TrimSpace(u)
+ if u == "" {
+ ec.SchemaURL = nil
+ return nil
+ }
+ pu, err := url.Parse(u)
+ if err != nil {
+ return err
+ }
+ ec.SchemaURL = &types.URLRef{URL: *pu}
+ return nil
+}
+
+// SetDataContentEncoding implements EventContextWriter.SetDataContentEncoding
+func (ec *EventContextV03) SetDataContentEncoding(e string) error {
+ e = strings.ToLower(strings.TrimSpace(e))
+ if e == "" {
+ ec.DataContentEncoding = nil
+ } else {
+ ec.DataContentEncoding = &e
+ }
+ return nil
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go
new file mode 100644
index 00000000000..e33205fc8b5
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go
@@ -0,0 +1,13 @@
+package cloudevents
+
+const (
+ // DataContentEncodingKey is the key to DataContentEncoding for versions that do not support data content encoding
+ // directly.
+ DataContentEncodingKey = "datacontentencoding"
+
+ // EventTypeVersionKey is the key to EventTypeVersion for versions that do not support event type version directly.
+ EventTypeVersionKey = "eventTypeVersion"
+
+ // SubjectKey is the key to Subject for versions that do not support subject directly.
+ SubjectKey = "subject"
+)
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go
index f6faf3c49f1..4ef207ccb05 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go
@@ -131,7 +131,7 @@ func (c *Codec) Decode(msg transport.Message) (*cloudevents.Event, error) {
return c.convertEvent(event), nil
}
default:
- return nil, fmt.Errorf("unknown encoding for message %v", msg)
+ return nil, fmt.Errorf("unknown encoding")
}
}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go
index d8171370c2c..93eb3539c9d 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go
@@ -9,7 +9,6 @@ import (
"github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
"net/http"
"net/textproto"
"strings"
@@ -69,7 +68,7 @@ func (v CodecV01) obsDecode(msg transport.Message) (*cloudevents.Event, error) {
case StructuredV01:
return v.decodeStructured(msg)
default:
- return nil, fmt.Errorf("unknown encoding for message %v", msg)
+ return nil, fmt.Errorf("unknown encoding")
}
}
@@ -79,9 +78,9 @@ func (v CodecV01) encodeBinary(e cloudevents.Event) (transport.Message, error) {
return nil, err
}
- body, err := marshalEventData(e.Context.GetDataMediaType(), e.Data)
+ body, err := e.DataBytes()
if err != nil {
- return nil, err
+ panic("encode")
}
msg := &Message{
@@ -92,7 +91,7 @@ func (v CodecV01) encodeBinary(e cloudevents.Event) (transport.Message, error) {
return msg, nil
}
-func (v CodecV01) toHeaders(ec cloudevents.EventContextV01) (http.Header, error) {
+func (v CodecV01) toHeaders(ec *cloudevents.EventContextV01) (http.Header, error) {
// Preserve case in v0.1, even though HTTP headers are case-insensitive.
h := http.Header{}
h["CE-CloudEventsVersion"] = []string{ec.CloudEventsVersion}
@@ -161,8 +160,9 @@ func (v CodecV01) decodeBinary(msg transport.Message) (*cloudevents.Event, error
body = m.Body
}
return &cloudevents.Event{
- Context: ctx,
- Data: body,
+ Context: &ctx,
+ Data: body,
+ DataEncoded: true,
}, nil
}
@@ -171,25 +171,31 @@ func (v CodecV01) fromHeaders(h http.Header) (cloudevents.EventContextV01, error
for k, v := range h {
ck := textproto.CanonicalMIMEHeaderKey(k)
if k != ck {
- log.Printf("[warn] received header with non-canonical form; canonical: %q, got %q", ck, k)
h[ck] = v
}
}
ec := cloudevents.EventContextV01{}
ec.CloudEventsVersion = h.Get("CE-CloudEventsVersion")
+ h.Del("CE-CloudEventsVersion")
ec.EventID = h.Get("CE-EventID")
+ h.Del("CE-EventID")
ec.EventType = h.Get("CE-EventType")
+ h.Del("CE-EventType")
source := types.ParseURLRef(h.Get("CE-Source"))
+ h.Del("CE-Source")
if source != nil {
ec.Source = *source
}
ec.EventTime = types.ParseTimestamp(h.Get("CE-EventTime"))
+ h.Del("CE-EventTime")
etv := h.Get("CE-EventTypeVersion")
+ h.Del("CE-EventTypeVersion")
if etv != "" {
ec.EventTypeVersion = &etv
}
ec.SchemaURL = types.ParseURLRef(h.Get("CE-SchemaURL"))
+ h.Del("CE-SchemaURL")
et := h.Get("Content-Type")
ec.ContentType = &et
@@ -204,6 +210,7 @@ func (v CodecV01) fromHeaders(h http.Header) (cloudevents.EventContextV01, error
// If we can't unmarshal the data, treat it as a string.
extensions[key] = v[0]
}
+ h.Del(k)
}
}
if len(extensions) > 0 {
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go
index 3c04b9aabc1..1e1d9996d9b 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go
@@ -9,7 +9,6 @@ import (
"github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
"net/http"
"net/textproto"
"strings"
@@ -69,7 +68,7 @@ func (v CodecV02) obsDecode(msg transport.Message) (*cloudevents.Event, error) {
case StructuredV02:
return v.decodeStructured(msg)
default:
- return nil, fmt.Errorf("unknown encoding for message %v", msg)
+ return nil, fmt.Errorf("unknown encoding")
}
}
@@ -78,8 +77,7 @@ func (v CodecV02) encodeBinary(e cloudevents.Event) (transport.Message, error) {
if err != nil {
return nil, err
}
-
- body, err := marshalEventData(e.Context.GetDataMediaType(), e.Data)
+ body, err := e.DataBytes()
if err != nil {
return nil, err
}
@@ -92,7 +90,7 @@ func (v CodecV02) encodeBinary(e cloudevents.Event) (transport.Message, error) {
return msg, nil
}
-func (v CodecV02) toHeaders(ec cloudevents.EventContextV02) (http.Header, error) {
+func (v CodecV02) toHeaders(ec *cloudevents.EventContextV02) (http.Header, error) {
h := http.Header{}
h.Set("ce-specversion", ec.SpecVersion)
h.Set("ce-type", ec.Type)
@@ -166,8 +164,9 @@ func (v CodecV02) decodeBinary(msg transport.Message) (*cloudevents.Event, error
body = m.Body
}
return &cloudevents.Event{
- Context: ctx,
- Data: body,
+ Context: &ctx,
+ Data: body,
+ DataEncoded: true,
}, nil
}
@@ -176,7 +175,6 @@ func (v CodecV02) fromHeaders(h http.Header) (cloudevents.EventContextV02, error
for k, v := range h {
ck := textproto.CanonicalMIMEHeaderKey(k)
if k != ck {
- log.Printf("[warn] received header with non-canonical form; canonical: %q, got %q", ck, k)
delete(h, k)
h[ck] = v
}
@@ -274,8 +272,9 @@ func (v CodecV02) decodeStructured(msg transport.Message) (*cloudevents.Event, e
}
return &cloudevents.Event{
- Context: ec,
- Data: data,
+ Context: &ec,
+ Data: data,
+ DataEncoded: true,
}, nil
}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go
index f2964ed1bb1..e932dc740bf 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go
@@ -4,15 +4,15 @@ import (
"context"
"encoding/json"
"fmt"
+ "net/http"
+ "net/textproto"
+ "strings"
+
"github.com/cloudevents/sdk-go/pkg/cloudevents"
"github.com/cloudevents/sdk-go/pkg/cloudevents/codec"
"github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport"
"github.com/cloudevents/sdk-go/pkg/cloudevents/types"
- "log"
- "net/http"
- "net/textproto"
- "strings"
)
// CodecV03 represents a http transport codec that uses CloudEvents spec v0.3
@@ -73,7 +73,7 @@ func (v CodecV03) obsDecode(msg transport.Message) (*cloudevents.Event, error) {
case BatchedV03:
return nil, fmt.Errorf("not implemented")
default:
- return nil, fmt.Errorf("unknown encoding for message %v", msg)
+ return nil, fmt.Errorf("unknown encoding")
}
}
@@ -83,7 +83,7 @@ func (v CodecV03) encodeBinary(e cloudevents.Event) (transport.Message, error) {
return nil, err
}
- body, err := marshalEventData(e.Context.GetDataMediaType(), e.Data)
+ body, err := e.DataBytes()
if err != nil {
return nil, err
}
@@ -96,11 +96,14 @@ func (v CodecV03) encodeBinary(e cloudevents.Event) (transport.Message, error) {
return msg, nil
}
-func (v CodecV03) toHeaders(ec cloudevents.EventContextV03) (http.Header, error) {
+func (v CodecV03) toHeaders(ec *cloudevents.EventContextV03) (http.Header, error) {
h := http.Header{}
h.Set("ce-specversion", ec.SpecVersion)
h.Set("ce-type", ec.Type)
h.Set("ce-source", ec.Source.String())
+ if ec.Subject != nil {
+ h.Set("ce-subject", *ec.Subject)
+ }
h.Set("ce-id", ec.ID)
if ec.Time != nil && !ec.Time.IsZero() {
h.Set("ce-time", ec.Time.String())
@@ -108,7 +111,6 @@ func (v CodecV03) toHeaders(ec cloudevents.EventContextV03) (http.Header, error)
if ec.SchemaURL != nil {
h.Set("ce-schemaurl", ec.SchemaURL.String())
}
-
if ec.DataContentType != nil {
h.Set("Content-Type", *ec.DataContentType)
} else if v.Encoding == Default || v.Encoding == BinaryV03 {
@@ -117,6 +119,10 @@ func (v CodecV03) toHeaders(ec cloudevents.EventContextV03) (http.Header, error)
// TODO: not sure what the default should be?
h.Set("Content-Type", cloudevents.ApplicationJSON)
}
+ if ec.DataContentEncoding != nil {
+ h.Set("ce-datacontentencoding", *ec.DataContentEncoding)
+ }
+
for k, v := range ec.Extensions {
// Per spec, map-valued extensions are converted to a list of headers as:
// CE-attrib-key
@@ -171,8 +177,9 @@ func (v CodecV03) decodeBinary(msg transport.Message) (*cloudevents.Event, error
body = m.Body
}
return &cloudevents.Event{
- Context: ctx,
- Data: body,
+ Context: &ctx,
+ Data: body,
+ DataEncoded: true,
}, nil
}
@@ -181,7 +188,6 @@ func (v CodecV03) fromHeaders(h http.Header) (cloudevents.EventContextV03, error
for k, v := range h {
ck := textproto.CanonicalMIMEHeaderKey(k)
if k != ck {
- log.Printf("[warn] received header with non-canonical form; canonical: %q, got %q", ck, k)
delete(h, k)
h[ck] = v
}
@@ -204,6 +210,12 @@ func (v CodecV03) fromHeaders(h http.Header) (cloudevents.EventContextV03, error
}
h.Del("ce-source")
+ subject := h.Get("ce-subject")
+ if subject != "" {
+ ec.Subject = &subject
+ }
+ h.Del("ce-subject")
+
ec.Time = types.ParseTimestamp(h.Get("ce-time"))
h.Del("ce-time")
@@ -216,6 +228,12 @@ func (v CodecV03) fromHeaders(h http.Header) (cloudevents.EventContextV03, error
}
h.Del("Content-Type")
+ dataContentEncoding := h.Get("ce-datacontentencoding")
+ if dataContentEncoding != "" {
+ ec.DataContentEncoding = &dataContentEncoding
+ }
+ h.Del("ce-datacontentencoding")
+
// At this point, we have deleted all the known headers.
// Everything left is assumed to be an extension.
@@ -279,8 +297,9 @@ func (v CodecV03) decodeStructured(msg transport.Message) (*cloudevents.Event, e
}
return &cloudevents.Event{
- Context: ec,
- Data: data,
+ Context: &ec,
+ Data: data,
+ DataEncoded: true,
}, nil
}
diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go
index 5deeb69e53c..258691c4815 100644
--- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go
+++ b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go
@@ -4,17 +4,19 @@ import (
"bytes"
"context"
"fmt"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
"io/ioutil"
- "log"
"net"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
+ "go.uber.org/zap"
+
"github.com/cloudevents/sdk-go/pkg/cloudevents"
cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/observability"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport"
)
@@ -32,6 +34,7 @@ const (
type Transport struct {
// The encoding used to select the codec for outbound events.
Encoding Encoding
+
// DefaultEncodingSelectionFn allows for other encoding selection strategies to be injected.
DefaultEncodingSelectionFn EncodingSelector
@@ -45,7 +48,7 @@ type Transport struct {
// If nil, the Transport will create a one.
Client *http.Client
// Req is the base http request that is used for http.Do.
- // Only .Method, .URL, and .Header is considered.
+ // Only .Method, .URL, .Close, and .Header are considered.
// If not set, Req.Method defaults to POST.
// Req.URL or context.WithTarget(url) are required for sending.
Req *http.Request
@@ -93,15 +96,21 @@ func (t *Transport) applyOptions(opts ...Option) error {
return nil
}
-func (t *Transport) loadCodec() bool {
+func (t *Transport) loadCodec(ctx context.Context) bool {
if t.codec == nil {
t.crMu.Lock()
if t.DefaultEncodingSelectionFn != nil && t.Encoding != Default {
- log.Printf("[warn] Transport has a DefaultEncodingSelectionFn set but Encoding is not Default. DefaultEncodingSelectionFn will be ignored.")
- }
- t.codec = &Codec{
- Encoding: t.Encoding,
- DefaultEncodingSelectionFn: t.DefaultEncodingSelectionFn,
+ logger := cecontext.LoggerFrom(ctx)
+ logger.Warn("transport has a DefaultEncodingSelectionFn set but Encoding is not Default. DefaultEncodingSelectionFn will be ignored.")
+
+ t.codec = &Codec{
+ Encoding: t.Encoding,
+ }
+ } else {
+ t.codec = &Codec{
+ Encoding: t.Encoding,
+ DefaultEncodingSelectionFn: t.DefaultEncodingSelectionFn,
+ }
}
t.crMu.Unlock()
}
@@ -144,6 +153,7 @@ func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (*clou
if t.Req != nil {
req.Method = t.Req.Method
req.URL = t.Req.URL
+ req.Close = t.Req.Close
copyHeaders(t.Req.Header, req.Header)
}
@@ -152,7 +162,7 @@ func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (*clou
req.URL = target
}
- if ok := t.loadCodec(); !ok {
+ if ok := t.loadCodec(ctx); !ok {
return nil, fmt.Errorf("unknown encoding set on transport: %d", t.Encoding)
}
@@ -166,9 +176,9 @@ func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (*clou
req.Body = ioutil.NopCloser(bytes.NewBuffer(m.Body))
req.ContentLength = int64(len(m.Body))
- req.Close = true
return httpDo(ctx, t.Client, &req, func(resp *http.Response, err error) (*cloudevents.Event, error) {
+ logger := cecontext.LoggerFrom(ctx)
if err != nil {
return nil, err
}
@@ -182,19 +192,19 @@ func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (*clou
var respEvent *cloudevents.Event
if msg.CloudEventsVersion() != "" {
- if ok := t.loadCodec(); !ok {
+ if ok := t.loadCodec(ctx); !ok {
err := fmt.Errorf("unknown encoding set on transport: %d", t.Encoding)
- log.Printf("failed to load codec: %s", err)
+ logger.Error("failed to load codec", zap.Error(err))
}
if respEvent, err = t.codec.Decode(msg); err != nil {
- log.Printf("failed to decode message: %s %v", err, resp)
+ logger.Error("failed to decode message", zap.Error(err))
}
}
if accepted(resp) {
return respEvent, nil
}
- return respEvent, fmt.Errorf("error sending cloudevent: %s", status(resp))
+ return respEvent, fmt.Errorf("error sending cloudevent: %s", resp.Status)
})
}
return nil, fmt.Errorf("failed to encode Event into a Message")
@@ -290,16 +300,6 @@ func accepted(resp *http.Response) bool {
return false
}
-// status is a helper method to read the response of the target.
-func status(resp *http.Response) string {
- status := resp.Status
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return fmt.Sprintf("Status[%s] error reading response body: %v", status, err)
- }
- return fmt.Sprintf("Status[%s] %s", status, body)
-}
-
func (t *Transport) invokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) {
ctx, r := observability.NewReporter(ctx, reportReceive)
resp, err := t.obsInvokeReceiver(ctx, event)
@@ -312,6 +312,7 @@ func (t *Transport) invokeReceiver(ctx context.Context, event cloudevents.Event)
}
func (t *Transport) obsInvokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) {
+ logger := cecontext.LoggerFrom(ctx)
if t.Receiver != nil {
// Note: http does not use eventResp.Reason
eventResp := cloudevents.EventResponse{}
@@ -319,21 +320,21 @@ func (t *Transport) obsInvokeReceiver(ctx context.Context, event cloudevents.Eve
err := t.Receiver.Receive(ctx, event, &eventResp)
if err != nil {
- log.Printf("got an error from receiver fn: %s", err.Error())
+ logger.Warnw("got an error from receiver fn: %s", zap.Error(err))
resp.StatusCode = http.StatusInternalServerError
return &resp, err
}
if eventResp.Event != nil {
- if t.loadCodec() {
+ if t.loadCodec(ctx) {
if m, err := t.codec.Encode(*eventResp.Event); err != nil {
- log.Printf("failed to encode response from receiver fn: %s", err.Error())
+ logger.Errorw("failed to encode response from receiver fn", zap.Error(err))
} else if msg, ok := m.(*Message); ok {
resp.Header = msg.Header
resp.Body = msg.Body
}
} else {
- log.Printf("failed to load codec")
+ logger.Error("failed to load codec")
resp.StatusCode = http.StatusInternalServerError
return &resp, err
}
@@ -365,10 +366,11 @@ func (t *Transport) obsInvokeReceiver(ctx context.Context, event cloudevents.Eve
// ServeHTTP implements http.Handler
func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) {
ctx, r := observability.NewReporter(req.Context(), reportServeHTTP)
+ logger := cecontext.LoggerFrom(ctx)
body, err := ioutil.ReadAll(req.Body)
if err != nil {
- log.Printf("failed to handle request: %s %v", err, req)
+ logger.Errorw("failed to handle request", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"error":"Invalid request"}`))
r.Error()
@@ -379,9 +381,9 @@ func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) {
Body: body,
}
- if ok := t.loadCodec(); !ok {
+ if ok := t.loadCodec(ctx); !ok {
err := fmt.Errorf("unknown encoding set on transport: %d", t.Encoding)
- log.Printf("failed to load codec: %s", err)
+ logger.Errorw("failed to load codec", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error())))
r.Error()
@@ -389,7 +391,7 @@ func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) {
}
event, err := t.codec.Decode(msg)
if err != nil {
- log.Printf("failed to decode message: %s %v", err, req)
+ logger.Errorw("failed to decode message", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error())))
r.Error()
@@ -402,11 +404,13 @@ func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) {
resp, err := t.invokeReceiver(ctx, *event)
if err != nil {
+ logger.Warnw("error returned from invokeReceiver", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error())))
r.Error()
return
}
+
if resp != nil {
if t.Req != nil {
copyHeaders(t.Req.Header, w.Header())
@@ -414,17 +418,18 @@ func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if len(resp.Header) > 0 {
copyHeaders(resp.Header, w.Header())
}
- status := http.StatusAccepted
- if resp.StatusCode >= 200 && resp.StatusCode < 600 {
- status = resp.StatusCode
- }
- w.WriteHeader(status)
+ w.Header().Add("Content-Length", strconv.Itoa(len(resp.Body)))
if len(resp.Body) > 0 {
if _, err := w.Write(resp.Body); err != nil {
r.Error()
return
}
}
+ status := http.StatusAccepted
+ if resp.StatusCode >= 200 && resp.StatusCode < 600 {
+ status = resp.StatusCode
+ }
+ w.WriteHeader(status)
r.OK()
return
From 37107b20e9bd6e2d75fee82069c4374933cd596b Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 10:46:13 -0700
Subject: [PATCH 15/76] Be specific when CloudEvents is assumed
When we have documentation and features that assume events will be
formatted according to the CloudEvents spec, say that clearly in the
docs.
---
pkg/apis/eventing/v1alpha1/trigger_types.go | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go
index d6ff789aaec..91e70cc783b 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_types.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_types.go
@@ -64,9 +64,11 @@ type TriggerSpec struct {
Subscriber *SubscriberSpec `json:"subscriber,omitempty"`
}
+// TriggerFilter specifies the event filtering strategy for the Trigger. Only
+// one field may be set.
type TriggerFilter struct {
- // SourceAndType filters events based on exact matches on the type and source
- // attributes.
+ // SourceAndType filters events based on exact matches on the CloudEvents
+ // type and source attributes.
//
// +optional
SourceAndType *TriggerFilterSourceAndType `json:"sourceAndType,omitempty"`
@@ -91,15 +93,15 @@ type TriggerFilterSourceAndType struct {
type TriggerFilterCEL struct {
// Expression is the CEL expression to evaluate. Required.
Expression string `json:"expression,omitempty"`
- // ParseExtensions enables parsing of dynamic extensions attached to the event
- // and makes the extensions available in the CEL environment. If extensions
- // cannot be parsed they will be ignored. Defaults to false.
+ // ParseExtensions enables parsing of dynamic CloudEvents extensions attached
+ // to the event and makes the extensions available in the CEL environment. If
+ // extensions cannot be parsed they will be ignored. Defaults to false.
//
// +optional
ParseExtensions bool `json:"parseExtensions"`
// ParseData enables parsing of the event data and makes the parsed data
// available in the CEL environment. Currently this is only available for
- // JSON data. Defaults to false.
+ // the `application/json` data content type. Defaults to false.
//
// +optional
ParseData bool `json:"parseData"`
From d69672aa83ea2cc4b9ba096f1fa1e09eeb1372c1 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 10:49:53 -0700
Subject: [PATCH 16/76] Use omitempty for boolean fields in Trigger
These fields are optional and default false so they can be safely
omitted.
---
pkg/apis/eventing/v1alpha1/trigger_types.go | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go
index 91e70cc783b..89884d68c11 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_types.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_types.go
@@ -98,13 +98,13 @@ type TriggerFilterCEL struct {
// extensions cannot be parsed they will be ignored. Defaults to false.
//
// +optional
- ParseExtensions bool `json:"parseExtensions"`
+ ParseExtensions bool `json:"parseExtensions,omitempty"`
// ParseData enables parsing of the event data and makes the parsed data
// available in the CEL environment. Currently this is only available for
// the `application/json` data content type. Defaults to false.
//
// +optional
- ParseData bool `json:"parseData"`
+ ParseData bool `json:"parseData,omitempty"`
}
// TriggerStatus represents the current state of a Trigger.
From 0b8927acec584760bca244f637e7a6fb65e855b5 Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Mon, 15 Apr 2019 13:12:00 -0700
Subject: [PATCH 17/76] Triggers working via path, rather than host (#1045)
* Initial work on Triggers working via path, rather than host (which requires Istio).
* Move path manipulation functions to a single package.
* Fix the unit tests.
* Add a unit test for the Broker Filter service not being found.
* Update the comment.
* Add a test for mapBrokerToTriggers.
* More unit tests around mapBrokerToTriggers.Map.
* Switch provisioner.ChannelReference to types.NamespacedName.
---
.../eventing/v1alpha1/trigger_lifecycle.go | 14 +-
.../v1alpha1/trigger_lifecycle_test.go | 60 +---
pkg/broker/receiver.go | 24 +-
pkg/broker/receiver_test.go | 33 +-
.../v1alpha1/broker/resources/filter.go | 14 +-
.../v1alpha1/subscription/subscription.go | 2 +-
pkg/reconciler/v1alpha1/trigger/path/path.go | 53 +++
.../v1alpha1/trigger/resources/labels.go | 38 ---
.../v1alpha1/trigger/resources/service.go | 52 ---
.../trigger/resources/subscription.go | 21 +-
.../trigger/resources/virtual_service.go | 67 ----
pkg/reconciler/v1alpha1/trigger/trigger.go | 137 ++------
.../v1alpha1/trigger/trigger_test.go | 308 +++++++++++-------
13 files changed, 331 insertions(+), 492 deletions(-)
create mode 100644 pkg/reconciler/v1alpha1/trigger/path/path.go
delete mode 100644 pkg/reconciler/v1alpha1/trigger/resources/labels.go
delete mode 100644 pkg/reconciler/v1alpha1/trigger/resources/service.go
delete mode 100644 pkg/reconciler/v1alpha1/trigger/resources/virtual_service.go
diff --git a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go b/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
index dee7a8cf8c9..07c20ffb99c 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
@@ -18,17 +18,13 @@ package v1alpha1
import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
-var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBrokerExists, TriggerConditionKubernetesService, TriggerConditionVirtualService, TriggerConditionSubscribed)
+var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBrokerExists, TriggerConditionSubscribed)
const (
// TriggerConditionReady has status True when all subconditions below have been set to True.
TriggerConditionReady = duckv1alpha1.ConditionReady
TriggerConditionBrokerExists duckv1alpha1.ConditionType = "BrokerExists"
- TriggerConditionKubernetesService duckv1alpha1.ConditionType = "KubernetesServiceReady"
-
- TriggerConditionVirtualService duckv1alpha1.ConditionType = "VirtualServiceReady"
-
TriggerConditionSubscribed duckv1alpha1.ConditionType = "Subscribed"
// TriggerAnyFilter Constant to represent that we should allow anything.
@@ -58,14 +54,6 @@ func (ts *TriggerStatus) MarkBrokerDoesNotExist() {
triggerCondSet.Manage(ts).MarkFalse(TriggerConditionBrokerExists, "doesNotExist", "Broker does not exist")
}
-func (ts *TriggerStatus) MarkKubernetesServiceExists() {
- triggerCondSet.Manage(ts).MarkTrue(TriggerConditionKubernetesService)
-}
-
-func (ts *TriggerStatus) MarkVirtualServiceExists() {
- triggerCondSet.Manage(ts).MarkTrue(TriggerConditionVirtualService)
-}
-
func (ts *TriggerStatus) MarkSubscribed() {
triggerCondSet.Manage(ts).MarkTrue(TriggerConditionSubscribed)
}
diff --git a/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
index 0b37671b77c..86759afbd12 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
@@ -35,16 +35,6 @@ var (
Status: corev1.ConditionTrue,
}
- triggerConditionKubernetesService = duckv1alpha1.Condition{
- Type: TriggerConditionKubernetesService,
- Status: corev1.ConditionTrue,
- }
-
- triggerConditionVirtualService = duckv1alpha1.Condition{
- Type: TriggerConditionVirtualService,
- Status: corev1.ConditionTrue,
- }
-
triggerConditionSubscribed = duckv1alpha1.Condition{
Type: TriggerConditionSubscribed,
Status: corev1.ConditionFalse,
@@ -74,19 +64,18 @@ func TestTriggerGetCondition(t *testing.T) {
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{
triggerConditionBrokerExists,
- triggerConditionKubernetesService,
+ triggerConditionSubscribed,
},
},
},
- condQuery: TriggerConditionKubernetesService,
- want: &triggerConditionKubernetesService,
+ condQuery: TriggerConditionSubscribed,
+ want: &triggerConditionSubscribed,
}, {
name: "multiple conditions, condition false",
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{
triggerConditionBrokerExists,
- triggerConditionKubernetesService,
triggerConditionSubscribed,
},
},
@@ -98,7 +87,6 @@ func TestTriggerGetCondition(t *testing.T) {
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{
- triggerConditionVirtualService,
triggerConditionSubscribed,
},
},
@@ -130,18 +118,12 @@ func TestTriggerInitializeConditions(t *testing.T) {
Conditions: []duckv1alpha1.Condition{{
Type: TriggerConditionBrokerExists,
Status: corev1.ConditionUnknown,
- }, {
- Type: TriggerConditionKubernetesService,
- Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionSubscribed,
Status: corev1.ConditionUnknown,
- }, {
- Type: TriggerConditionVirtualService,
- Status: corev1.ConditionUnknown,
}},
},
},
@@ -150,7 +132,7 @@ func TestTriggerInitializeConditions(t *testing.T) {
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
- Type: TriggerConditionVirtualService,
+ Type: TriggerConditionBrokerExists,
Status: corev1.ConditionFalse,
}},
},
@@ -159,19 +141,13 @@ func TestTriggerInitializeConditions(t *testing.T) {
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
Type: TriggerConditionBrokerExists,
- Status: corev1.ConditionUnknown,
- }, {
- Type: TriggerConditionKubernetesService,
- Status: corev1.ConditionUnknown,
+ Status: corev1.ConditionFalse,
}, {
Type: TriggerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionSubscribed,
Status: corev1.ConditionUnknown,
- }, {
- Type: TriggerConditionVirtualService,
- Status: corev1.ConditionFalse,
}},
},
},
@@ -190,18 +166,12 @@ func TestTriggerInitializeConditions(t *testing.T) {
Conditions: []duckv1alpha1.Condition{{
Type: TriggerConditionBrokerExists,
Status: corev1.ConditionUnknown,
- }, {
- Type: TriggerConditionKubernetesService,
- Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionReady,
Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionSubscribed,
Status: corev1.ConditionTrue,
- }, {
- Type: TriggerConditionVirtualService,
- Status: corev1.ConditionUnknown,
}},
},
},
@@ -239,20 +209,6 @@ func TestTriggerIsReady(t *testing.T) {
markVirtualServiceExists: true,
markSubscribed: true,
wantReady: false,
- }, {
- name: "k8s service sad",
- markBrokerExists: true,
- markKubernetesServiceExists: false,
- markVirtualServiceExists: true,
- markSubscribed: true,
- wantReady: false,
- }, {
- name: "virtual service sad",
- markBrokerExists: true,
- markKubernetesServiceExists: true,
- markVirtualServiceExists: false,
- markSubscribed: true,
- wantReady: false,
}, {
name: "subscribed sad",
markBrokerExists: true,
@@ -274,12 +230,6 @@ func TestTriggerIsReady(t *testing.T) {
if test.markBrokerExists {
ts.MarkBrokerExists()
}
- if test.markKubernetesServiceExists {
- ts.MarkKubernetesServiceExists()
- }
- if test.markVirtualServiceExists {
- ts.MarkVirtualServiceExists()
- }
if test.markSubscribed {
ts.MarkSubscribed()
}
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index 632cc024b56..b9797a78277 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -27,7 +27,7 @@ import (
ceclient "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/provisioners"
+ "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -120,15 +120,10 @@ func (r *Receiver) serveHTTP(ctx context.Context, event cloudevents.Event, resp
}
// tctx.URI is actually the path...
- if tctx.URI != "/" {
- resp.Status = http.StatusNotFound
- return nil
- }
-
- triggerRef, err := provisioners.ParseChannel(tctx.Host)
+ triggerRef, err := path.Parse(tctx.URI)
if err != nil {
- r.logger.Error("Unable to parse host as a trigger", zap.Error(err), zap.String("host", tctx.Host))
- return errors.New("unable to parse host as a Trigger")
+ r.logger.Info("Unable to parse path as a trigger", zap.Error(err), zap.String("path", tctx.URI))
+ return errors.New("unable to parse path as a Trigger")
}
// Remove the TTL attribute that is used by the Broker.
@@ -170,7 +165,7 @@ func (r *Receiver) serveHTTP(ctx context.Context, event cloudevents.Event, resp
}
// sendEvent sends an event to a subscriber if the trigger filter passes.
-func (r *Receiver) sendEvent(ctx context.Context, tctx cehttp.TransportContext, trigger provisioners.ChannelReference, event *cloudevents.Event) (*cloudevents.Event, error) {
+func (r *Receiver) sendEvent(ctx context.Context, tctx cehttp.TransportContext, trigger types.NamespacedName, event *cloudevents.Event) (*cloudevents.Event, error) {
t, err := r.getTrigger(ctx, trigger)
if err != nil {
r.logger.Info("Unable to get the Trigger", zap.Error(err), zap.Any("triggerRef", trigger))
@@ -199,14 +194,9 @@ func (r *Receiver) sendEvent(ctx context.Context, tctx cehttp.TransportContext,
return r.ceClient.Send(sendingCTX, *event)
}
-func (r *Receiver) getTrigger(ctx context.Context, ref provisioners.ChannelReference) (*eventingv1alpha1.Trigger, error) {
+func (r *Receiver) getTrigger(ctx context.Context, ref types.NamespacedName) (*eventingv1alpha1.Trigger, error) {
t := &eventingv1alpha1.Trigger{}
- err := r.client.Get(ctx,
- types.NamespacedName{
- Namespace: ref.Namespace,
- Name: ref.Name,
- },
- t)
+ err := r.client.Get(ctx, ref, t)
return t, err
}
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index cfa26f49a32..5a46740c376 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -51,7 +51,8 @@ const (
)
var (
- host = fmt.Sprintf("%s.%s.triggers.%s", triggerName, testNS, utils.GetClusterDomainName())
+ host = fmt.Sprintf("%s.%s.triggers.%s", triggerName, testNS, utils.GetClusterDomainName())
+ validPath = fmt.Sprintf("/triggers/%s/%s", testNS, triggerName)
)
func init() {
@@ -87,23 +88,39 @@ func TestReceiver(t *testing.T) {
tctx: &cehttp.TransportContext{
Method: "GET",
Host: host,
- URI: "/",
+ URI: validPath,
},
expectedStatus: http.StatusMethodNotAllowed,
},
- "Other path": {
+ "Path too short": {
tctx: &cehttp.TransportContext{
Method: "POST",
Host: host,
- URI: "/someotherEndpoint",
+ URI: "/test-namespace/test-trigger",
},
- expectedStatus: http.StatusNotFound,
+ expectedErr: true,
+ },
+ "Path too long": {
+ tctx: &cehttp.TransportContext{
+ Method: "POST",
+ Host: host,
+ URI: "/triggers/test-namespace/test-trigger/extra",
+ },
+ expectedErr: true,
+ },
+ "Path without prefix": {
+ tctx: &cehttp.TransportContext{
+ Method: "POST",
+ Host: host,
+ URI: "/something/test-namespace/test-trigger",
+ },
+ expectedErr: true,
},
"Bad host": {
tctx: &cehttp.TransportContext{
Method: "POST",
Host: "badhost-cant-be-parsed-as-a-trigger-name-plus-namespace",
- URI: "/",
+ URI: validPath,
},
expectedErr: true,
},
@@ -178,7 +195,7 @@ func TestReceiver(t *testing.T) {
tctx: &cehttp.TransportContext{
Method: "POST",
Host: host,
- URI: "/",
+ URI: validPath,
Header: http.Header{
// foo won't pass filtering.
"foo": []string{"bar"},
@@ -248,7 +265,7 @@ func TestReceiver(t *testing.T) {
tctx = &cehttp.TransportContext{
Method: http.MethodPost,
Host: host,
- URI: "/",
+ URI: validPath,
}
}
ctx := cehttp.WithTransportContext(context.Background(), *tctx)
diff --git a/pkg/reconciler/v1alpha1/broker/resources/filter.go b/pkg/reconciler/v1alpha1/broker/resources/filter.go
index 62177330b5d..73903b986eb 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/filter.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/filter.go
@@ -48,15 +48,15 @@ func MakeFilterDeployment(args *FilterArgs) *appsv1.Deployment {
Kind: "Broker",
}),
},
- Labels: filterLabels(args.Broker),
+ Labels: FilterLabels(args.Broker),
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
- MatchLabels: filterLabels(args.Broker),
+ MatchLabels: FilterLabels(args.Broker),
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: filterLabels(args.Broker),
+ Labels: FilterLabels(args.Broker),
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
},
@@ -91,7 +91,7 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
ObjectMeta: metav1.ObjectMeta{
Namespace: b.Namespace,
Name: fmt.Sprintf("%s-broker-filter", b.Name),
- Labels: filterLabels(b),
+ Labels: FilterLabels(b),
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(b, schema.GroupVersionKind{
Group: eventingv1alpha1.SchemeGroupVersion.Group,
@@ -101,7 +101,7 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
},
},
Spec: corev1.ServiceSpec{
- Selector: filterLabels(b),
+ Selector: FilterLabels(b),
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -113,7 +113,9 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
}
}
-func filterLabels(b *eventingv1alpha1.Broker) map[string]string {
+// FilterLabels generates the labels present on all resources representing the filter of the given
+// Broker.
+func FilterLabels(b *eventingv1alpha1.Broker) map[string]string {
return map[string]string{
"eventing.knative.dev/broker": b.Name,
"eventing.knative.dev/brokerRole": "filter",
diff --git a/pkg/reconciler/v1alpha1/subscription/subscription.go b/pkg/reconciler/v1alpha1/subscription/subscription.go
index 5b67d66ccfb..12e1cb1a821 100644
--- a/pkg/reconciler/v1alpha1/subscription/subscription.go
+++ b/pkg/reconciler/v1alpha1/subscription/subscription.go
@@ -102,7 +102,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
err := r.client.Get(ctx, request.NamespacedName, subscription)
if errors.IsNotFound(err) {
- logging.FromContext(ctx).Error("Could not find Subscription")
+ logging.FromContext(ctx).Info("Could not find Subscription")
return reconcile.Result{}, nil
}
diff --git a/pkg/reconciler/v1alpha1/trigger/path/path.go b/pkg/reconciler/v1alpha1/trigger/path/path.go
new file mode 100644
index 00000000000..83a1dce4e4d
--- /dev/null
+++ b/pkg/reconciler/v1alpha1/trigger/path/path.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package path
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+ prefix = "triggers"
+)
+
+// Generate generates the Path portion of a URI to send events to the given Trigger.
+func Generate(t *v1alpha1.Trigger) string {
+ return fmt.Sprintf("/%s/%s/%s", prefix, t.Namespace, t.Name)
+}
+
+// Parse parses the Path portion of a URI to determine which Trigger the request corresponds to. It
+// is expected to be in the form "/triggers/namespace/name".
+func Parse(path string) (types.NamespacedName, error) {
+ parts := strings.Split(path, "/")
+ if len(parts) != 4 {
+ return types.NamespacedName{}, fmt.Errorf("incorrect number of parts in the path, expected 4, actual %d, '%s'", len(parts), path)
+ }
+ if parts[0] != "" {
+ return types.NamespacedName{}, fmt.Errorf("text before the first slash, actual '%s'", path)
+ }
+ if parts[1] != prefix {
+ return types.NamespacedName{}, fmt.Errorf("incorrect prefix, expected '%s', actual '%s'", prefix, path)
+ }
+ return types.NamespacedName{
+ Namespace: parts[2],
+ Name: parts[3],
+ }, nil
+}
diff --git a/pkg/reconciler/v1alpha1/trigger/resources/labels.go b/pkg/reconciler/v1alpha1/trigger/resources/labels.go
deleted file mode 100644
index 1e60cdb91ea..00000000000
--- a/pkg/reconciler/v1alpha1/trigger/resources/labels.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resources
-
-import eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
-
-func ServiceLabels(t *eventingv1alpha1.Trigger) map[string]string {
- return map[string]string{
- "eventing.knative.dev/trigger": t.Name,
- }
-}
-
-func SubscriptionLabels(t *eventingv1alpha1.Trigger) map[string]string {
- return map[string]string{
- "eventing.knative.dev/broker": t.Spec.Broker,
- "eventing.knative.dev/trigger": t.Name,
- }
-}
-
-func VirtualServiceLabels(t *eventingv1alpha1.Trigger) map[string]string {
- return map[string]string{
- "eventing.knative.dev/trigger": t.Name,
- }
-}
diff --git a/pkg/reconciler/v1alpha1/trigger/resources/service.go b/pkg/reconciler/v1alpha1/trigger/resources/service.go
deleted file mode 100644
index 3cd686926cc..00000000000
--- a/pkg/reconciler/v1alpha1/trigger/resources/service.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resources
-
-import (
- "fmt"
-
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// NewService returns a K8s placeholder service for trigger 't'.
-func NewService(t *eventingv1alpha1.Trigger) *corev1.Service {
- return &corev1.Service{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: t.Namespace,
- GenerateName: fmt.Sprintf("trigger-%s-", t.Name),
- Labels: ServiceLabels(t),
- OwnerReferences: []metav1.OwnerReference{
- *metav1.NewControllerRef(t, schema.GroupVersionKind{
- Group: eventingv1alpha1.SchemeGroupVersion.Group,
- Version: eventingv1alpha1.SchemeGroupVersion.Version,
- Kind: "Trigger",
- }),
- },
- },
- Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: "http",
- Port: 80,
- },
- },
- },
- }
-}
diff --git a/pkg/reconciler/v1alpha1/trigger/resources/subscription.go b/pkg/reconciler/v1alpha1/trigger/resources/subscription.go
index 1cfb3dd7f27..dc53d8b42a3 100644
--- a/pkg/reconciler/v1alpha1/trigger/resources/subscription.go
+++ b/pkg/reconciler/v1alpha1/trigger/resources/subscription.go
@@ -18,6 +18,7 @@ package resources
import (
"fmt"
+ "net/url"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
corev1 "k8s.io/api/core/v1"
@@ -25,9 +26,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
-// NewSubscription returns a placeholder subscription for trigger 't', from brokerTrigger to 'svc'
+// NewSubscription returns a placeholder subscription for trigger 't', from brokerTrigger to 'uri'
// replying to brokerIngress.
-func NewSubscription(t *eventingv1alpha1.Trigger, brokerTrigger, brokerIngress *eventingv1alpha1.Channel, svc *corev1.Service) *eventingv1alpha1.Subscription {
+func NewSubscription(t *eventingv1alpha1.Trigger, brokerTrigger, brokerIngress *eventingv1alpha1.Channel, uri *url.URL) *eventingv1alpha1.Subscription {
+ uriString := uri.String()
return &eventingv1alpha1.Subscription{
ObjectMeta: metav1.ObjectMeta{
Namespace: t.Namespace,
@@ -48,11 +50,7 @@ func NewSubscription(t *eventingv1alpha1.Trigger, brokerTrigger, brokerIngress *
Name: brokerTrigger.Name,
},
Subscriber: &eventingv1alpha1.SubscriberSpec{
- Ref: &corev1.ObjectReference{
- APIVersion: "v1",
- Kind: "Service",
- Name: svc.Name,
- },
+ URI: &uriString,
},
Reply: &eventingv1alpha1.ReplyStrategy{
Channel: &corev1.ObjectReference{
@@ -64,3 +62,12 @@ func NewSubscription(t *eventingv1alpha1.Trigger, brokerTrigger, brokerIngress *
},
}
}
+
+// SubscriptionLabels generates the labels present on the Subscription linking this Trigger to the
+// Broker's Channels.
+func SubscriptionLabels(t *eventingv1alpha1.Trigger) map[string]string {
+ return map[string]string{
+ "eventing.knative.dev/broker": t.Spec.Broker,
+ "eventing.knative.dev/trigger": t.Name,
+ }
+}
diff --git a/pkg/reconciler/v1alpha1/trigger/resources/virtual_service.go b/pkg/reconciler/v1alpha1/trigger/resources/virtual_service.go
deleted file mode 100644
index 45e4b80272a..00000000000
--- a/pkg/reconciler/v1alpha1/trigger/resources/virtual_service.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package resources
-
-import (
- "fmt"
-
- "github.com/knative/eventing/pkg/reconciler/names"
- "github.com/knative/eventing/pkg/utils"
-
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// NewVirtualService returns a placeholder virtual service object for trigger 't' and service 'svc'.
-func NewVirtualService(t *eventingv1alpha1.Trigger, svc *corev1.Service) *istiov1alpha3.VirtualService {
- destinationHost := fmt.Sprintf("%s-broker-filter.%s.svc.%s", t.Spec.Broker, t.Namespace, utils.GetClusterDomainName())
- return &istiov1alpha3.VirtualService{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: fmt.Sprintf("%s-", t.Name),
- Namespace: t.Namespace,
- Labels: VirtualServiceLabels(t),
- OwnerReferences: []metav1.OwnerReference{
- *metav1.NewControllerRef(t, schema.GroupVersionKind{
- Group: eventingv1alpha1.SchemeGroupVersion.Group,
- Version: eventingv1alpha1.SchemeGroupVersion.Version,
- Kind: "Trigger",
- }),
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- names.ServiceHostName(svc.Name, svc.Namespace),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.triggers.%s", t.Name, t.Namespace, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.DestinationWeight{{
- Destination: istiov1alpha3.Destination{
- Host: destinationHost,
- Port: istiov1alpha3.PortSelector{
- Number: 80,
- },
- }},
- }},
- },
- },
- }
-}
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger.go b/pkg/reconciler/v1alpha1/trigger/trigger.go
index b1dc6c459e6..ff60d792c16 100644
--- a/pkg/reconciler/v1alpha1/trigger/trigger.go
+++ b/pkg/reconciler/v1alpha1/trigger/trigger.go
@@ -18,12 +18,15 @@ package trigger
import (
"context"
-
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
+ "net/url"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler/names"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
+ brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
+ "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path"
+ "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
"github.com/knative/eventing/pkg/utils/resolve"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
@@ -210,10 +213,13 @@ func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.InitializeConditions()
// 1. Verify the Broker exists.
- // 2. Find the Subscriber's URI.
- // 2. Creates a K8s Service uniquely named for this Trigger.
- // 3. Creates a VirtualService that routes the K8s Service to the Broker's filter service on an identifiable host name.
- // 4. Creates a Subscription from the Broker's single Channel to this Trigger's K8s Service, with reply set to the Broker.
+ // 2. Get the Broker's:
+ // - Filter Channel
+ // - Ingress Channel
+ // - Filter Service
+ // 3. Find the Subscriber's URI.
+ // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
+ // Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
if t.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
@@ -238,29 +244,21 @@ func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
return err
}
-
- subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
+ // Get Broker filter service.
+ filterSvc, err := r.getBrokerFilterService(ctx, b)
if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
+ logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
return err
}
- t.Status.SubscriberURI = subscriberURI
- svc, err := r.reconcileK8sService(ctx, t)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to reconcile the K8s Service", zap.Error(err))
- return err
- }
- t.Status.MarkKubernetesServiceExists()
-
- _, err = r.reconcileVirtualService(ctx, t, svc)
+ subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
if err != nil {
- logging.FromContext(ctx).Error("Unable to reconcile the VirtualService", zap.Error(err))
+ logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
}
- t.Status.MarkVirtualServiceExists()
+ t.Status.SubscriberURI = subscriberURI
- _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, svc)
+ _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
@@ -360,11 +358,11 @@ func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labe
-// getService returns the K8s service for trigger 't' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getService(ctx context.Context, t *v1alpha1.Trigger) (*corev1.Service, error) {
+// getBrokerFilterService returns the K8s service for Broker 'b's filter if it exists,
+// otherwise it returns an error.
+func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
list := &corev1.ServiceList{}
opts := &runtimeclient.ListOptions{
- Namespace: t.Namespace,
- LabelSelector: labels.SelectorFromSet(resources.ServiceLabels(t)),
+ Namespace: b.Namespace,
+ LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
@@ -375,7 +373,7 @@ func (r *reconciler) getService(ctx context.Context, t *v1alpha1.Trigger) (*core
return nil, err
}
for _, svc := range list.Items {
- if metav1.IsControlledBy(&svc, t) {
+ if metav1.IsControlledBy(&svc, b) {
return &svc, nil
}
}
@@ -383,91 +381,14 @@ func (r *reconciler) getService(ctx context.Context, t *v1alpha1.Trigger) (*core
return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
}
-// reconcileK8sService reconciles the K8s service for trigger 't'.
-func (r *reconciler) reconcileK8sService(ctx context.Context, t *v1alpha1.Trigger) (*corev1.Service, error) {
- current, err := r.getService(ctx, t)
-
- // If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- svc := resources.NewService(t)
- err = r.client.Create(ctx, svc)
- if err != nil {
- return nil, err
- }
- return svc, nil
- } else if err != nil {
- return nil, err
- }
-
- expected := resources.NewService(t)
- // spec.clusterIP is immutable and is set on existing services. If we don't set this to the same value, we will
- // encounter an error while updating.
- expected.Spec.ClusterIP = current.Spec.ClusterIP
- if !equality.Semantic.DeepDerivative(expected.Spec, current.Spec) {
- current.Spec = expected.Spec
- err = r.client.Update(ctx, current)
- if err != nil {
- return nil, err
- }
- }
- return current, nil
-}
-
-// getVirtualService returns the virtual service for trigger 't' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getVirtualService(ctx context.Context, t *v1alpha1.Trigger) (*istiov1alpha3.VirtualService, error) {
- list := &istiov1alpha3.VirtualServiceList{}
- opts := &runtimeclient.ListOptions{
- Namespace: t.Namespace,
- LabelSelector: labels.SelectorFromSet(resources.VirtualServiceLabels(t)),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
- if err != nil {
- return nil, err
- }
- for _, vs := range list.Items {
- if metav1.IsControlledBy(&vs, t) {
- return &vs, nil
- }
- }
-
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
-}
-
-// reconcileVirtualService reconciles the virtual service for trigger 't' and service 'svc'.
-func (r *reconciler) reconcileVirtualService(ctx context.Context, t *v1alpha1.Trigger, svc *corev1.Service) (*istiov1alpha3.VirtualService, error) {
- virtualService, err := r.getVirtualService(ctx, t)
-
- // If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- virtualService = resources.NewVirtualService(t, svc)
- err = r.client.Create(ctx, virtualService)
- if err != nil {
- return nil, err
- }
- return virtualService, nil
- } else if err != nil {
- return nil, err
- }
-
- expected := resources.NewVirtualService(t, svc)
- if !equality.Semantic.DeepDerivative(expected.Spec, virtualService.Spec) {
- virtualService.Spec = expected.Spec
- err = r.client.Update(ctx, virtualService)
- if err != nil {
- return nil, err
- }
- }
- return virtualService, nil
-}
-
// subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels.
func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {
- expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, svc)
+ uri := &url.URL{
+ Scheme: "http",
+ Host: names.ServiceHostName(svc.Name, svc.Namespace),
+ Path: path.Generate(t),
+ }
+ expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri)
sub, err := r.getSubscription(ctx, t)
// If the resource doesn't exist, we'll create it
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger_test.go b/pkg/reconciler/v1alpha1/trigger/trigger_test.go
index 00a3ea3f784..e0e28920b68 100644
--- a/pkg/reconciler/v1alpha1/trigger/trigger_test.go
+++ b/pkg/reconciler/v1alpha1/trigger/trigger_test.go
@@ -20,14 +20,15 @@ import (
"context"
"errors"
"fmt"
+ "net/url"
"testing"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
-
+ "github.com/google/go-cmp/cmp"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/reconciler/names"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
+ brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
+ "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
"github.com/knative/eventing/pkg/utils"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
@@ -37,11 +38,14 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
@@ -52,6 +56,8 @@ const (
subscriberAPIVersion = "v1"
subscriberKind = "Service"
subscriberName = "subscriberName"
+
+ continueToken = "continueToken"
)
var (
@@ -143,8 +149,7 @@ func TestReconcile(t *testing.T) {
Name: "Trigger not found",
},
{
- Name: "Get Trigger error",
- Scheme: scheme.Scheme,
+ Name: "Get Trigger error",
Mocks: controllertesting.Mocks{
MockGets: []controllertesting.MockGet{
func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -158,16 +163,14 @@ func TestReconcile(t *testing.T) {
WantErrMsg: "test error getting the Trigger",
},
{
- Name: "Trigger being deleted",
- Scheme: scheme.Scheme,
+ Name: "Trigger being deleted",
InitialState: []runtime.Object{
makeDeletingTrigger(),
},
WantEvent: []corev1.Event{events[triggerReconciled]},
},
{
- Name: "Get Broker error",
- Scheme: scheme.Scheme,
+ Name: "Get Broker error",
InitialState: []runtime.Object{
makeTrigger(),
},
@@ -185,8 +188,7 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Get Broker Trigger channel error",
- Scheme: scheme.Scheme,
+ Name: "Get Broker Trigger channel error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
@@ -209,8 +211,16 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Get Broker Ingress channel error",
- Scheme: scheme.Scheme,
+ Name: "Broker Trigger channel not found",
+ InitialState: []runtime.Object{
+ makeTrigger(),
+ makeBroker(),
+ },
+ WantErrMsg: ` "" not found`,
+ WantEvent: []corev1.Event{events[triggerReconcileFailed]},
+ },
+ {
+ Name: "Get Broker Ingress channel error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
@@ -234,136 +244,98 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Resolve subscriberURI error",
- Scheme: scheme.Scheme,
+ Name: "Broker Ingress channel not found",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
},
- DynamicMocks: controllertesting.DynamicMocks{
- MockGets: []controllertesting.MockDynamicGet{
- func(ctx *controllertesting.MockDynamicContext, name string, options metav1.GetOptions, subresources ...string) (handled controllertesting.MockHandled, i *unstructured.Unstructured, e error) {
- if ctx.Resource.Group == "" && ctx.Resource.Version == "v1" && ctx.Resource.Resource == "services" {
-
- return controllertesting.Handled, nil, errors.New("test error resolving subscriber URI")
- }
- return controllertesting.Unhandled, nil, nil
- },
- },
- },
- WantErrMsg: "test error resolving subscriber URI",
+ WantErrMsg: ` "" not found`,
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Create K8s Service error",
- Scheme: scheme.Scheme,
+ Name: "Broker Filter Service not found",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
},
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.Service); ok {
- return controllertesting.Handled, errors.New("test error creating k8s service")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error creating k8s service",
+ WantErrMsg: ` "" not found`,
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Update K8s Service error",
- Scheme: scheme.Scheme,
+ Name: "Get Broker Filter Service error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeDifferentK8sService(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
},
Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.Service); ok {
- return controllertesting.Handled, errors.New("test error updating k8s service")
+ MockLists: []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
+ if _, ok := list.(*corev1.ServiceList); ok {
+ return controllertesting.Handled, errors.New("test error getting Broker's filter Service")
}
return controllertesting.Unhandled, nil
},
},
},
- WantErrMsg: "test error updating k8s service",
+ WantErrMsg: "test error getting Broker's filter Service",
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Create Virtual Service error",
- Scheme: scheme.Scheme,
+ Name: "Resolve subscriberURI error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
+ makeBrokerFilterService(),
},
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualService); ok {
- return controllertesting.Handled, errors.New("test error creating virtual service")
+ DynamicMocks: controllertesting.DynamicMocks{
+ MockGets: []controllertesting.MockDynamicGet{
+ func(ctx *controllertesting.MockDynamicContext, name string, options metav1.GetOptions, subresources ...string) (handled controllertesting.MockHandled, i *unstructured.Unstructured, e error) {
+ if ctx.Resource.Group == "" && ctx.Resource.Version == "v1" && ctx.Resource.Resource == "services" {
+
+ return controllertesting.Handled, nil, errors.New("test error resolving subscriber URI")
}
- return controllertesting.Unhandled, nil
+ return controllertesting.Unhandled, nil, nil
},
},
},
- WantErrMsg: "test error creating virtual service",
+ WantErrMsg: "test error resolving subscriber URI",
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Update Virtual Service error",
- Scheme: scheme.Scheme,
+ Name: "Get Subscription error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeDifferentVirtualService(),
+ makeBrokerFilterService(),
},
Objects: []runtime.Object{
makeSubscriberServiceAsUnstructured(),
},
Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualService); ok {
- return controllertesting.Handled, errors.New("test error updating virtual service")
+ MockLists: []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, _ *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
+ if _, ok := list.(*v1alpha1.SubscriptionList); ok {
+ return controllertesting.Handled, errors.New("test error listing subscription")
}
return controllertesting.Unhandled, nil
},
},
},
- WantErrMsg: "test error updating virtual service",
+ WantErrMsg: "test error listing subscription",
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Create Subscription error",
- Scheme: scheme.Scheme,
+ Name: "Create Subscription error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeVirtualService(),
+ makeBrokerFilterService(),
},
Objects: []runtime.Object{
makeSubscriberServiceAsUnstructured(),
@@ -382,14 +354,12 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[triggerReconcileFailed]},
},
{
- Name: "Delete Subscription error",
- Scheme: scheme.Scheme,
+ Name: "Delete Subscription error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeVirtualService(),
+ makeBrokerFilterService(),
makeDifferentSubscription(),
},
Objects: []runtime.Object{
@@ -409,14 +379,12 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[subscriptionDeleteFailed], events[triggerReconcileFailed]},
},
{
- Name: "Re-create Subscription error",
- Scheme: scheme.Scheme,
+ Name: "Re-create Subscription error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeVirtualService(),
+ makeBrokerFilterService(),
makeDifferentSubscription(),
},
Objects: []runtime.Object{
@@ -436,14 +404,12 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[subscriptionCreateFailed], events[triggerReconcileFailed]},
},
{
- Name: "Update status error",
- Scheme: scheme.Scheme,
+ Name: "Update status error",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeVirtualService(),
+ makeBrokerFilterService(),
makeSameSubscription(),
},
Objects: []runtime.Object{
@@ -463,14 +429,12 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{events[triggerReconciled], events[triggerUpdateStatusFailed]},
},
{
- Name: "Trigger reconciliation success",
- Scheme: scheme.Scheme,
+ Name: "Trigger reconciliation success",
InitialState: []runtime.Object{
makeTrigger(),
makeBroker(),
makeTriggerChannel(),
- makeService(),
- makeVirtualService(),
+ makeBrokerFilterService(),
makeSameSubscription(),
},
Objects: []runtime.Object{
@@ -495,10 +459,122 @@ func TestReconcile(t *testing.T) {
}
tc.ReconcileKey = fmt.Sprintf("%s/%s", testNS, triggerName)
tc.IgnoreTimes = true
+ tc.Scheme = scheme.Scheme
t.Run(tc.Name, tc.Runner(t, r, c, recorder))
}
}
+func TestMapBrokerToTriggers(t *testing.T) {
+ testCases := map[string]struct {
+ initialState []runtime.Object
+ mocks controllertesting.Mocks
+ expected []reconcile.Request
+ }{
+ "List error": {
+ mocks: controllertesting.Mocks{
+ MockLists: []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, _ *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
+ return controllertesting.Handled, errors.New("test induced error")
+ },
+ },
+ },
+ expected: []reconcile.Request{},
+ },
+ "One Trigger": {
+ initialState: []runtime.Object{
+ makeTrigger(),
+ },
+ expected: []reconcile.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Namespace: testNS,
+ Name: triggerName,
+ },
+ },
+ },
+ },
+ "Only from this namespace": {
+ initialState: []runtime.Object{
+ makeTriggerWithNamespaceAndName(testNS, "one"),
+ makeTriggerWithNamespaceAndName("some-other-namespace", "will-be-ignored"),
+ makeTriggerWithNamespaceAndName(testNS, "two"),
+ },
+ expected: []reconcile.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Namespace: testNS,
+ Name: "one",
+ },
+ },
+ {
+ NamespacedName: types.NamespacedName{
+ Namespace: testNS,
+ Name: "two",
+ },
+ },
+ },
+ },
+ "Follows pagination": {
+ initialState: []runtime.Object{
+ makeTrigger(),
+ },
+ mocks: controllertesting.Mocks{
+ MockLists: []controllertesting.MockList{
+ func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
+ // The first request won't have a continue token. Add it and immediately
+ // return. The subsequent request will have the token, remove it and send
+ // the request to the inner client.
+ tl := list.(*v1alpha1.TriggerList)
+ if opts.Raw.Continue != continueToken {
+ tl.Continue = continueToken
+ return controllertesting.Handled, nil
+ } else {
+ tl.Continue = ""
+ return controllertesting.Handled, innerClient.List(ctx, opts, list)
+ }
+ },
+ },
+ },
+ expected: []reconcile.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Namespace: testNS,
+ Name: triggerName,
+ },
+ },
+ },
+ },
+ }
+
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ c := (&controllertesting.TestCase{
+ Scheme: scheme.Scheme,
+ InitialState: tc.initialState,
+ Mocks: tc.mocks,
+ }).GetClient()
+
+ b := &mapBrokerToTriggers{
+ // client and logger are the only fields that are used by the Map function.
+ r: &reconciler{
+ client: c,
+ logger: zap.NewNop(),
+ },
+ }
+ o := handler.MapObject{
+ Meta: &metav1.ObjectMeta{
+ Namespace: testNS,
+ Name: brokerName,
+ },
+ }
+ actual := b.Map(o)
+ if diff := cmp.Diff(tc.expected, actual); diff != "" {
+ t.Errorf("Unexpected results (-want +got): %s", diff)
+ }
+ })
+ }
+}
+
func makeTrigger() *v1alpha1.Trigger {
return &v1alpha1.Trigger{
TypeMeta: metav1.TypeMeta{
@@ -533,8 +609,6 @@ func makeReadyTrigger() *v1alpha1.Trigger {
t.Status.InitializeConditions()
t.Status.MarkBrokerExists()
t.Status.SubscriberURI = fmt.Sprintf("http://%s.%s.svc.%s/", subscriberName, testNS, utils.GetClusterDomainName())
- t.Status.MarkKubernetesServiceExists()
- t.Status.MarkVirtualServiceExists()
t.Status.MarkSubscribed()
return t
}
@@ -545,6 +619,13 @@ func makeDeletingTrigger() *v1alpha1.Trigger {
return b
}
+func makeTriggerWithNamespaceAndName(namespace, name string) *v1alpha1.Trigger {
+ t := makeTrigger()
+ t.Namespace = namespace
+ t.Name = name
+ return t
+}
+
func makeBroker() *v1alpha1.Broker {
return &v1alpha1.Broker{
TypeMeta: metav1.TypeMeta{
@@ -616,37 +697,24 @@ func makeSubscriberServiceAsUnstructured() *unstructured.Unstructured {
}
}
-func makeService() *corev1.Service {
- return resources.NewService(makeTrigger())
-}
-
-func makeDifferentK8sService() *corev1.Service {
- svc := makeService()
- svc.Spec.Ports = []corev1.ServicePort{{
- Name: "http",
- Port: 9999,
- }}
- return svc
-}
-
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return resources.NewVirtualService(makeTrigger(), makeService())
+func makeBrokerFilterService() *corev1.Service {
+ return brokerresources.MakeFilterService(makeBroker())
}
-func makeDifferentVirtualService() *istiov1alpha3.VirtualService {
- vsvc := makeVirtualService()
- vsvc.Spec.Hosts = []string{
- names.ServiceHostName("other_svc_name", "other_svc_namespace"),
+func makeServiceURI() *url.URL {
+ return &url.URL{
+ Scheme: "http",
+ Host: "service-uri",
+ Path: "/path",
}
- return vsvc
}
func makeSameSubscription() *v1alpha1.Subscription {
- return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeTriggerChannel(), makeService())
+ return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeTriggerChannel(), makeServiceURI())
}
func makeDifferentSubscription() *v1alpha1.Subscription {
- return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeDifferentChannel(), makeService())
+ return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeDifferentChannel(), makeServiceURI())
}
func getOwnerReference() metav1.OwnerReference {
From ac7c33797133d44d46040db3be636628eb83757e Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 13:33:44 -0700
Subject: [PATCH 18/76] Use CloudEvents instead of CloudEvent
---
pkg/broker/cel.go | 12 +-
pkg/broker/dev_knative/ce_context.pb.go | 122 ------------------
.../dev_knative/cloud_events_context.pb.go | 122 ++++++++++++++++++
...ntext.proto => cloud_events_context.proto} | 2 +-
4 files changed, 129 insertions(+), 129 deletions(-)
delete mode 100644 pkg/broker/dev_knative/ce_context.pb.go
create mode 100644 pkg/broker/dev_knative/cloud_events_context.pb.go
rename pkg/broker/dev_knative/{ce_context.proto => cloud_events_context.proto} (86%)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 07ee078d98f..46b92d31b03 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -16,13 +16,13 @@ import (
)
const (
- // CELVarKeyContext is the CEL variable key used for the CloudEvent event
+ // CELVarKeyContext is the CEL variable key used for the CloudEvents event
// context fields defined in the spec.
CELVarKeyContext = "ce"
- // CELVarKeyExtensions is the CEL variable key used for the CloudEvent event
+ // CELVarKeyExtensions is the CEL variable key used for the CloudEvents event
// context extensions.
CELVarKeyExtensions = "ext"
- // CELVarKeyData is the CEL variable key used for the CloudEvent event data.
+ // CELVarKeyData is the CEL variable key used for the CloudEvents event data.
CELVarKeyData = "data"
//TODO add a key that contains both the extensions and the baseline context
// vars so extensions can be future proofed.
@@ -30,9 +30,9 @@ const (
func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
e, err := cel.NewEnv(
- cel.Types(&celprotos.CloudEventContext{}),
+ cel.Types(&celprotos.CloudEventsContext{}),
cel.Declarations(
- decls.NewIdent(CELVarKeyContext, decls.NewObjectType("dev.knative.CloudEventContext"), nil),
+ decls.NewIdent(CELVarKeyContext, decls.NewObjectType("dev.knative.CloudEventsContext"), nil),
decls.NewIdent(CELVarKeyExtensions, decls.NewObjectType("google.protobuf.Struct"), nil),
decls.NewIdent(CELVarKeyData, decls.NewObjectType("google.protobuf.Struct"), nil),
),
@@ -59,7 +59,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
vars := map[string]interface{}{}
// Set baseline context fields
- ceCtx := &celprotos.CloudEventContext{
+ ceCtx := &celprotos.CloudEventsContext{
Specversion: event.Context.GetSpecVersion(),
Type: event.Context.GetType(),
Source: event.Context.GetSource(),
diff --git a/pkg/broker/dev_knative/ce_context.pb.go b/pkg/broker/dev_knative/ce_context.pb.go
deleted file mode 100644
index f6c36fed60f..00000000000
--- a/pkg/broker/dev_knative/ce_context.pb.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: ce_context.proto
-
-package dev_knative
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type CloudEventContext struct {
- Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
- Schemaurl string `protobuf:"bytes,4,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
- Datamediatype string `protobuf:"bytes,5,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
- Datacontenttype string `protobuf:"bytes,6,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CloudEventContext) Reset() { *m = CloudEventContext{} }
-func (m *CloudEventContext) String() string { return proto.CompactTextString(m) }
-func (*CloudEventContext) ProtoMessage() {}
-func (*CloudEventContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_fc676048f2e074ad, []int{0}
-}
-
-func (m *CloudEventContext) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CloudEventContext.Unmarshal(m, b)
-}
-func (m *CloudEventContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CloudEventContext.Marshal(b, m, deterministic)
-}
-func (m *CloudEventContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CloudEventContext.Merge(m, src)
-}
-func (m *CloudEventContext) XXX_Size() int {
- return xxx_messageInfo_CloudEventContext.Size(m)
-}
-func (m *CloudEventContext) XXX_DiscardUnknown() {
- xxx_messageInfo_CloudEventContext.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CloudEventContext proto.InternalMessageInfo
-
-func (m *CloudEventContext) GetSpecversion() string {
- if m != nil {
- return m.Specversion
- }
- return ""
-}
-
-func (m *CloudEventContext) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *CloudEventContext) GetSource() string {
- if m != nil {
- return m.Source
- }
- return ""
-}
-
-func (m *CloudEventContext) GetSchemaurl() string {
- if m != nil {
- return m.Schemaurl
- }
- return ""
-}
-
-func (m *CloudEventContext) GetDatamediatype() string {
- if m != nil {
- return m.Datamediatype
- }
- return ""
-}
-
-func (m *CloudEventContext) GetDatacontenttype() string {
- if m != nil {
- return m.Datacontenttype
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*CloudEventContext)(nil), "dev.knative.CloudEventContext")
-}
-
-func init() { proto.RegisterFile("ce_context.proto", fileDescriptor_fc676048f2e074ad) }
-
-var fileDescriptor_fc676048f2e074ad = []byte{
- // 188 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x4e, 0x8d, 0x4f,
- 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0x49,
- 0x2d, 0xd3, 0xcb, 0xce, 0x4b, 0x2c, 0xc9, 0x2c, 0x4b, 0x55, 0x3a, 0xcf, 0xc8, 0x25, 0xe8, 0x9c,
- 0x93, 0x5f, 0x9a, 0xe2, 0x5a, 0x96, 0x9a, 0x57, 0xe2, 0x0c, 0x51, 0x28, 0xa4, 0xc0, 0xc5, 0x5d,
- 0x5c, 0x90, 0x9a, 0x5c, 0x96, 0x5a, 0x54, 0x9c, 0x99, 0x9f, 0x27, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1,
- 0x19, 0x84, 0x2c, 0x24, 0x24, 0xc4, 0xc5, 0x52, 0x52, 0x59, 0x90, 0x2a, 0xc1, 0x04, 0x96, 0x02,
- 0xb3, 0x85, 0xc4, 0xb8, 0xd8, 0x8a, 0xf3, 0x4b, 0x8b, 0x92, 0x53, 0x25, 0x98, 0xc1, 0xa2, 0x50,
- 0x9e, 0x90, 0x0c, 0x17, 0x67, 0x71, 0x72, 0x46, 0x6a, 0x6e, 0x62, 0x69, 0x51, 0x8e, 0x04, 0x0b,
- 0x58, 0x0a, 0x21, 0x20, 0xa4, 0xc2, 0xc5, 0x9b, 0x92, 0x58, 0x92, 0x98, 0x9b, 0x9a, 0x92, 0x99,
- 0x08, 0x36, 0x92, 0x15, 0xac, 0x02, 0x55, 0x50, 0x48, 0x83, 0x8b, 0x1f, 0x24, 0x00, 0xf6, 0x49,
- 0x5e, 0x09, 0x58, 0x1d, 0x1b, 0x58, 0x1d, 0xba, 0x70, 0x12, 0x1b, 0xd8, 0x97, 0xc6, 0x80, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0x9f, 0x0a, 0x80, 0x22, 0xf9, 0x00, 0x00, 0x00,
-}
diff --git a/pkg/broker/dev_knative/cloud_events_context.pb.go b/pkg/broker/dev_knative/cloud_events_context.pb.go
new file mode 100644
index 00000000000..03d3135614b
--- /dev/null
+++ b/pkg/broker/dev_knative/cloud_events_context.pb.go
@@ -0,0 +1,122 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: cloud_events_context.proto
+
+package dev_knative
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CloudEventsContext struct {
+ Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
+ Schemaurl string `protobuf:"bytes,4,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
+ Datamediatype string `protobuf:"bytes,5,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
+ Datacontenttype string `protobuf:"bytes,6,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CloudEventsContext) Reset() { *m = CloudEventsContext{} }
+func (m *CloudEventsContext) String() string { return proto.CompactTextString(m) }
+func (*CloudEventsContext) ProtoMessage() {}
+func (*CloudEventsContext) Descriptor() ([]byte, []int) {
+ return fileDescriptor_a3df9ee1ac825df6, []int{0}
+}
+
+func (m *CloudEventsContext) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CloudEventsContext.Unmarshal(m, b)
+}
+func (m *CloudEventsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CloudEventsContext.Marshal(b, m, deterministic)
+}
+func (m *CloudEventsContext) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CloudEventsContext.Merge(m, src)
+}
+func (m *CloudEventsContext) XXX_Size() int {
+ return xxx_messageInfo_CloudEventsContext.Size(m)
+}
+func (m *CloudEventsContext) XXX_DiscardUnknown() {
+ xxx_messageInfo_CloudEventsContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CloudEventsContext proto.InternalMessageInfo
+
+func (m *CloudEventsContext) GetSpecversion() string {
+ if m != nil {
+ return m.Specversion
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetSource() string {
+ if m != nil {
+ return m.Source
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetSchemaurl() string {
+ if m != nil {
+ return m.Schemaurl
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetDatamediatype() string {
+ if m != nil {
+ return m.Datamediatype
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetDatacontenttype() string {
+ if m != nil {
+ return m.Datacontenttype
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CloudEventsContext)(nil), "dev.knative.CloudEventsContext")
+}
+
+func init() { proto.RegisterFile("cloud_events_context.proto", fileDescriptor_a3df9ee1ac825df6) }
+
+var fileDescriptor_a3df9ee1ac825df6 = []byte{
+ // 192 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0x4d, 0x6a, 0xc3, 0x30,
+ 0x10, 0x85, 0x71, 0xeb, 0x1a, 0x3c, 0xa6, 0x14, 0x66, 0x51, 0x44, 0xe9, 0xc2, 0x94, 0x2e, 0xbc,
+ 0xf2, 0xa6, 0x47, 0x30, 0xbd, 0x40, 0x2e, 0x60, 0x14, 0x69, 0x20, 0x26, 0xb6, 0x64, 0xa4, 0xb1,
+ 0x48, 0x6e, 0x99, 0x23, 0x05, 0x4f, 0x02, 0xf9, 0xd9, 0xcd, 0x7c, 0xef, 0xe3, 0xc1, 0x83, 0x2f,
+ 0x33, 0xfa, 0xc5, 0xf6, 0x94, 0xc8, 0x71, 0xec, 0x8d, 0x77, 0x4c, 0x07, 0x6e, 0xe7, 0xe0, 0xd9,
+ 0x63, 0x65, 0x29, 0xb5, 0x7b, 0xa7, 0x79, 0x48, 0xf4, 0x73, 0xca, 0x00, 0xbb, 0xd5, 0xfd, 0x17,
+ 0xb5, 0xbb, 0x98, 0x58, 0x43, 0x15, 0x67, 0x32, 0x89, 0x42, 0x1c, 0xbc, 0x53, 0x59, 0x9d, 0x35,
+ 0xe5, 0xe6, 0x1e, 0x21, 0x42, 0xce, 0xc7, 0x99, 0xd4, 0x8b, 0x44, 0x72, 0xe3, 0x27, 0x14, 0xd1,
+ 0x2f, 0xc1, 0x90, 0x7a, 0x15, 0x7a, 0xfd, 0xf0, 0x1b, 0xca, 0x68, 0x76, 0x34, 0xe9, 0x25, 0x8c,
+ 0x2a, 0x97, 0xe8, 0x06, 0xf0, 0x17, 0xde, 0xad, 0x66, 0x3d, 0x91, 0x1d, 0xb4, 0x54, 0xbe, 0x89,
+ 0xf1, 0x08, 0xb1, 0x81, 0x8f, 0x15, 0xc8, 0x14, 0xc7, 0xe2, 0x15, 0xe2, 0x3d, 0xe3, 0x6d, 0x21,
+ 0x33, 0xff, 0xce, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21, 0xcb, 0xd7, 0x29, 0x04, 0x01, 0x00, 0x00,
+}
diff --git a/pkg/broker/dev_knative/ce_context.proto b/pkg/broker/dev_knative/cloud_events_context.proto
similarity index 86%
rename from pkg/broker/dev_knative/ce_context.proto
rename to pkg/broker/dev_knative/cloud_events_context.proto
index b5dac07e6b0..fda378d5027 100644
--- a/pkg/broker/dev_knative/ce_context.proto
+++ b/pkg/broker/dev_knative/cloud_events_context.proto
@@ -2,7 +2,7 @@ syntax = "proto3";
package dev.knative;
-message CloudEventContext {
+message CloudEventsContext {
string specversion = 1;
string type = 2;
string source = 3;
From 4d8a37b50dc39285f7fda113db0e780f222dba18 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 13:38:17 -0700
Subject: [PATCH 19/76] Handle errors in GetDataMediaType
GetDataMediaType now returns an error because it tries to parse the
content type. If an error occurs, log it and use the empty string as the
data media type.
---
pkg/broker/cel.go | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 46b92d31b03..779cdd84ef9 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -59,12 +59,17 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
vars := map[string]interface{}{}
// Set baseline context fields
+ dmt, err := event.Context.GetDataMediaType()
+ if err != nil {
+ r.logger.Error("Failed to parse data media type", zap.Error(err))
+ }
+
ceCtx := &celprotos.CloudEventsContext{
Specversion: event.Context.GetSpecVersion(),
Type: event.Context.GetType(),
Source: event.Context.GetSource(),
Schemaurl: event.Context.GetSchemaURL(),
- Datamediatype: event.Context.GetDataMediaType(),
+ Datamediatype: dmt,
Datacontenttype: event.Context.GetDataContentType(),
}
vars[CELVarKeyContext] = ceCtx
From ea90696d8f755226bbc2bf63e0ad471080dec5f2 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 13:39:55 -0700
Subject: [PATCH 20/76] Format TODOs correctly
---
pkg/broker/cel.go | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 779cdd84ef9..aa75f9c1796 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -24,7 +24,7 @@ const (
CELVarKeyExtensions = "ext"
// CELVarKeyData is the CEL variable key used for the CloudEvents event data.
CELVarKeyData = "data"
- //TODO add a key that contains both the extensions and the baseline context
+ // TODO add a key that contains both the extensions and the baseline context
// vars so extensions can be future proofed.
)
@@ -50,7 +50,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
return false, iss.Err()
}
- //TODO cache these by hash of expression. Programs are thread-safe so it's
+ // TODO cache these by hash of expression. Programs are thread-safe so it's
// ok to share them between triggers and events.
prg, err := e.Program(c)
if err != nil {
@@ -77,7 +77,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
// If the Trigger has requested parsing of extensions, attempt to turn them
// into a dynamic struct.
if ts.Filter.CEL.ParseExtensions {
- //TODO should this coerce to V02?
+ // TODO should this coerce to V02?
extStruct, err := ceParsedExtensionsStruct(event.Context.AsV02().Extensions)
if err != nil {
r.logger.Error("Failed to parse event context for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
@@ -119,7 +119,7 @@ func ceParsedExtensionsStruct(ext map[string]interface{}) (*structpb.Struct, err
}
func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
- //TODO CloudEvents SDK might have a better way to do this with data codecs
+ // TODO CloudEvents SDK might have a better way to do this with data codecs
if event.Context.GetDataContentType() == "application/json" {
var decodedData map[string]interface{}
err := event.DataAs(&decodedData)
@@ -132,7 +132,7 @@ func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
}
dataStruct := &structpb.Struct{}
- //TODO is there a way to convert a map into a structpb.Struct?
+ // TODO is there a way to convert a map into a structpb.Struct?
if err := jsonpb.Unmarshal(bytes.NewBuffer(dataJSON), dataStruct); err != nil {
return nil, err
}
From 7816d99fb60caa917a5634f34ae78d8de36d2b3c Mon Sep 17 00:00:00 2001
From: Dan Gerdesmeier
Date: Mon, 15 Apr 2019 14:40:00 -0700
Subject: [PATCH 21/76] Add custom reporter for ImmutableField validation
errors (#1048)
* Add custom reporter for ImmutableField validation errors
* Upgrades go-cmp to latest commit of master
* Replaces cmp with kmp for producing diffs at runtime
* Adds use of custom reporter for kmp
* Fixes build warnings and errors
* Fix build
* Run codegen
* Grab latest kn/pkg
---
Gopkg.lock | 11 +-
Gopkg.toml | 4 +-
.../eventing/v1alpha1/channel_validation.go | 10 +-
...ubscribable_channelable_validation_test.go | 4 +-
.../v1alpha1/subscription_validation.go | 10 +-
.../eventing/v1alpha1/trigger_validation.go | 10 +-
test/e2e/channel_chain_test.go | 2 +-
test/e2e/e2e.go | 8 +-
test/e2e/event_transformation_test.go | 2 +-
test/e2e/single_event_test.go | 6 +-
.../google/go-cmp/cmp/cmpopts/ignore.go | 62 ++
.../google/go-cmp/cmp/cmpopts/sort.go | 33 +-
.../google/go-cmp/cmp/cmpopts/sort_go17.go | 46 --
.../google/go-cmp/cmp/cmpopts/sort_go18.go | 31 -
.../google/go-cmp/cmp/cmpopts/xform.go | 35 ++
.../github.com/google/go-cmp/cmp/compare.go | 557 ++++++++++--------
.../cmp/{unsafe_panic.go => export_panic.go} | 6 +-
.../{unsafe_reflect.go => export_unsafe.go} | 8 +-
.../go-cmp/cmp/internal/diff/debug_disable.go | 2 +-
.../go-cmp/cmp/internal/diff/debug_enable.go | 4 +-
.../google/go-cmp/cmp/internal/diff/diff.go | 31 +-
.../google/go-cmp/cmp/internal/flags/flags.go | 9 +
.../cmp/internal/flags/toolchain_legacy.go | 10 +
.../cmp/internal/flags/toolchain_recent.go | 10 +
.../go-cmp/cmp/internal/function/func.go | 64 +-
.../go-cmp/cmp/internal/value/format.go | 277 ---------
.../cmp/internal/value/pointer_purego.go | 23 +
.../cmp/internal/value/pointer_unsafe.go | 26 +
.../google/go-cmp/cmp/internal/value/sort.go | 9 +-
.../google/go-cmp/cmp/internal/value/zero.go | 45 ++
.../github.com/google/go-cmp/cmp/options.go | 255 +++++---
vendor/github.com/google/go-cmp/cmp/path.go | 339 ++++++-----
vendor/github.com/google/go-cmp/cmp/report.go | 51 ++
.../google/go-cmp/cmp/report_compare.go | 296 ++++++++++
.../google/go-cmp/cmp/report_reflect.go | 279 +++++++++
.../google/go-cmp/cmp/report_slices.go | 333 +++++++++++
.../google/go-cmp/cmp/report_text.go | 382 ++++++++++++
.../google/go-cmp/cmp/report_value.go | 121 ++++
.../github.com/google/go-cmp/cmp/reporter.go | 53 --
.../knative/pkg/apis/condition_set.go | 41 +-
.../github.com/knative/pkg/apis/contexts.go | 75 +++
.../knative/pkg/apis/field_error.go | 16 +-
.../github.com/knative/pkg/apis/interfaces.go | 4 +-
vendor/github.com/knative/pkg/kmp/diff.go | 27 +
.../github.com/knative/pkg/kmp/reporters.go | 136 +++++
vendor/github.com/knative/pkg/test/clients.go | 6 +-
.../knative/pkg/test/kube_checks.go | 23 +-
.../knative/pkg/test/spoof/error_checks.go | 16 +-
.../github.com/knative/pkg/webhook/webhook.go | 35 +-
49 files changed, 2799 insertions(+), 1044 deletions(-)
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go
rename vendor/github.com/google/go-cmp/cmp/{unsafe_panic.go => export_panic.go} (60%)
rename vendor/github.com/google/go-cmp/cmp/{unsafe_reflect.go => export_unsafe.go} (64%)
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/format.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go
create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go
delete mode 100644 vendor/github.com/google/go-cmp/cmp/reporter.go
create mode 100644 vendor/github.com/knative/pkg/kmp/reporters.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 7e30139f683..14b73872dea 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -219,18 +219,18 @@
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
- digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
+ digest = "1:010d46ea3c1e730897e53058d1013a963f3f987675dda87df64f891b945281db"
name = "github.com/google/go-cmp"
packages = [
"cmp",
"cmp/cmpopts",
"cmp/internal/diff",
+ "cmp/internal/flags",
"cmp/internal/function",
"cmp/internal/value",
]
pruneopts = "NUT"
- revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
- version = "v0.2.0"
+ revision = "6f77996f0c42f7b84e5a2b252227263f93432e9b"
[[projects]]
branch = "master"
@@ -363,7 +363,7 @@
revision = "dd3ceb3323922b899a0a913f885fcf59943e7b59"
[[projects]]
- digest = "1:24c03867a9e3a7c469c927a50c9bcb07d3bceb7e2bcb706ddceb9f57c5297dde"
+ digest = "1:244b7861fc13a97a1c937ce845f36311dfa00284cd25599831559843cba86ded"
name = "github.com/knative/pkg"
packages = [
"apis",
@@ -390,7 +390,7 @@
"webhook",
]
pruneopts = "NUT"
- revision = "2b574edcd712e848556c69cc95a2622145284882"
+ revision = "1095a4eab01cb7f5bbfa4dd5d048d730a00980e3"
[[projects]]
branch = "master"
@@ -1254,6 +1254,7 @@
"github.com/knative/pkg/apis/duck/v1alpha1",
"github.com/knative/pkg/apis/istio/v1alpha3",
"github.com/knative/pkg/configmap",
+ "github.com/knative/pkg/kmp",
"github.com/knative/pkg/logging",
"github.com/knative/pkg/logging/logkey",
"github.com/knative/pkg/signals",
diff --git a/Gopkg.toml b/Gopkg.toml
index 684a7b0e669..bed42ea2792 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -100,8 +100,8 @@ required = [
# This controls when we upgrade apis independently of Serving.
[[constraint]]
name = "github.com/knative/pkg"
- # HEAD as of 2019-04-08
- revision = "2b574edcd712e848556c69cc95a2622145284882"
+ # HEAD as of 2019-04-15
+ revision = "1095a4eab01cb7f5bbfa4dd5d048d730a00980e3"
# TODO why is this overridden?
[[override]]
diff --git a/pkg/apis/eventing/v1alpha1/channel_validation.go b/pkg/apis/eventing/v1alpha1/channel_validation.go
index 4dfc4a54a9a..d452f31ad40 100644
--- a/pkg/apis/eventing/v1alpha1/channel_validation.go
+++ b/pkg/apis/eventing/v1alpha1/channel_validation.go
@@ -20,9 +20,9 @@ import (
"context"
"fmt"
- "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/knative/pkg/apis"
+ "github.com/knative/pkg/kmp"
)
func (c *Channel) Validate(ctx context.Context) *apis.FieldError {
@@ -57,7 +57,13 @@ func (c *Channel) CheckImmutableFields(ctx context.Context, og apis.Immutable) *
return &apis.FieldError{Message: "The provided resource was not a Channel"}
}
ignoreArguments := cmpopts.IgnoreFields(ChannelSpec{}, "Arguments", "Subscribable")
- if diff := cmp.Diff(original.Spec, c.Spec, ignoreArguments); diff != "" {
+ if diff, err := kmp.ShortDiff(original.Spec, c.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Channel",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
return &apis.FieldError{
Message: "Immutable fields changed",
Paths: []string{"spec.provisioner"},
diff --git a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go
index 44389711cda..1607bcd5035 100644
--- a/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go
+++ b/pkg/apis/eventing/v1alpha1/subscribable_channelable_validation_test.go
@@ -37,7 +37,7 @@ var validationTests = []struct {
Kind: "Strait",
},
want: &apis.FieldError{
- Message: "invalid value \"Strait\"",
+ Message: "invalid value: Strait",
Paths: []string{"kind"},
Details: "only 'Channel' kind is allowed",
},
@@ -50,7 +50,7 @@ var validationTests = []struct {
Kind: "Channel",
},
want: &apis.FieldError{
- Message: `invalid value "eventing.knative.dev/v1alpha2"`,
+ Message: `invalid value: eventing.knative.dev/v1alpha2`,
Paths: []string{"apiVersion"},
Details: "only eventing.knative.dev/v1alpha1 " +
"is allowed for apiVersion",
diff --git a/pkg/apis/eventing/v1alpha1/subscription_validation.go b/pkg/apis/eventing/v1alpha1/subscription_validation.go
index c62134f4b03..abed237e2bf 100644
--- a/pkg/apis/eventing/v1alpha1/subscription_validation.go
+++ b/pkg/apis/eventing/v1alpha1/subscription_validation.go
@@ -19,9 +19,9 @@ package v1alpha1
import (
"context"
- "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/knative/pkg/apis"
+ "github.com/knative/pkg/kmp"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
)
@@ -140,7 +140,13 @@ func (s *Subscription) CheckImmutableFields(ctx context.Context, og apis.Immutab
// Only Subscriber and Reply are mutable.
ignoreArguments := cmpopts.IgnoreFields(SubscriptionSpec{}, "Subscriber", "Reply")
- if diff := cmp.Diff(original.Spec, s.Spec, ignoreArguments); diff != "" {
+ if diff, err := kmp.ShortDiff(original.Spec, s.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Subscription",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
return &apis.FieldError{
Message: "Immutable fields changed (-old +new)",
Paths: []string{"spec"},
diff --git a/pkg/apis/eventing/v1alpha1/trigger_validation.go b/pkg/apis/eventing/v1alpha1/trigger_validation.go
index 1ccaa9c6963..79aed96f714 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_validation.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_validation.go
@@ -19,8 +19,8 @@ package v1alpha1
import (
"context"
- "github.com/google/go-cmp/cmp"
"github.com/knative/pkg/apis"
+ "github.com/knative/pkg/kmp"
)
func (t *Trigger) Validate(ctx context.Context) *apis.FieldError {
@@ -64,7 +64,13 @@ func (t *Trigger) CheckImmutableFields(ctx context.Context, og apis.Immutable) *
return &apis.FieldError{Message: "The provided original was not a Trigger"}
}
- if diff := cmp.Diff(original.Spec.Broker, t.Spec.Broker); diff != "" {
+ if diff, err := kmp.ShortDiff(original.Spec.Broker, t.Spec.Broker); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Trigger",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
return &apis.FieldError{
Message: "Immutable fields changed (-old +new)",
Paths: []string{"spec", "broker"},
diff --git a/test/e2e/channel_chain_test.go b/test/e2e/channel_chain_test.go
index 0657f505188..a0ce7630231 100644
--- a/test/e2e/channel_chain_test.go
+++ b/test/e2e/channel_chain_test.go
@@ -109,7 +109,7 @@ func TestChannelChain(t *testing.T) {
// check if the logging service receives the correct number of event messages
expectedContentCount := len(subscriptionNames1) * len(subscriptionNames2)
- if err := WaitForLogContentCount(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, body, expectedContentCount); err != nil {
+ if err := WaitForLogContentCount(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body, expectedContentCount); err != nil {
t.Fatalf("String %q does not appear %d times in logs of logger pod %q: %v", body, expectedContentCount, loggerPodName, err)
}
}
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 020b6a87f45..400e20a766e 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -325,7 +325,7 @@ func SendFakeEventToChannel(clients *test.Clients, event *test.CloudEvent, chann
// If the contents are not present within timeout it returns error.
func WaitForLogContents(clients *test.Clients, logf logging.FormatLogger, podName string, containerName string, namespace string, contents []string) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
- logs, err := clients.Kube.PodLogs(podName, containerName)
+ logs, err := clients.Kube.PodLogs(podName, containerName, namespace)
if err != nil {
return true, err
}
@@ -343,9 +343,9 @@ func WaitForLogContents(clients *test.Clients, logf logging.FormatLogger, podNam
// WaitForLogContentCount checks if the number of substr occur times equals the given number.
// If the content does not appear the given times it returns error.
-func WaitForLogContentCount(client *test.Clients, podName, containerName, content string, appearTimes int) error {
+func WaitForLogContentCount(client *test.Clients, podName, containerName, namespace, content string, appearTimes int) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
- logs, err := client.Kube.PodLogs(podName, containerName)
+ logs, err := client.Kube.PodLogs(podName, containerName, namespace)
if err != nil {
return true, err
}
@@ -357,7 +357,7 @@ func WaitForLogContentCount(client *test.Clients, podName, containerName, conten
// FindAnyLogContents attempts to find logs for given Pod/Container that has 'any' of the given contents.
// It returns an error if it couldn't retrieve the logs. In case 'any' of the contents are there, it returns true.
func FindAnyLogContents(clients *test.Clients, logf logging.FormatLogger, podName string, containerName string, namespace string, contents []string) (bool, error) {
- logs, err := clients.Kube.PodLogs(podName, containerName)
+ logs, err := clients.Kube.PodLogs(podName, containerName, namespace)
if err != nil {
return false, err
}
diff --git a/test/e2e/event_transformation_test.go b/test/e2e/event_transformation_test.go
index deeb8a47e26..095dffd0779 100644
--- a/test/e2e/event_transformation_test.go
+++ b/test/e2e/event_transformation_test.go
@@ -130,7 +130,7 @@ func TestEventTransformation(t *testing.T) {
// check if the logging service receives the correct number of event messages
expectedContent := body + msgPostfix
expectedContentCount := len(subscriptionNames1) * len(subscriptionNames2)
- if err := WaitForLogContentCount(clients, loggerPod.Name, loggerPod.Spec.Containers[0].Name, expectedContent, expectedContentCount); err != nil {
+ if err := WaitForLogContentCount(clients, loggerPod.Name, loggerPod.Spec.Containers[0].Name, ns, expectedContent, expectedContentCount); err != nil {
t.Fatalf("String %q does not appear %d times in logs of logger pod %q: %v", expectedContent, expectedContentCount, loggerPod.Name, err)
}
}
diff --git a/test/e2e/single_event_test.go b/test/e2e/single_event_test.go
index 07515cbbc3a..8d1cc907b36 100644
--- a/test/e2e/single_event_test.go
+++ b/test/e2e/single_event_test.go
@@ -96,9 +96,9 @@ func SingleEvent(t *testing.T, encoding string) {
t.Fatalf("Failed to send fake CloudEvent to the channel %q", channel.Name)
}
- if err := pkgTest.WaitForLogContent(clients.Kube, loggerPodName, loggerPod.Spec.Containers[0].Name, body); err != nil {
- clients.Kube.PodLogs(senderName, "sendevent")
- clients.Kube.PodLogs(senderName, "istio-proxy")
+ if err := pkgTest.WaitForLogContent(clients.Kube, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body); err != nil {
+ clients.Kube.PodLogs(senderName, "sendevent", ns)
+ clients.Kube.PodLogs(senderName, "istio-proxy", ns)
t.Fatalf("String %q not found in logs of logger pod %q: %v", body, loggerPodName, err)
}
}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
index e86554b92b4..ff8e785d4e8 100644
--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
@@ -11,6 +11,7 @@ import (
"unicode/utf8"
"github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/internal/function"
)
// IgnoreFields returns an Option that ignores exported fields of the
@@ -112,6 +113,10 @@ func (tf ifaceFilter) filter(p cmp.Path) bool {
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
+//
+// Avoid ignoring unexported fields of a type which you do not control (i.e. a
+// type from another repository), as changes to the implementation of such types
+// may change how the comparison behaves. Prefer a custom Comparer instead.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
ux := newUnexportedFilter(typs...)
return cmp.FilterPath(ux.filter, cmp.Ignore())
@@ -143,3 +148,60 @@ func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
+
+// IgnoreSliceElements returns an Option that ignores elements of []V.
+// The discard function must be of the form "func(T) bool" which is used to
+// ignore slice elements of type V, where V is assignable to T.
+// Elements are ignored if the function reports true.
+func IgnoreSliceElements(discardFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(discardFunc)
+ if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
+ }
+ return cmp.FilterPath(func(p cmp.Path) bool {
+ si, ok := p.Index(-1).(cmp.SliceIndex)
+ if !ok {
+ return false
+ }
+ if !si.Type().AssignableTo(vf.Type().In(0)) {
+ return false
+ }
+ vx, vy := si.Values()
+ if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() {
+ return true
+ }
+ if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() {
+ return true
+ }
+ return false
+ }, cmp.Ignore())
+}
+
+// IgnoreMapEntries returns an Option that ignores entries of map[K]V.
+// The discard function must be of the form "func(T, R) bool" which is used to
+// ignore map entries of type K and V, where K and V are assignable to T and R.
+// Entries are ignored if the function reports true.
+func IgnoreMapEntries(discardFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(discardFunc)
+ if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
+ }
+ return cmp.FilterPath(func(p cmp.Path) bool {
+ mi, ok := p.Index(-1).(cmp.MapIndex)
+ if !ok {
+ return false
+ }
+ if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) {
+ return false
+ }
+ k := mi.Key()
+ vx, vy := mi.Values()
+ if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() {
+ return true
+ }
+ if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() {
+ return true
+ }
+ return false
+ }, cmp.Ignore())
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
index da17d746938..3a4804621e9 100644
--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
@@ -7,6 +7,7 @@ package cmpopts
import (
"fmt"
"reflect"
+ "sort"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/internal/function"
@@ -25,13 +26,13 @@ import (
// !less(y, x) for two elements x and y, their relative order is maintained.
//
// SortSlices can be used in conjunction with EquateEmpty.
-func SortSlices(less interface{}) cmp.Option {
- vf := reflect.ValueOf(less)
+func SortSlices(lessFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(lessFunc)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
- panic(fmt.Sprintf("invalid less function: %T", less))
+ panic(fmt.Sprintf("invalid less function: %T", lessFunc))
}
ss := sliceSorter{vf.Type().In(0), vf}
- return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort))
+ return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort))
}
type sliceSorter struct {
@@ -48,8 +49,8 @@ func (ss sliceSorter) filter(x, y interface{}) bool {
}
// Check whether the slices are already sorted to avoid an infinite
// recursion cycle applying the same transform to itself.
- ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
- ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
+ ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
+ ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
return !ok1 || !ok2
}
func (ss sliceSorter) sort(x interface{}) interface{} {
@@ -58,7 +59,7 @@ func (ss sliceSorter) sort(x interface{}) interface{} {
for i := 0; i < src.Len(); i++ {
dst.Index(i).Set(src.Index(i))
}
- sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
+ sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
ss.checkSort(dst)
return dst.Interface()
}
@@ -96,13 +97,13 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
// • Total: if x != y, then either less(x, y) or less(y, x)
//
// SortMaps can be used in conjunction with EquateEmpty.
-func SortMaps(less interface{}) cmp.Option {
- vf := reflect.ValueOf(less)
+func SortMaps(lessFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(lessFunc)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
- panic(fmt.Sprintf("invalid less function: %T", less))
+ panic(fmt.Sprintf("invalid less function: %T", lessFunc))
}
ms := mapSorter{vf.Type().In(0), vf}
- return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort))
+ return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort))
}
type mapSorter struct {
@@ -118,7 +119,10 @@ func (ms mapSorter) filter(x, y interface{}) bool {
}
func (ms mapSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
- outType := mapEntryType(src.Type())
+ outType := reflect.StructOf([]reflect.StructField{
+ {Name: "K", Type: src.Type().Key()},
+ {Name: "V", Type: src.Type().Elem()},
+ })
dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
for i, k := range src.MapKeys() {
v := reflect.New(outType).Elem()
@@ -126,7 +130,7 @@ func (ms mapSorter) sort(x interface{}) interface{} {
v.Field(1).Set(src.MapIndex(k))
dst.Index(i).Set(v)
}
- sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
+ sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
ms.checkSort(dst)
return dst.Interface()
}
@@ -139,8 +143,5 @@ func (ms mapSorter) checkSort(v reflect.Value) {
}
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
- if !hasReflectStructOf {
- vx, vy = vx.Elem(), vy.Elem()
- }
return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go
deleted file mode 100644
index 839b88ca402..00000000000
--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-// +build !go1.8
-
-package cmpopts
-
-import (
- "reflect"
- "sort"
-)
-
-const hasReflectStructOf = false
-
-func mapEntryType(reflect.Type) reflect.Type {
- return reflect.TypeOf(struct{ K, V interface{} }{})
-}
-
-func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
- return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less})
-}
-func sortSlice(slice interface{}, less func(i, j int) bool) {
- sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less})
-}
-func sortSliceStable(slice interface{}, less func(i, j int) bool) {
- sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less})
-}
-
-type reflectSliceSorter struct {
- slice reflect.Value
- less func(i, j int) bool
-}
-
-func (ss reflectSliceSorter) Len() int {
- return ss.slice.Len()
-}
-func (ss reflectSliceSorter) Less(i, j int) bool {
- return ss.less(i, j)
-}
-func (ss reflectSliceSorter) Swap(i, j int) {
- vi := ss.slice.Index(i).Interface()
- vj := ss.slice.Index(j).Interface()
- ss.slice.Index(i).Set(reflect.ValueOf(vj))
- ss.slice.Index(j).Set(reflect.ValueOf(vi))
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go
deleted file mode 100644
index 8a59c0d38fd..00000000000
--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-// +build go1.8
-
-package cmpopts
-
-import (
- "reflect"
- "sort"
-)
-
-const hasReflectStructOf = true
-
-func mapEntryType(t reflect.Type) reflect.Type {
- return reflect.StructOf([]reflect.StructField{
- {Name: "K", Type: t.Key()},
- {Name: "V", Type: t.Elem()},
- })
-}
-
-func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
- return sort.SliceIsSorted(slice, less)
-}
-func sortSlice(slice interface{}, less func(i, j int) bool) {
- sort.Slice(slice, less)
-}
-func sortSliceStable(slice interface{}, less func(i, j int) bool) {
- sort.SliceStable(slice, less)
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go
new file mode 100644
index 00000000000..9d651553d78
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go
@@ -0,0 +1,35 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmpopts
+
+import (
+ "github.com/google/go-cmp/cmp"
+)
+
+type xformFilter struct{ xform cmp.Option }
+
+func (xf xformFilter) filter(p cmp.Path) bool {
+ for _, ps := range p {
+ if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform {
+ return false
+ }
+ }
+ return true
+}
+
+// AcyclicTransformer returns a Transformer with a filter applied that ensures
+// that the transformer cannot be recursively applied upon its own output.
+//
+// An example use case is a transformer that splits a string by lines:
+// AcyclicTransformer("SplitLines", func(s string) []string{
+// return strings.Split(s, "\n")
+// })
+//
+// Had this been an unfiltered Transformer instead, this would result in an
+// infinite cycle converting a string to []string to [][]string and so on.
+func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option {
+ xf := xformFilter{cmp.Transformer(name, xformFunc)}
+ return cmp.FilterPath(xf.filter, xf.xform)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
index 7e215f22029..2133562b01c 100644
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -29,26 +29,17 @@ package cmp
import (
"fmt"
"reflect"
+ "strings"
"github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/flags"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)
-// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
-// the reflection package's inability to retrieve such entries. Equal will panic
-// anytime it comes across a NaN key, but this behavior may change.
-//
-// See https://golang.org/issue/11104 for more details.
-
-var nothing = reflect.Value{}
-
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
-// • If two values are not of the same type, then they are never equal
-// and the overall result is false.
-//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
@@ -61,43 +52,79 @@ var nothing = reflect.Value{}
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil.
-// Otherwise, no such method exists and evaluation proceeds to the next rule.
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
-// Pointers are equal if the underlying values they point to are also equal.
-// Interfaces are equal if their underlying concrete values are also equal.
//
-// Structs are equal if all of their fields are equal. If a struct contains
-// unexported fields, Equal panics unless the AllowUnexported option is used or
-// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+// Structs are equal if recursively calling Equal on all fields report equal.
+// If a struct contains unexported fields, Equal panics unless an Ignore option
+// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported
+// option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements report equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using cmpopts.EquateEmpty.
//
-// Arrays, slices, and maps are equal if they are both nil or both non-nil
-// with the same length and the elements at each index or key are equal.
-// Note that a non-nil empty slice and a nil slice are not equal.
-// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries report equal.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using cmpopts.EquateEmpty.
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
func Equal(x, y interface{}, opts ...Option) bool {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = reflect.TypeOf((*interface{})(nil)).Elem()
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
s := newState(opts)
- s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+ s.compareAny(&pathStep{t, vx, vy})
return s.result.Equal()
}
// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
-// input values and options. The output string will use the "-" symbol to
-// indicate elements removed from x, and the "+" symbol to indicate elements
-// added to y.
+// input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added to y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more humanly
+// readable outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
//
-// Do not depend on this output being stable.
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
func Diff(x, y interface{}, opts ...Option) string {
r := new(defaultReporter)
- opts = Options{Options(opts), r}
- eq := Equal(x, y, opts...)
+ eq := Equal(x, y, Options(opts), Reporter(r))
d := r.String()
if (d == "") != eq {
panic("inconsistent difference and equality results")
@@ -108,9 +135,13 @@ func Diff(x, y interface{}, opts ...Option) string {
type state struct {
// These fields represent the "comparison state".
// Calling statelessCompare must not result in observable changes to these.
- result diff.Result // The current result of comparison
- curPath Path // The current path in the value tree
- reporter reporter // Optional reporter used for difference formatting
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ reporters []reporter // Optional reporters
+
+ // recChecker checks for infinite cycles applying the same set of
+ // transformers upon the output of itself.
+ recChecker recChecker
// dynChecker triggers pseudo-random checks for option correctness.
// It is safe for statelessCompare to mutate this value.
@@ -122,10 +153,9 @@ type state struct {
}
func newState(opts []Option) *state {
- s := new(state)
- for _, opt := range opts {
- s.processOption(opt)
- }
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.processOption(Options(opts))
return s
}
@@ -152,10 +182,7 @@ func (s *state) processOption(opt Option) {
s.exporters[t] = true
}
case reporter:
- if s.reporter != nil {
- panic("difference reporter already registered")
- }
- s.reporter = opt
+ s.reporters = append(s.reporters, opt)
default:
panic(fmt.Sprintf("unknown option %T", opt))
}
@@ -164,153 +191,88 @@ func (s *state) processOption(opt Option) {
// statelessCompare compares two values and returns the result.
// This function is stateless in that it does not alter the current result,
// or output to any registered reporters.
-func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+func (s *state) statelessCompare(step PathStep) diff.Result {
// We do not save and restore the curPath because all of the compareX
// methods should properly push and pop from the path.
// It is an implementation bug if the contents of curPath differs from
// when calling this function to when returning from it.
- oldResult, oldReporter := s.result, s.reporter
+ oldResult, oldReporters := s.result, s.reporters
s.result = diff.Result{} // Reset result
- s.reporter = nil // Remove reporter to avoid spurious printouts
- s.compareAny(vx, vy)
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
res := s.result
- s.result, s.reporter = oldResult, oldReporter
+ s.result, s.reporters = oldResult, oldReporters
return res
}
-func (s *state) compareAny(vx, vy reflect.Value) {
- // TODO: Support cyclic data structures.
-
- // Rule 0: Differing types are never equal.
- if !vx.IsValid() || !vy.IsValid() {
- s.report(vx.IsValid() == vy.IsValid(), vx, vy)
- return
- }
- if vx.Type() != vy.Type() {
- s.report(false, vx, vy) // Possible for path to be empty
- return
- }
- t := vx.Type()
- if len(s.curPath) == 0 {
- s.curPath.push(&pathStep{typ: t})
- defer s.curPath.pop()
+func (s *state) compareAny(step PathStep) {
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
}
- vx, vy = s.tryExporting(vx, vy)
+ s.recChecker.Check(s.curPath)
+
+ // Obtain the current type and values.
+ t := step.Type()
+ vx, vy := step.Values()
// Rule 1: Check whether an option applies on this node in the value tree.
- if s.tryOptions(vx, vy, t) {
+ if s.tryOptions(t, vx, vy) {
return
}
// Rule 2: Check whether the type has a valid Equal method.
- if s.tryMethod(vx, vy, t) {
+ if s.tryMethod(t, vx, vy) {
return
}
- // Rule 3: Recursively descend into each value's underlying kind.
+ // Rule 3: Compare based on the underlying kind.
switch t.Kind() {
case reflect.Bool:
- s.report(vx.Bool() == vy.Bool(), vx, vy)
- return
+ s.report(vx.Bool() == vy.Bool(), 0)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- s.report(vx.Int() == vy.Int(), vx, vy)
- return
+ s.report(vx.Int() == vy.Int(), 0)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- s.report(vx.Uint() == vy.Uint(), vx, vy)
- return
+ s.report(vx.Uint() == vy.Uint(), 0)
case reflect.Float32, reflect.Float64:
- s.report(vx.Float() == vy.Float(), vx, vy)
- return
+ s.report(vx.Float() == vy.Float(), 0)
case reflect.Complex64, reflect.Complex128:
- s.report(vx.Complex() == vy.Complex(), vx, vy)
- return
+ s.report(vx.Complex() == vy.Complex(), 0)
case reflect.String:
- s.report(vx.String() == vy.String(), vx, vy)
- return
+ s.report(vx.String() == vy.String(), 0)
case reflect.Chan, reflect.UnsafePointer:
- s.report(vx.Pointer() == vy.Pointer(), vx, vy)
- return
+ s.report(vx.Pointer() == vy.Pointer(), 0)
case reflect.Func:
- s.report(vx.IsNil() && vy.IsNil(), vx, vy)
- return
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
case reflect.Ptr:
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), vx, vy)
- return
- }
- s.curPath.push(&indirect{pathStep{t.Elem()}})
- defer s.curPath.pop()
- s.compareAny(vx.Elem(), vy.Elem())
- return
+ s.comparePtr(t, vx, vy)
case reflect.Interface:
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), vx, vy)
- return
- }
- if vx.Elem().Type() != vy.Elem().Type() {
- s.report(false, vx.Elem(), vy.Elem())
- return
- }
- s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
- defer s.curPath.pop()
- s.compareAny(vx.Elem(), vy.Elem())
- return
- case reflect.Slice:
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), vx, vy)
- return
- }
- fallthrough
- case reflect.Array:
- s.compareArray(vx, vy, t)
- return
- case reflect.Map:
- s.compareMap(vx, vy, t)
- return
- case reflect.Struct:
- s.compareStruct(vx, vy, t)
- return
+ s.compareInterface(t, vx, vy)
default:
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
}
}
-func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
- if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
- if sf.force {
- // Use unsafe pointer arithmetic to get read-write access to an
- // unexported field in the struct.
- vx = unsafeRetrieveField(sf.pvx, sf.field)
- vy = unsafeRetrieveField(sf.pvy, sf.field)
- } else {
- // We are not allowed to export the value, so invalidate them
- // so that tryOptions can panic later if not explicitly ignored.
- vx = nothing
- vy = nothing
- }
- }
- return vx, vy
-}
-
-func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
- // If there were no FilterValues, we will not detect invalid inputs,
- // so manually check for them and append invalid if necessary.
- // We still evaluate the options since an ignore can override invalid.
- opts := s.opts
- if !vx.IsValid() || !vy.IsValid() {
- opts = Options{opts, invalid{}}
- }
-
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
// Evaluate all filters and apply the remaining options.
- if opt := opts.filter(s, vx, vy, t); opt != nil {
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
opt.apply(s, vx, vy)
return true
}
return false
}
-func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
// Check if this type even has an Equal method.
m, ok := t.MethodByName("Equal")
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
@@ -318,11 +280,11 @@ func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
}
eq := s.callTTBFunc(m.Func, vx, vy)
- s.report(eq, vx, vy)
+ s.report(eq, reportByMethod)
return true
}
-func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
v = sanitizeValue(v, f.Type().In(0))
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
@@ -333,15 +295,15 @@ func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, v)
+ got := <-c
want := f.Call([]reflect.Value{v})[0]
- if got := <-c; !s.statelessCompare(got, want).Equal() {
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
// To avoid false-positives with non-reflexive equality operations,
// we sanity check whether a value is equal to itself.
- if !s.statelessCompare(want, want).Equal() {
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
return want
}
- fn := getFuncName(f.Pointer())
- panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
}
return want
}
@@ -359,10 +321,10 @@ func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, y, x)
+ got := <-c
want := f.Call([]reflect.Value{x, y})[0].Bool()
- if got := <-c; !got.IsValid() || got.Bool() != want {
- fn := getFuncName(f.Pointer())
- panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
}
return want
}
@@ -380,140 +342,241 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
// assuming that T is assignable to R.
// Otherwise, it returns the input value as is.
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
- // TODO(dsnet): Remove this hacky workaround.
- // See https://golang.org/issue/22143
- if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
- return reflect.New(t).Elem()
+ // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
+ if !flags.AtLeastGo110 {
+ if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+ return reflect.New(t).Elem()
+ }
}
return v
}
-func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
- step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
- s.curPath.push(step)
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+ var vax, vay reflect.Value // Addressable versions of vx and vy
- // Compute an edit-script for slices vx and vy.
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
- step.xkey, step.ykey = ix, iy
- return s.statelessCompare(vx.Index(ix), vy.Index(iy))
- })
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" {
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ step.mayForce = s.exporters[t]
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
- // Report the entire slice as is if the arrays are of primitive kind,
- // and the arrays are different enough.
- isPrimitive := false
- switch t.Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
- reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
- isPrimitive = true
- }
- if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
- s.curPath.pop() // Pop first since we are reporting the whole slice
- s.report(false, vx, vy)
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
- // Replay the edit-script.
+ // TODO: Support cyclic data structures.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}}
+ withIndexes := func(ix, iy int) SliceIndex {
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+ // However, detecting these reliably requires an optimal differencing
+ // algorithm, for which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+ // would be ignored if standing alone. The index of non-discarded elements
+ // are stored in a separate slice, which diffing is then performed on.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
var ix, iy int
- for _, e := range es {
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:]
+ }
switch e {
case diff.UniqueX:
- step.xkey, step.ykey = ix, -1
- s.report(false, vx.Index(ix), nothing)
+ s.compareAny(withIndexes(ix, -1))
ix++
case diff.UniqueY:
- step.xkey, step.ykey = -1, iy
- s.report(false, nothing, vy.Index(iy))
+ s.compareAny(withIndexes(-1, iy))
iy++
default:
- step.xkey, step.ykey = ix, iy
- if e == diff.Identity {
- s.report(true, vx.Index(ix), vy.Index(iy))
- } else {
- s.compareAny(vx.Index(ix), vy.Index(iy))
- }
+ s.compareAny(withIndexes(ix, iy))
ix++
iy++
}
}
- s.curPath.pop()
- return
}
-func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+ s.report(vx.IsNil() && vy.IsNil(), 0)
return
}
+ // TODO: Support cyclic data structures.
+
// We combine and sort the two map keys so that we can perform the
// comparisons in a deterministic order.
- step := &mapIndex{pathStep: pathStep{t.Elem()}}
- s.curPath.push(step)
- defer s.curPath.pop()
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k)
+ step.vy = vy.MapIndex(k)
step.key = k
- vvx := vx.MapIndex(k)
- vvy := vy.MapIndex(k)
- switch {
- case vvx.IsValid() && vvy.IsValid():
- s.compareAny(vvx, vvy)
- case vvx.IsValid() && !vvy.IsValid():
- s.report(false, vvx, nothing)
- case !vvx.IsValid() && vvy.IsValid():
- s.report(false, nothing, vvy)
- default:
- // It is possible for both vvx and vvy to be invalid if the
- // key contained a NaN value in it. There is no way in
- // reflection to be able to retrieve these values.
- // See https://golang.org/issue/11104
- panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+ // key contained a NaN value in it.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
}
+ s.compareAny(step)
}
}
-func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
- var vax, vay reflect.Value // Addressable versions of vx and vy
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
- step := &structField{}
- s.curPath.push(step)
- defer s.curPath.pop()
- for i := 0; i < t.NumField(); i++ {
- vvx := vx.Field(i)
- vvy := vy.Field(i)
- step.typ = t.Field(i).Type
- step.name = t.Field(i).Name
- step.idx = i
- step.unexported = !isExported(step.name)
- if step.unexported {
- // Defer checking of unexported fields until later to give an
- // Ignore a chance to ignore the field.
- if !vax.IsValid() || !vay.IsValid() {
- // For unsafeRetrieveField to work, the parent struct must
- // be addressable. Create a new copy of the values if
- // necessary to make them addressable.
- vax = makeAddressable(vx)
- vay = makeAddressable(vy)
- }
- step.force = s.exporters[t]
- step.pvx = vax
- step.pvy = vay
- step.field = t.Field(i)
+ // TODO: Support cyclic data structures.
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+ if rf&reportByIgnore == 0 {
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
}
- s.compareAny(vvx, vvy)
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
}
}
-// report records the result of a single comparison.
-// It also calls Report if any reporter is registered.
-func (s *state) report(eq bool, vx, vy reflect.Value) {
- if eq {
- s.result.NSame++
- } else {
- s.result.NDiff++
+// recChecker tracks the state needed to periodically perform checks that
+// user provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
}
- if s.reporter != nil {
- s.reporter.Report(vx, vy, eq, s.curPath)
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
}
}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go
similarity index 60%
rename from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
rename to vendor/github.com/google/go-cmp/cmp/export_panic.go
index d1518eb3a8c..abc3a1c3e76 100644
--- a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
+++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
-// +build purego appengine js
+// +build purego
package cmp
@@ -10,6 +10,6 @@ import "reflect"
const supportAllowUnexported = false
-func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
- panic("unsafeRetrieveField is not implemented")
+func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value {
+ panic("retrieveUnexportedField is not implemented")
}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
similarity index 64%
rename from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
rename to vendor/github.com/google/go-cmp/cmp/export_unsafe.go
index 579b65507f6..59d4ee91b47 100644
--- a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
+++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
-// +build !purego,!appengine,!js
+// +build !purego
package cmp
@@ -13,11 +13,11 @@ import (
const supportAllowUnexported = true
-// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
-// such that the value has read-write permissions.
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
//
// The parent struct, v, must be addressable, while f must be a StructField
// describing the field to retrieve.
-func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value {
return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
index 42afa4960ef..fe98dcc6774 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
-// +build !debug
+// +build !cmp_debug
package diff
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
index fd9f7f17739..597b6ae56b1 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
-// +build debug
+// +build cmp_debug
package diff
@@ -14,7 +14,7 @@ import (
)
// The algorithm can be seen running in real-time by enabling debugging:
-// go test -tags=debug -v
+// go test -tags=cmp_debug -v
//
// Example output:
// === RUN TestDifference/#34
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
index 260befea2fd..3d2e42662ca 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -85,22 +85,31 @@ func (es EditScript) LenY() int { return len(es) - es.stats().NX }
type EqualFunc func(ix int, iy int) Result
// Result is the result of comparison.
-// NSame is the number of sub-elements that are equal.
-// NDiff is the number of sub-elements that are not equal.
-type Result struct{ NSame, NDiff int }
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
// Equal indicates whether the symbols are equal. Two symbols are equal
-// if and only if NDiff == 0. If Equal, then they are also Similar.
-func (r Result) Equal() bool { return r.NDiff == 0 }
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
// Similar indicates whether two symbols are similar and may be represented
// by using the Modified type. As a special case, we consider binary comparisons
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
//
-// The exact ratio of NSame to NDiff to determine similarity may change.
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
func (r Result) Similar() bool {
- // Use NSame+1 to offset NSame so that binary comparisons are similar.
- return r.NSame+1 >= r.NDiff
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
}
// Difference reports whether two lists of lengths nx and ny are equal
@@ -191,9 +200,9 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
- // Running the tests with the "debug" build tag prints a visualization of
- // the algorithm running in real-time. This is educational for understanding
- // how the algorithm works. See debug_enable.go.
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
for {
// Forward search from the beginning.
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 00000000000..a9e7fc0b5b3
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
new file mode 100644
index 00000000000..01aed0a1532
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = false
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
new file mode 100644
index 00000000000..c0b667f58b0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = true
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
index 4c35ff11ee1..ace1dbe86e5 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -2,25 +2,34 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
-// Package function identifies function types.
+// Package function provides functionality for identifying function types.
package function
-import "reflect"
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
type funcType int
const (
_ funcType = iota
+ tbFunc // func(T) bool
ttbFunc // func(T, T) bool
+ trbFunc // func(T, R) bool
tibFunc // func(T, I) bool
trFunc // func(T) R
- Equal = ttbFunc // func(T, T) bool
- EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
- Transformer = trFunc // func(T) R
- ValueFilter = ttbFunc // func(T, T) bool
- Less = ttbFunc // func(T, T) bool
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
)
var boolType = reflect.TypeOf(true)
@@ -32,10 +41,18 @@ func IsType(t reflect.Type, ft funcType) bool {
}
ni, no := t.NumIn(), t.NumOut()
switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
case ttbFunc: // func(T, T) bool
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true
}
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
case tibFunc: // func(T, I) bool
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
return true
@@ -47,3 +64,36 @@ func IsType(t reflect.Type, ft funcType) bool {
}
return false
}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return ""
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
deleted file mode 100644
index 657e508779d..00000000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-// Package value provides functionality for reflect.Value types.
-package value
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode"
-)
-
-var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-
-// Format formats the value v as a string.
-//
-// This is similar to fmt.Sprintf("%+v", v) except this:
-// * Prints the type unless it can be elided
-// * Avoids printing struct fields that are zero
-// * Prints a nil-slice as being nil, not empty
-// * Prints map entries in deterministic order
-func Format(v reflect.Value, conf FormatConfig) string {
- conf.printType = true
- conf.followPointers = true
- conf.realPointers = true
- return formatAny(v, conf, nil)
-}
-
-type FormatConfig struct {
- UseStringer bool // Should the String method be used if available?
- printType bool // Should we print the type before the value?
- PrintPrimitiveType bool // Should we print the type of primitives?
- followPointers bool // Should we recursively follow pointers?
- realPointers bool // Should we print the real address of pointers?
-}
-
-func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
- // TODO: Should this be a multi-line printout in certain situations?
-
- if !v.IsValid() {
- return ""
- }
- if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
- if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
- return ""
- }
-
- const stringerPrefix = "s" // Indicates that the String method was used
- s := v.Interface().(fmt.Stringer).String()
- return stringerPrefix + formatString(s)
- }
-
- switch v.Kind() {
- case reflect.Bool:
- return formatPrimitive(v.Type(), v.Bool(), conf)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return formatPrimitive(v.Type(), v.Int(), conf)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
- // Unnamed uints are usually bytes or words, so use hexadecimal.
- return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
- }
- return formatPrimitive(v.Type(), v.Uint(), conf)
- case reflect.Float32, reflect.Float64:
- return formatPrimitive(v.Type(), v.Float(), conf)
- case reflect.Complex64, reflect.Complex128:
- return formatPrimitive(v.Type(), v.Complex(), conf)
- case reflect.String:
- return formatPrimitive(v.Type(), formatString(v.String()), conf)
- case reflect.UnsafePointer, reflect.Chan, reflect.Func:
- return formatPointer(v, conf)
- case reflect.Ptr:
- if v.IsNil() {
- if conf.printType {
- return fmt.Sprintf("(%v)(nil)", v.Type())
- }
- return ""
- }
- if visited[v.Pointer()] || !conf.followPointers {
- return formatPointer(v, conf)
- }
- visited = insertPointer(visited, v.Pointer())
- return "&" + formatAny(v.Elem(), conf, visited)
- case reflect.Interface:
- if v.IsNil() {
- if conf.printType {
- return fmt.Sprintf("%v(nil)", v.Type())
- }
- return ""
- }
- return formatAny(v.Elem(), conf, visited)
- case reflect.Slice:
- if v.IsNil() {
- if conf.printType {
- return fmt.Sprintf("%v(nil)", v.Type())
- }
- return ""
- }
- if visited[v.Pointer()] {
- return formatPointer(v, conf)
- }
- visited = insertPointer(visited, v.Pointer())
- fallthrough
- case reflect.Array:
- var ss []string
- subConf := conf
- subConf.printType = v.Type().Elem().Kind() == reflect.Interface
- for i := 0; i < v.Len(); i++ {
- s := formatAny(v.Index(i), subConf, visited)
- ss = append(ss, s)
- }
- s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
- if conf.printType {
- return v.Type().String() + s
- }
- return s
- case reflect.Map:
- if v.IsNil() {
- if conf.printType {
- return fmt.Sprintf("%v(nil)", v.Type())
- }
- return ""
- }
- if visited[v.Pointer()] {
- return formatPointer(v, conf)
- }
- visited = insertPointer(visited, v.Pointer())
-
- var ss []string
- keyConf, valConf := conf, conf
- keyConf.printType = v.Type().Key().Kind() == reflect.Interface
- keyConf.followPointers = false
- valConf.printType = v.Type().Elem().Kind() == reflect.Interface
- for _, k := range SortKeys(v.MapKeys()) {
- sk := formatAny(k, keyConf, visited)
- sv := formatAny(v.MapIndex(k), valConf, visited)
- ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
- }
- s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
- if conf.printType {
- return v.Type().String() + s
- }
- return s
- case reflect.Struct:
- var ss []string
- subConf := conf
- subConf.printType = true
- for i := 0; i < v.NumField(); i++ {
- vv := v.Field(i)
- if isZero(vv) {
- continue // Elide zero value fields
- }
- name := v.Type().Field(i).Name
- subConf.UseStringer = conf.UseStringer
- s := formatAny(vv, subConf, visited)
- ss = append(ss, fmt.Sprintf("%s: %s", name, s))
- }
- s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
- if conf.printType {
- return v.Type().String() + s
- }
- return s
- default:
- panic(fmt.Sprintf("%v kind not handled", v.Kind()))
- }
-}
-
-func formatString(s string) string {
- // Use quoted string if it the same length as a raw string literal.
- // Otherwise, attempt to use the raw string form.
- qs := strconv.Quote(s)
- if len(qs) == 1+len(s)+1 {
- return qs
- }
-
- // Disallow newlines to ensure output is a single line.
- // Only allow printable runes for readability purposes.
- rawInvalid := func(r rune) bool {
- return r == '`' || r == '\n' || !unicode.IsPrint(r)
- }
- if strings.IndexFunc(s, rawInvalid) < 0 {
- return "`" + s + "`"
- }
- return qs
-}
-
-func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
- if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
- return fmt.Sprintf("%v(%v)", t, v)
- }
- return fmt.Sprintf("%v", v)
-}
-
-func formatPointer(v reflect.Value, conf FormatConfig) string {
- p := v.Pointer()
- if !conf.realPointers {
- p = 0 // For deterministic printing purposes
- }
- s := formatHex(uint64(p))
- if conf.printType {
- return fmt.Sprintf("(%v)(%s)", v.Type(), s)
- }
- return s
-}
-
-func formatHex(u uint64) string {
- var f string
- switch {
- case u <= 0xff:
- f = "0x%02x"
- case u <= 0xffff:
- f = "0x%04x"
- case u <= 0xffffff:
- f = "0x%06x"
- case u <= 0xffffffff:
- f = "0x%08x"
- case u <= 0xffffffffff:
- f = "0x%010x"
- case u <= 0xffffffffffff:
- f = "0x%012x"
- case u <= 0xffffffffffffff:
- f = "0x%014x"
- case u <= 0xffffffffffffffff:
- f = "0x%016x"
- }
- return fmt.Sprintf(f, u)
-}
-
-// insertPointer insert p into m, allocating m if necessary.
-func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
- if m == nil {
- m = make(map[uintptr]bool)
- }
- m[p] = true
- return m
-}
-
-// isZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return v.Bool() == false
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Complex64, reflect.Complex128:
- return v.Complex() == 0
- case reflect.String:
- return v.String() == ""
- case reflect.UnsafePointer:
- return v.Pointer() == 0
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
- return v.IsNil()
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !isZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
new file mode 100644
index 00000000000..0a01c4796f1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
@@ -0,0 +1,23 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego
+
+package value
+
+import "reflect"
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p uintptr
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // NOTE: Storing a pointer as an uintptr is technically incorrect as it
+ // assumes that the GC implementation does not use a moving collector.
+ return Pointer{v.Pointer(), v.Type()}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
new file mode 100644
index 00000000000..da134ae2a80
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
@@ -0,0 +1,26 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
index fe8aa27a077..938f646f000 100644
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
}
// Sort the map keys.
- sort.Sort(valueSorter(vs))
+ sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
// Deduplicate keys (fails for NaNs).
vs2 := vs[:1]
@@ -31,13 +31,6 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
return vs2
}
-// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
-type valueSorter []reflect.Value
-
-func (vs valueSorter) Len() int { return len(vs) }
-func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
-func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
-
// isLess is a generic function for sorting arbitrary map keys.
// The inputs must be of the same type and must be comparable.
func isLess(x, y reflect.Value) bool {
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
new file mode 100644
index 00000000000..d13a12ccfcd
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
@@ -0,0 +1,45 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import "reflect"
+
+// IsZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func IsZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return v.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.String:
+ return v.String() == ""
+ case reflect.UnsafePointer:
+ return v.Pointer() == 0
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ return v.IsNil()
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !IsZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !IsZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
index 91d4b066e05..793448160ee 100644
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -7,7 +7,7 @@ package cmp
import (
"fmt"
"reflect"
- "runtime"
+ "regexp"
"strings"
"github.com/google/go-cmp/cmp/internal/function"
@@ -29,11 +29,11 @@ type Option interface {
// An Options is returned only if multiple comparers or transformers
// can apply simultaneously and will only contain values of those types
// or sub-Options containing values of those types.
- filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
}
// applicableOption represents the following types:
-// Fundamental: ignore | invalid | *comparer | *transformer
+// Fundamental: ignore | validator | *comparer | *transformer
// Grouping: Options
type applicableOption interface {
Option
@@ -43,7 +43,7 @@ type applicableOption interface {
}
// coreOption represents the following types:
-// Fundamental: ignore | invalid | *comparer | *transformer
+// Fundamental: ignore | validator | *comparer | *transformer
// Filters: *pathFilter | *valuesFilter
type coreOption interface {
Option
@@ -63,19 +63,19 @@ func (core) isCore() {}
// on all individual options held within.
type Options []Option
-func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
for _, opt := range opts {
- switch opt := opt.filter(s, vx, vy, t); opt.(type) {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
case ignore:
return ignore{} // Only ignore can short-circuit evaluation
- case invalid:
- out = invalid{} // Takes precedence over comparer or transformer
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
case *comparer, *transformer, Options:
switch out.(type) {
case nil:
out = opt
- case invalid:
- // Keep invalid
+ case validator:
+ // Keep validator
case *comparer, *transformer, Options:
out = Options{out, opt} // Conflicting comparers or transformers
}
@@ -106,6 +106,11 @@ func (opts Options) String() string {
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
func FilterPath(f func(Path) bool, opt Option) Option {
@@ -124,22 +129,22 @@ type pathFilter struct {
opt Option
}
-func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
if f.fnc(s.curPath) {
- return f.opt.filter(s, vx, vy, t)
+ return f.opt.filter(s, t, vx, vy)
}
return nil
}
func (f pathFilter) String() string {
- fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
- return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
-// current pair of values being compared. If the type of the values is not
-// assignable to T, then this filter implicitly returns false.
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
//
// The filter function must be
// symmetric (i.e., agnostic to the order of the inputs) and
@@ -171,19 +176,18 @@ type valuesFilter struct {
opt Option
}
-func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
- if !vx.IsValid() || !vy.IsValid() {
- return invalid{}
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
}
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
- return f.opt.filter(s, vx, vy, t)
+ return f.opt.filter(s, t, vx, vy)
}
return nil
}
func (f valuesFilter) String() string {
- fn := getFuncName(f.fnc.Pointer())
- return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
}
// Ignore is an Option that causes all comparisons to be ignored.
@@ -194,20 +198,45 @@ func Ignore() Option { return ignore{} }
type ignore struct{ core }
func (ignore) isFiltered() bool { return false }
-func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
-func (ignore) apply(_ *state, _, _ reflect.Value) { return }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
func (ignore) String() string { return "Ignore()" }
-// invalid is a sentinel Option type to indicate that some options could not
-// be evaluated due to unexported fields.
-type invalid struct{ core }
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Unable to Interface implies unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported"
+ panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+ }
-func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
-func (invalid) apply(s *state, _, _ reflect.Value) {
- const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
- panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+ panic("not reachable")
}
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
// Transformer returns an Option that applies a transformation function that
// converts values of a certain type into that of another.
//
@@ -220,18 +249,25 @@ func (invalid) apply(s *state, _, _ reflect.Value) {
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
+// For situations where the implicit filter is still insufficient,
+// consider using cmpopts.AcyclicTransformer, which adds a filter
+// to prevent the transformer from being recursively applied upon itself.
//
// The name is a user provided label that is used as the Transform.Name in the
-// transformation PathStep. If empty, an arbitrary name is used.
+// transformation PathStep (and eventually shown in the Diff output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
panic(fmt.Sprintf("invalid transformer function: %T", f))
}
if name == "" {
- name = "λ" // Lambda-symbol as place-holder for anonymous transformer
- }
- if !isValid(name) {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
panic(fmt.Sprintf("invalid name: %q", name))
}
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
@@ -250,9 +286,9 @@ type transformer struct {
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
-func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
for i := len(s.curPath) - 1; i >= 0; i-- {
- if t, ok := s.curPath[i].(*transform); !ok {
+ if t, ok := s.curPath[i].(Transform); !ok {
break // Hit most recent non-Transform step
} else if tr == t.trans {
return nil // Cannot directly use same Transform
@@ -265,18 +301,15 @@ func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) appl
}
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
- // Update path before calling the Transformer so that dynamic checks
- // will use the updated path.
- s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
- defer s.curPath.pop()
-
- vx = s.callTRFunc(tr.fnc, vx)
- vy = s.callTRFunc(tr.fnc, vy)
- s.compareAny(vx, vy)
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
}
func (tr transformer) String() string {
- return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
}
// Comparer returns an Option that determines whether two values are equal
@@ -311,7 +344,7 @@ type comparer struct {
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
-func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
if cm.typ == nil || t.AssignableTo(cm.typ) {
return cm
}
@@ -320,11 +353,11 @@ func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applica
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
eq := s.callTTBFunc(cm.fnc, vx, vy)
- s.report(eq, vx, vy)
+ s.report(eq, reportByFunc)
}
func (cm comparer) String() string {
- return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
}
// AllowUnexported returns an Option that forcibly allows operations on
@@ -338,7 +371,7 @@ func (cm comparer) String() string {
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
-// For some cases, a custom Comparer should be used instead that defines
+// In many cases, a custom Comparer should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
@@ -370,27 +403,92 @@ func AllowUnexported(types ...interface{}) Option {
type visibleStructs map[reflect.Type]bool
-func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
+func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
panic("not implemented")
}
-// reporter is an Option that configures how differences are reported.
-type reporter interface {
- // TODO: Not exported yet.
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Result (see Reporter).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if Equal reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a Comparer function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+)
+
+// Reporter is an Option that can be passed to Equal. When Equal traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascend out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
//
- // Perhaps add PushStep and PopStep and change Report to only accept
- // a PathStep instead of the full-path? Adding a PushStep and PopStep makes
- // it clear that we are traversing the value tree in a depth-first-search
- // manner, which has an effect on how values are printed.
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
- Option
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
- // Report is called for every comparison made and will be provided with
- // the two values being compared, the equality result, and the
- // current path in the value tree. It is possible for x or y to be an
- // invalid reflect.Value if one of the values is non-existent;
- // which is possible with maps and slices.
- Report(x, y reflect.Value, eq bool, p Path)
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
}
// normalizeOption normalizes the input options such that all Options groups
@@ -424,30 +522,3 @@ func flattenOptions(dst, src Options) Options {
}
return dst
}
-
-// getFuncName returns a short function name from the pointer.
-// The string parsing logic works up until Go1.9.
-func getFuncName(p uintptr) string {
- fnc := runtime.FuncForPC(p)
- if fnc == nil {
- return ""
- }
- name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
- if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
- // Strip the package name from method name.
- name = strings.TrimSuffix(name, ")-fm")
- name = strings.TrimSuffix(name, ")·fm")
- if i := strings.LastIndexByte(name, '('); i >= 0 {
- methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
- if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
- methodName = methodName[j+1:] // E.g., "myfunc"
- }
- name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
- }
- }
- if i := strings.LastIndexByte(name, '/'); i >= 0 {
- // Strip the package name.
- name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
- }
- return name
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
index c08a3cf80d9..96fffd291f7 100644
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -12,80 +12,52 @@ import (
"unicode/utf8"
)
-type (
- // Path is a list of PathSteps describing the sequence of operations to get
- // from some root type to the current position in the value tree.
- // The first Path element is always an operation-less PathStep that exists
- // simply to identify the initial type.
- //
- // When traversing structs with embedded structs, the embedded struct will
- // always be accessed as a field before traversing the fields of the
- // embedded struct themselves. That is, an exported field from the
- // embedded struct will never be accessed directly from the parent struct.
- Path []PathStep
-
- // PathStep is a union-type for specific operations to traverse
- // a value's tree structure. Users of this package never need to implement
- // these types as values of this type will be returned by this package.
- PathStep interface {
- String() string
- Type() reflect.Type // Resulting type after performing the path step
- isPathStep()
- }
+// Path is a list of PathSteps describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less PathStep that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
- // SliceIndex is an index operation on a slice or array at some index Key.
- SliceIndex interface {
- PathStep
- Key() int // May return -1 if in a split state
-
- // SplitKeys returns the indexes for indexing into slices in the
- // x and y values, respectively. These indexes may differ due to the
- // insertion or removal of an element in one of the slices, causing
- // all of the indexes to be shifted. If an index is -1, then that
- // indicates that the element does not exist in the associated slice.
- //
- // Key is guaranteed to return -1 if and only if the indexes returned
- // by SplitKeys are not the same. SplitKeys will never return -1 for
- // both indexes.
- SplitKeys() (x int, y int)
-
- isSliceIndex()
- }
- // MapIndex is an index operation on a map at some index Key.
- MapIndex interface {
- PathStep
- Key() reflect.Value
- isMapIndex()
- }
- // TypeAssertion represents a type assertion on an interface.
- TypeAssertion interface {
- PathStep
- isTypeAssertion()
- }
- // StructField represents a struct field access on a field called Name.
- StructField interface {
- PathStep
- Name() string
- Index() int
- isStructField()
- }
- // Indirect represents pointer indirection on the parent type.
- Indirect interface {
- PathStep
- isIndirect()
- }
- // Transform is a transformation from the parent type to the current type.
- Transform interface {
- PathStep
- Name() string
- Func() reflect.Value
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// these types as values of this type will be returned by this package.
+//
+// Implementations of this interface are
+// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
+type PathStep interface {
+ String() string
- // Option returns the originally constructed Transformer option.
- // The == operator can be used to detect the exact option used.
- Option() Option
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
- isTransform()
- }
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // • For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // AllowUnexported to traverse unexported fields.
+ // • For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // • For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
)
func (pa *Path) push(s PathStep) {
@@ -124,7 +96,7 @@ func (pa Path) Index(i int) PathStep {
func (pa Path) String() string {
var ss []string
for _, s := range pa {
- if _, ok := s.(*structField); ok {
+ if _, ok := s.(StructField); ok {
ss = append(ss, s.String())
}
}
@@ -144,13 +116,13 @@ func (pa Path) GoString() string {
nextStep = pa[i+1]
}
switch s := s.(type) {
- case *indirect:
+ case Indirect:
numIndirect++
pPre, pPost := "(", ")"
switch nextStep.(type) {
- case *indirect:
+ case Indirect:
continue // Next step is indirection, so let them batch up
- case *structField:
+ case StructField:
numIndirect-- // Automatic indirection on struct fields
case nil:
pPre, pPost = "", "" // Last step; no need for parenthesis
@@ -161,19 +133,10 @@ func (pa Path) GoString() string {
}
numIndirect = 0
continue
- case *transform:
+ case Transform:
ssPre = append(ssPre, s.trans.name+"(")
ssPost = append(ssPost, ")")
continue
- case *typeAssertion:
- // As a special-case, elide type assertions on anonymous types
- // since they are typically generated dynamically and can be very
- // verbose. For example, some transforms return interface{} because
- // of Go's lack of generics, but typically take in and return the
- // exact same concrete type.
- if s.Type().PkgPath() == "" {
- continue
- }
}
ssPost = append(ssPost, s.String())
}
@@ -183,44 +146,13 @@ func (pa Path) GoString() string {
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
}
-type (
- pathStep struct {
- typ reflect.Type
- }
-
- sliceIndex struct {
- pathStep
- xkey, ykey int
- }
- mapIndex struct {
- pathStep
- key reflect.Value
- }
- typeAssertion struct {
- pathStep
- }
- structField struct {
- pathStep
- name string
- idx int
-
- // These fields are used for forcibly accessing an unexported field.
- // pvx, pvy, and field are only valid if unexported is true.
- unexported bool
- force bool // Forcibly allow visibility
- pvx, pvy reflect.Value // Parent values
- field reflect.StructField // Field information
- }
- indirect struct {
- pathStep
- }
- transform struct {
- pathStep
- trans *transformer
- }
-)
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
-func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
func (ps pathStep) String() string {
if ps.typ == nil {
return ""
@@ -232,7 +164,54 @@ func (ps pathStep) String() string {
return fmt.Sprintf("{%s}", s)
}
-func (si sliceIndex) String() string {
+// StructField represents a struct field access on a field called Name.
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ pvx, pvy reflect.Value // Parent values
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field)
+ vy = retrieveUnexportedField(sf.pvy, sf.field)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See reflect.Type.Field.
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is an index operation on a slice or array at some index Key.
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
switch {
case si.xkey == si.ykey:
return fmt.Sprintf("[%d]", si.xkey)
@@ -247,63 +226,83 @@ func (si sliceIndex) String() string {
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
}
}
-func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
-func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
-func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) }
-func (in indirect) String() string { return "*" }
-func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
-func (si sliceIndex) Key() int {
+// Key is the index key; it may return -1 if in a split state
+func (si SliceIndex) Key() int {
if si.xkey != si.ykey {
return -1
}
return si.xkey
}
-func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
-func (mi mapIndex) Key() reflect.Value { return mi.key }
-func (sf structField) Name() string { return sf.name }
-func (sf structField) Index() int { return sf.idx }
-func (tf transform) Name() string { return tf.trans.name }
-func (tf transform) Func() reflect.Value { return tf.trans.fnc }
-func (tf transform) Option() Option { return tf.trans }
-
-func (pathStep) isPathStep() {}
-func (sliceIndex) isSliceIndex() {}
-func (mapIndex) isMapIndex() {}
-func (typeAssertion) isTypeAssertion() {}
-func (structField) isStructField() {}
-func (indirect) isIndirect() {}
-func (transform) isTransform() {}
-var (
- _ SliceIndex = sliceIndex{}
- _ MapIndex = mapIndex{}
- _ TypeAssertion = typeAssertion{}
- _ StructField = structField{}
- _ Indirect = indirect{}
- _ Transform = transform{}
-
- _ PathStep = sliceIndex{}
- _ PathStep = mapIndex{}
- _ PathStep = typeAssertion{}
- _ PathStep = structField{}
- _ PathStep = indirect{}
- _ PathStep = transform{}
-)
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// Key is guaranteed to return -1 if and only if the indexes returned
+// by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
+
+// Transform is a transformation from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the Transformer.
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed Transformer option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
-
-// isValid reports whether the identifier is valid.
-// Empty and underscore-only strings are not valid.
-func isValid(id string) bool {
- ok := id != "" && id != "_"
- for j, c := range id {
- ok = ok && (j > 0 || !unicode.IsDigit(c))
- ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
- }
- return ok
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 00000000000..6ddf29993e5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,51 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ return formatOptions{}.FormatDiff(r.root).String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 00000000000..05efb992c53
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,296 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO: Enforce limits?
+// * Enforce maximum number of records to print per node?
+// * Enforce maximum size in bytes allowed?
+// * As a heuristic, use less verbosity for equal nodes than unequal nodes.
+// TODO: Enforce unique outputs?
+// * Avoid Stringer methods if it results in same output?
+// * Print pointer address if outputs still equal?
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the later
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode) textNode {
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ if v.MaxDepth == 0 {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, visitedPointers{})
+ outy := opts.FormatValue(v.ValueY, visitedPointers{})
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{})
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{})
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, visitedPointers{})
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, visitedPointers{})
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value)
+ out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
+ return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k))
+ case reflect.Ptr:
+ return textWrap{"&", opts.FormatDiff(v.Value), ""}
+ case reflect.Interface:
+ return opts.WithTypeMode(emitType).FormatDiff(v.Value)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = formatMapKey
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueX)
+ case diffRemoved:
+ isZero = value.IsZero(r.Value.ValueX)
+ case diffInserted:
+ isZero = value.IsZero(r.Value.ValueY)
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return textWrap{"{", list, "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var list textList
+ groups := coalesceAdjacentRecords(name, recs)
+ for i, ds := range groups {
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value)
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ }
+ default:
+ out := opts.FormatDiff(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ }
+ assert(len(recs) == 0)
+ return textWrap{"{", list, "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal, or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 00000000000..5521c604c54
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,279 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // ShallowPointers controls whether to avoid descending into pointers.
+ // Useful when printing map keys, where pointer comparison is performed
+ // on the pointer address rather than the pointed-at value.
+ ShallowPointers bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := t.String()
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ typeName = strings.Replace(typeName, "struct {", "struct{", -1)
+ typeName = strings.Replace(typeName, "interface {", "interface{", -1)
+ }
+
+ // Avoid wrap the value in parenthesis if unnecessary.
+ if s, ok := s.(textWrap); ok {
+ hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")")
+ hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
+ if hasParens || hasBraces {
+ return textWrap{typeName, s, ""}
+ }
+ }
+ return textWrap{typeName + "(", s, ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in m. As pointers are visited, m is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ switch v := v.Interface().(type) {
+ case error:
+ return textLine("e" + formatString(v.Error()))
+ case fmt.Stringer:
+ return textLine("s" + formatString(v.String()))
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ var ptr string
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ // Unnamed uints are usually bytes or words, so use hexadecimal.
+ if t.PkgPath() == "" || t.Kind() == reflect.Uintptr {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return textLine(formatString(v.String()))
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(v))
+ case reflect.Struct:
+ var list textList
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if value.IsZero(vv) {
+ continue // Elide fields with zero values
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, m)
+ list = append(list, textRecord{Key: t.Field(i).Name, Value: s})
+ }
+ return textWrap{"{", list, "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+ if opts.PrintAddresses {
+ ptr = formatPointer(v)
+ }
+ fallthrough
+ case reflect.Array:
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ vi := v.Index(i)
+ if vi.CanAddr() { // Check for cyclic elements
+ p := vi.Addr()
+ if m.Visit(p) {
+ var out textNode
+ out = textLine(formatPointer(p))
+ out = opts.WithTypeMode(emitType).FormatType(p.Type(), out)
+ out = textWrap{"*", out, ""}
+ list = append(list, textRecord{Value: out})
+ continue
+ }
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(vi, m)
+ list = append(list, textRecord{Value: s})
+ }
+ return textWrap{ptr + "{", list, "}"}
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+ if m.Visit(v) {
+ return textLine(formatPointer(v))
+ }
+
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ sk := formatMapKey(k)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+ if opts.PrintAddresses {
+ ptr = formatPointer(v)
+ }
+ return textWrap{ptr + "{", list, "}"}
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+ if m.Visit(v) || opts.ShallowPointers {
+ return textLine(formatPointer(v))
+ }
+ if opts.PrintAddresses {
+ ptr = formatPointer(v)
+ }
+ skipType = true // Let the underlying value print the type instead
+ return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""}
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ skipType = true // Print the concrete type instead
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value) string {
+ var opts formatOptions
+ opts.TypeMode = elideType
+ opts.AvoidStringer = true
+ opts.ShallowPointers = true
+ s := opts.FormatValue(v, visitedPointers{}).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+ // Use quoted string if it the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
+
+// formatPointer prints the address of the pointer.
+func formatPointer(v reflect.Value) string {
+ p := v.Pointer()
+ if flags.Deterministic {
+ p = 0xdeadf00f // Only used for stable testing purposes
+ }
+ return fmt.Sprintf("⟪0x%x⟫", p)
+}
+
+type visitedPointers map[value.Pointer]struct{}
+
+// Visit inserts pointer v into the visited map and reports whether it had
+// already been visited before.
+func (m visitedPointers) Visit(v reflect.Value) bool {
+ p := value.PointerOf(v)
+ _, visited := m[p]
+ m[p] = struct{}{}
+ return visited
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 00000000000..8cb3265e767
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,333 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
+ // TODO: Handle the case where someone uses bytes.Equal on a large slice.
+ return false // Some custom option was used to determined equality
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ }
+
+ switch t := v.Type; t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+ // use specialized formatting even if length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 64
+ return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+
+ // Auto-detect the type of the data.
+ var isLinedText, isText, isBinary bool
+ var sx, sy string
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isText = true // Initial estimate, verify later
+ case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isBinary = true // Initial estimate, verify later
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isText || isBinary {
+ var numLines, lastLineIdx, maxLineLen int
+ isBinary = false
+ for i, r := range sx + sy {
+ if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
+ isBinary = true
+ break
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ lastLineIdx = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isText = !isBinary
+ isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isLinedText:
+ ssx := strings.Split(sx, "\n")
+ ssy := strings.Split(sy, "\n")
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = ""
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = textWrap{"{", list, "}"}
+ if !isText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)}
+ if t != reflect.TypeOf(string("")) {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)}
+ if t != reflect.TypeOf([]byte(nil)) {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
+ return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ for i, ds := range groups {
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ }
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats(1).NumIdentical++
+ case diff.UniqueX:
+ lastStats(2).NumRemoved++
+ case diff.UniqueY:
+ lastStats(2).NumInserted++
+ case diff.Modified:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = (*prev).Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 00000000000..80605d0e440
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,382 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed as a single-line.
+ // If a node can be collapsed as a single-line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+}
+
+func (s textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := ds != diffStats{}
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return textWrap{"{", s, "}"}.String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return len(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return len(r.Value.(textLine)) },
+ )
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.Value.Equal(textEllipsis) {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name = name + "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 00000000000..83031a7f507
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirect, or
+ // type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go
deleted file mode 100644
index 20e9f18e0dd..00000000000
--- a/vendor/github.com/google/go-cmp/cmp/reporter.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-package cmp
-
-import (
- "fmt"
- "reflect"
- "strings"
-
- "github.com/google/go-cmp/cmp/internal/value"
-)
-
-type defaultReporter struct {
- Option
- diffs []string // List of differences, possibly truncated
- ndiffs int // Total number of differences
- nbytes int // Number of bytes in diffs
- nlines int // Number of lines in diffs
-}
-
-var _ reporter = (*defaultReporter)(nil)
-
-func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
- if eq {
- return // Ignore equal results
- }
- const maxBytes = 4096
- const maxLines = 256
- r.ndiffs++
- if r.nbytes < maxBytes && r.nlines < maxLines {
- sx := value.Format(x, value.FormatConfig{UseStringer: true})
- sy := value.Format(y, value.FormatConfig{UseStringer: true})
- if sx == sy {
- // Unhelpful output, so use more exact formatting.
- sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
- sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
- }
- s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
- r.diffs = append(r.diffs, s)
- r.nbytes += len(s)
- r.nlines += strings.Count(s, "\n")
- }
-}
-
-func (r *defaultReporter) String() string {
- s := strings.Join(r.diffs, "")
- if r.ndiffs == len(r.diffs) {
- return s
- }
- return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
-}
diff --git a/vendor/github.com/knative/pkg/apis/condition_set.go b/vendor/github.com/knative/pkg/apis/condition_set.go
index 7148352ca68..d4c70098e1f 100644
--- a/vendor/github.com/knative/pkg/apis/condition_set.go
+++ b/vendor/github.com/knative/pkg/apis/condition_set.go
@@ -74,9 +74,6 @@ type ConditionManager interface {
// InitializeConditions updates all Conditions in the ConditionSet to Unknown
// if not set.
InitializeConditions()
-
- // InitializeCondition updates a Condition to Unknown if not set.
- InitializeCondition(t ConditionType)
}
// NewLivingConditionSet returns a ConditionSet to hold the conditions for the
@@ -302,19 +299,37 @@ func (r conditionsImpl) MarkFalse(t ConditionType, reason, messageFormat string,
// InitializeConditions updates all Conditions in the ConditionSet to Unknown
// if not set.
func (r conditionsImpl) InitializeConditions() {
+ happy := r.GetCondition(r.happy)
+ if happy == nil {
+ happy = &Condition{
+ Type: r.happy,
+ Status: corev1.ConditionUnknown,
+ Severity: ConditionSeverityError,
+ }
+ r.SetCondition(*happy)
+ }
+ // If the happy state is true, it implies that all of the terminal
+ // subconditions must be true, so initialize any unset conditions to
+ // true if our happy condition is true, otherwise unknown.
+ status := corev1.ConditionUnknown
+ if happy.Status == corev1.ConditionTrue {
+ status = corev1.ConditionTrue
+ }
for _, t := range r.dependents {
- r.InitializeCondition(t)
+ r.initializeTerminalCondition(t, status)
}
- r.InitializeCondition(r.happy)
}
-// InitializeCondition updates a Condition to Unknown if not set.
-func (r conditionsImpl) InitializeCondition(t ConditionType) {
- if c := r.GetCondition(t); c == nil {
- r.SetCondition(Condition{
- Type: t,
- Status: corev1.ConditionUnknown,
- Severity: r.severity(t),
- })
+// initializeTerminalCondition initializes a Condition to the given status if unset.
+func (r conditionsImpl) initializeTerminalCondition(t ConditionType, status corev1.ConditionStatus) *Condition {
+ if c := r.GetCondition(t); c != nil {
+ return c
+ }
+ c := Condition{
+ Type: t,
+ Status: status,
+ Severity: ConditionSeverityError,
}
+ r.SetCondition(c)
+ return &c
}
diff --git a/vendor/github.com/knative/pkg/apis/contexts.go b/vendor/github.com/knative/pkg/apis/contexts.go
index 3a775b8fc6a..466b89809f1 100644
--- a/vendor/github.com/knative/pkg/apis/contexts.go
+++ b/vendor/github.com/knative/pkg/apis/contexts.go
@@ -20,6 +20,7 @@ import (
"context"
authenticationv1 "k8s.io/api/authentication/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// This is attached to contexts passed to webhook interfaces when
@@ -75,3 +76,77 @@ func GetUserInfo(ctx context.Context) *authenticationv1.UserInfo {
}
return nil
}
+
+// This is attached to contexts as they are passed down through a resource
+// being validated or defaulted to signal the ObjectMeta of the enclosing
+// resource.
+type parentMetaKey struct{}
+
+// WithinParent attaches the ObjectMeta of the resource enclosing the
+// nested resources we are validating. This is intended for use with
+// interfaces like apis.Defaultable and apis.Validatable.
+func WithinParent(ctx context.Context, om metav1.ObjectMeta) context.Context {
+ return context.WithValue(ctx, parentMetaKey{}, om)
+}
+
+// ParentMeta accesses the ObjectMeta of the enclosing parent resource
+// from the context. See WithinParent for how to attach the parent's
+// ObjectMeta to the context.
+func ParentMeta(ctx context.Context) metav1.ObjectMeta {
+ if om, ok := ctx.Value(parentMetaKey{}).(metav1.ObjectMeta); ok {
+ return om
+ }
+ return metav1.ObjectMeta{}
+}
+
+// This is attached to contexts as they are passed down through a resource
+// being validated or defaulted to signal that we are within a Spec.
+type inSpec struct{}
+
+// WithinSpec notes on the context that further validation or defaulting
+// is within the context of a Spec. This is intended for use with
+// interfaces like apis.Defaultable and apis.Validatable.
+func WithinSpec(ctx context.Context) context.Context {
+ return context.WithValue(ctx, inSpec{}, struct{}{})
+}
+
+// IsInSpec returns whether the context of validation or defaulting is
+// the Spec of the parent resource.
+func IsInSpec(ctx context.Context) bool {
+ return ctx.Value(inSpec{}) != nil
+}
+
+// This is attached to contexts as they are passed down through a resource
+// being validated or defaulted to signal that we are within a Status.
+type inStatus struct{}
+
+// WithinStatus notes on the context that further validation or defaulting
+// is within the context of a Status. This is intended for use with
+// interfaces like apis.Defaultable and apis.Validatable.
+func WithinStatus(ctx context.Context) context.Context {
+ return context.WithValue(ctx, inStatus{}, struct{}{})
+}
+
+// IsInStatus returns whether the context of validation or defaulting is
+// the Status of the parent resource.
+func IsInStatus(ctx context.Context) bool {
+ return ctx.Value(inStatus{}) != nil
+}
+
+// This is attached to contexts as they are passed down through a resource
+// being validated to direct them to disallow deprecated fields.
+type disallowDeprecated struct{}
+
+// DisallowDeprecated notes on the context that further validation
+// should disallow the used of deprecated fields. This may be used
+// to ensure that new paths through resources to a common type don't
+// allow the mistakes of old versions to be introduced.
+func DisallowDeprecated(ctx context.Context) context.Context {
+ return context.WithValue(ctx, disallowDeprecated{}, struct{}{})
+}
+
+// IsDeprecatedAllowed checks the context to see whether deprecated fields
+// are allowed.
+func IsDeprecatedAllowed(ctx context.Context) bool {
+ return ctx.Value(disallowDeprecated{}) == nil
+}
diff --git a/vendor/github.com/knative/pkg/apis/field_error.go b/vendor/github.com/knative/pkg/apis/field_error.go
index eafc09b02bb..da498281b02 100644
--- a/vendor/github.com/knative/pkg/apis/field_error.go
+++ b/vendor/github.com/knative/pkg/apis/field_error.go
@@ -300,17 +300,17 @@ func ErrDisallowedFields(fieldPaths ...string) *FieldError {
}
}
-// ErrInvalidArrayValue consturcts a FieldError for a repetetive `field`
+// ErrInvalidArrayValue constructs a FieldError for a repetitive `field`
// at `index` that has received an invalid string value.
-func ErrInvalidArrayValue(value, field string, index int) *FieldError {
+func ErrInvalidArrayValue(value interface{}, field string, index int) *FieldError {
return ErrInvalidValue(value, CurrentField).ViaFieldIndex(field, index)
}
// ErrInvalidValue constructs a FieldError for a field that has received an
// invalid string value.
-func ErrInvalidValue(value, fieldPath string) *FieldError {
+func ErrInvalidValue(value interface{}, fieldPath string) *FieldError {
return &FieldError{
- Message: fmt.Sprintf("invalid value %q", value),
+ Message: fmt.Sprintf("invalid value: %v", value),
Paths: []string{fieldPath},
}
}
@@ -335,9 +335,9 @@ func ErrMultipleOneOf(fieldPaths ...string) *FieldError {
// ErrInvalidKeyName is a variadic helper method for constructing a FieldError
// that specifies a key name that is invalid.
-func ErrInvalidKeyName(value, fieldPath string, details ...string) *FieldError {
+func ErrInvalidKeyName(key, fieldPath string, details ...string) *FieldError {
return &FieldError{
- Message: fmt.Sprintf("invalid key name %q", value),
+ Message: fmt.Sprintf("invalid key name %q", key),
Paths: []string{fieldPath},
Details: strings.Join(details, ", "),
}
@@ -345,9 +345,9 @@ func ErrInvalidKeyName(value, fieldPath string, details ...string) *FieldError {
// ErrOutOfBoundsValue constructs a FieldError for a field that has received an
// out of bound value.
-func ErrOutOfBoundsValue(value, lower, upper, fieldPath string) *FieldError {
+func ErrOutOfBoundsValue(value, lower, upper interface{}, fieldPath string) *FieldError {
return &FieldError{
- Message: fmt.Sprintf("expected %s <= %s <= %s", lower, value, upper),
+ Message: fmt.Sprintf("expected %v <= %v <= %v", lower, value, upper),
Paths: []string{fieldPath},
}
}
diff --git a/vendor/github.com/knative/pkg/apis/interfaces.go b/vendor/github.com/knative/pkg/apis/interfaces.go
index aa1e4d29d84..601d083dd63 100644
--- a/vendor/github.com/knative/pkg/apis/interfaces.go
+++ b/vendor/github.com/knative/pkg/apis/interfaces.go
@@ -38,10 +38,10 @@ type Validatable interface {
// "higher" versions of the same type.
type Convertible interface {
// ConvertUp up-converts the receiver into `to`.
- ConvertUp(to Convertible) error
+ ConvertUp(ctx context.Context, to Convertible) error
// ConvertDown down-converts from `from` into the receiver.
- ConvertDown(from Convertible) error
+ ConvertDown(ctx context.Context, from Convertible) error
}
// Immutable indicates that a particular type has fields that should
diff --git a/vendor/github.com/knative/pkg/kmp/diff.go b/vendor/github.com/knative/pkg/kmp/diff.go
index ef9bae39e5e..09c041446b3 100644
--- a/vendor/github.com/knative/pkg/kmp/diff.go
+++ b/vendor/github.com/knative/pkg/kmp/diff.go
@@ -36,6 +36,8 @@ func init() {
// SafeDiff wraps cmp.Diff but recovers from panics and uses custom Comparers for:
// * k8s.io/apimachinery/pkg/api/resource.Quantity
+// SafeDiff should be used instead of cmp.Diff in non-test code to protect the running
+// process from crashing.
func SafeDiff(x, y interface{}, opts ...cmp.Option) (diff string, err error) {
// cmp.Diff will panic if we miss something; return error instead of crashing.
defer func() {
@@ -50,6 +52,10 @@ func SafeDiff(x, y interface{}, opts ...cmp.Option) (diff string, err error) {
return
}
+// SafeEqual wraps cmp.Equal but recovers from panics and uses custom Comparers for:
+// * k8s.io/apimachinery/pkg/api/resource.Quantity
+// SafeEqual should be used instead of cmp.Equal in non-test code to protect the running
+// process from crashing.
func SafeEqual(x, y interface{}, opts ...cmp.Option) (equal bool, err error) {
// cmp.Equal will panic if we miss something; return error instead of crashing.
defer func() {
@@ -63,3 +69,24 @@ func SafeEqual(x, y interface{}, opts ...cmp.Option) (equal bool, err error) {
return
}
+
+// CompareSetFields returns a list of field names that differ between
+// x and y. Uses SafeEqual for comparison.
+func CompareSetFields(x, y interface{}, opts ...cmp.Option) ([]string, error) {
+ r := new(FieldListReporter)
+ opts = append(opts, cmp.Reporter(r))
+ _, err := SafeEqual(x, y, opts...)
+ return r.Fields(), err
+}
+
+// ShortDiff returns a zero-context, unified human-readable diff.
+// Uses SafeEqual for comparison.
+func ShortDiff(prev, cur interface{}, opts ...cmp.Option) (string, error) {
+ r := new(ShortDiffReporter)
+ opts = append(opts, cmp.Reporter(r))
+ var err error
+ if _, err = SafeEqual(prev, cur, opts...); err != nil {
+ return "", err
+ }
+ return r.Diff()
+}
diff --git a/vendor/github.com/knative/pkg/kmp/reporters.go b/vendor/github.com/knative/pkg/kmp/reporters.go
new file mode 100644
index 00000000000..e09cf2f37a6
--- /dev/null
+++ b/vendor/github.com/knative/pkg/kmp/reporters.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kmp
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+// FieldListReporter implements the cmp.Reporter interface. It keeps
+// track of the field names that differ between two structs and reports
+// them through the Fields() function.
+type FieldListReporter struct {
+ path cmp.Path
+ fieldNames []string
+}
+
+// PushStep implements the cmp.Reporter.
+func (r *FieldListReporter) PushStep(ps cmp.PathStep) {
+ r.path = append(r.path, ps)
+}
+
+// fieldName returns a readable name for the field. If the field has JSON annotations it
+// returns the JSON key. If the field does not have JSON annotations or the JSON annotation
+// marks the field as ignored, it returns the field's Go name.
+func (r *FieldListReporter) fieldName() string {
+ if len(r.path) < 2 {
+ return r.path.Index(0).String()
+ } else {
+ fieldName := strings.TrimPrefix(r.path.Index(1).String(), ".")
+ // Prefer JSON name to fieldName if it exists
+ structField, exists := r.path.Index(0).Type().FieldByName(fieldName)
+ if exists {
+ tag := structField.Tag.Get("json")
+ if tag != "" && tag != "-" {
+ return strings.SplitN(tag, ",", 2)[0]
+ }
+
+ }
+ return fieldName
+ }
+}
+
+// Report implements the cmp.Reporter.
+func (r *FieldListReporter) Report(rs cmp.Result) {
+ if rs.Equal() {
+ return
+ }
+ name := r.fieldName()
+ // Only append elements we don't already have.
+ for _, v := range r.fieldNames {
+ if name == v {
+ return
+ }
+ }
+ r.fieldNames = append(r.fieldNames, name)
+}
+
+// PopStep implements cmp.Reporter.
+func (r *FieldListReporter) PopStep() {
+ r.path = r.path[:len(r.path)-1]
+}
+
+// Fields returns the field names that differed between the two
+// objects after calling cmp.Equal with the FieldListReporter. Field names
+// are returned in alphabetical order.
+func (r *FieldListReporter) Fields() []string {
+ sort.Strings(r.fieldNames)
+ return r.fieldNames
+}
+
+// ShortDiffReporter implements the cmp.Reporter interface. It reports
+// on fields which have diffing values in a short zero-context, unified diff
+// format.
+type ShortDiffReporter struct {
+ path cmp.Path
+ diffs []string
+ err error
+}
+
+// PushStep implements the cmp.Reporter.
+func (r *ShortDiffReporter) PushStep(ps cmp.PathStep) {
+ r.path = append(r.path, ps)
+}
+
+// Report implements the cmp.Reporter.
+func (r *ShortDiffReporter) Report(rs cmp.Result) {
+ if rs.Equal() {
+ return
+ }
+ cur := r.path.Last()
+ vx, vy := cur.Values()
+ t := cur.Type()
+ var diff string
+ // Prefix struct values with the types to add clarity in output
+ if !vx.IsValid() || !vy.IsValid() {
+ r.err = fmt.Errorf("Unable to diff %+v and %+v on path %#v", vx, vy, r.path)
+ } else if t.Kind() == reflect.Struct {
+ diff = fmt.Sprintf("%#v:\n\t-: %+v: \"%+v\"\n\t+: %+v: \"%+v\"\n", r.path, t, vx, t, vy)
+ } else {
+ diff = fmt.Sprintf("%#v:\n\t-: \"%+v\"\n\t+: \"%+v\"\n", r.path, vx, vy)
+ }
+ r.diffs = append(r.diffs, diff)
+}
+
+// PopStep implements the cmp.Reporter.
+func (r *ShortDiffReporter) PopStep() {
+ r.path = r.path[:len(r.path)-1]
+}
+
+// Diff returns the generated short diff for this object.
+// cmp.Equal should be called before this method.
+func (r *ShortDiffReporter) Diff() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ return strings.Join(r.diffs, ""), nil
+}
diff --git a/vendor/github.com/knative/pkg/test/clients.go b/vendor/github.com/knative/pkg/test/clients.go
index edae5e27cdb..fbd9e65836a 100644
--- a/vendor/github.com/knative/pkg/test/clients.go
+++ b/vendor/github.com/knative/pkg/test/clients.go
@@ -95,9 +95,9 @@ func (client *KubeClient) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) {
return pods.Create(pod)
}
-// PodLogs returns Pod logs for given Pod and Container
-func (client *KubeClient) PodLogs(podName, containerName string) ([]byte, error) {
- pods := client.Kube.CoreV1().Pods(Flags.Namespace)
+// PodLogs returns Pod logs for given Pod and Container in the namespace
+func (client *KubeClient) PodLogs(podName, containerName, namespace string) ([]byte, error) {
+ pods := client.Kube.CoreV1().Pods(namespace)
podList, err := pods.List(metav1.ListOptions{})
if err != nil {
return nil, err
diff --git a/vendor/github.com/knative/pkg/test/kube_checks.go b/vendor/github.com/knative/pkg/test/kube_checks.go
index ed988334dc3..1b78914f5e7 100644
--- a/vendor/github.com/knative/pkg/test/kube_checks.go
+++ b/vendor/github.com/knative/pkg/test/kube_checks.go
@@ -89,9 +89,9 @@ func DeploymentScaledToZeroFunc() func(d *appsv1.Deployment) (bool, error) {
// WaitForLogContent waits until logs for given Pod/Container include the given content.
// If the content is not present within timeout it returns error.
-func WaitForLogContent(client *KubeClient, podName, containerName, content string) error {
+func WaitForLogContent(client *KubeClient, podName, containerName, namespace, content string) error {
return wait.PollImmediate(interval, logTimeout, func() (bool, error) {
- logs, err := client.PodLogs(podName, containerName)
+ logs, err := client.PodLogs(podName, containerName, namespace)
if err != nil {
return true, err
}
@@ -104,12 +104,29 @@ func WaitForAllPodsRunning(client *KubeClient, namespace string) error {
return WaitForPodListState(client, PodsRunning, "PodsAreRunning", namespace)
}
+// WaitForPodRunning waits for the given pod to be in running state
+func WaitForPodRunning(client *KubeClient, name string, namespace string) error {
+ p := client.Kube.CoreV1().Pods(namespace)
+ return wait.PollImmediate(interval, podTimeout, func() (bool, error) {
+ p, err := p.Get(name, metav1.GetOptions{})
+ if err != nil {
+ return true, err
+ }
+ return PodRunning(p), nil
+ })
+}
+
// PodsRunning will check the status conditions of the pod list and return true all pods are Running
func PodsRunning(podList *corev1.PodList) (bool, error) {
for _, pod := range podList.Items {
- if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodSucceeded {
+ if isRunning := PodRunning(&pod); !isRunning {
return false, nil
}
}
return true, nil
}
+
+// PodRunning will check the status conditions of the pod and return true if it's Running
+func PodRunning(pod *corev1.Pod) bool {
+ return pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded
+}
diff --git a/vendor/github.com/knative/pkg/test/spoof/error_checks.go b/vendor/github.com/knative/pkg/test/spoof/error_checks.go
index 9582799ae3c..0cd2995ca2a 100644
--- a/vendor/github.com/knative/pkg/test/spoof/error_checks.go
+++ b/vendor/github.com/knative/pkg/test/spoof/error_checks.go
@@ -20,7 +20,6 @@ package spoof
import (
"net"
- "net/url"
"strings"
)
@@ -30,14 +29,15 @@ func isTCPTimeout(e error) bool {
}
func isDNSError(err error) bool {
- if err, ok := err.(*url.Error); err != nil && ok {
- if err, ok := err.Err.(*net.OpError); err != nil && ok {
- if err, ok := err.Err.(*net.DNSError); err != nil && ok {
- return true
- }
- }
+ if err == nil {
+ return false
}
- return false
+ // Checking by casting to url.Error and casting the nested error
+ // seems to be not as robust as a string check.
+ msg := strings.ToLower(err.Error())
+ // Example error message:
+ // > Get http://this.url.does.not.exist: dial tcp: lookup this.url.does.not.exist on 127.0.0.1:53: no such host
+ return strings.Contains(msg, "no such host") || strings.Contains(msg, ":53")
}
func isTCPConnectRefuse(err error) bool {
diff --git a/vendor/github.com/knative/pkg/webhook/webhook.go b/vendor/github.com/knative/pkg/webhook/webhook.go
index 1ecbcc363e2..cd57f176440 100644
--- a/vendor/github.com/knative/pkg/webhook/webhook.go
+++ b/vendor/github.com/knative/pkg/webhook/webhook.go
@@ -206,18 +206,12 @@ func getOrGenerateKeyCertsFromSecret(ctx context.Context, client kubernetes.Inte
return serverKey, serverCert, caCert, nil
}
-// validate checks whether "new" and "old" implement HasImmutableFields and checks them,
-// it then delegates validation to apis.Validatable on "new".
-func validate(ctx context.Context, old, new GenericCRD) error {
- if old != nil {
- // Copy the old object and set defaults so that we don't reject our own
- // defaulting done earlier in the webhook.
- old = old.DeepCopyObject().(GenericCRD)
- old.SetDefaults(ctx)
-
- ctx = apis.WithinUpdate(ctx, old)
-
- // TODO(mattmoor): Remove this.
+// validate performs validation on the provided "new" CRD.
+// For legacy purposes, this also does apis.Immutable validation,
+// which is deprecated and will be removed in a future release.
+func validate(ctx context.Context, new GenericCRD) error {
+ if apis.IsInUpdate(ctx) {
+ old := apis.GetBaseline(ctx)
if immutableNew, ok := new.(apis.Immutable); ok {
immutableOld, ok := old.(apis.Immutable)
if !ok {
@@ -227,8 +221,6 @@ func validate(ctx context.Context, old, new GenericCRD) error {
return err
}
}
- } else {
- ctx = apis.WithinCreate(ctx)
}
// Can't just `return new.Validate()` because it doesn't properly nil-check.
@@ -550,7 +542,20 @@ func (ac *AdmissionController) mutate(ctx context.Context, req *admissionv1beta1
patches = append(patches, rtp...)
}
+ // Set up the context for defaulting and validation
+ if oldObj != nil {
+ // Copy the old object and set defaults so that we don't reject our own
+ // defaulting done earlier in the webhook.
+ oldObj = oldObj.DeepCopyObject().(GenericCRD)
+ oldObj.SetDefaults(ctx)
+
+ ctx = apis.WithinUpdate(ctx, oldObj)
+ } else {
+ ctx = apis.WithinCreate(ctx)
+ }
ctx = apis.WithUserInfo(ctx, &req.UserInfo)
+
+ // Default the new object.
if patches, err = setDefaults(ctx, patches, newObj); err != nil {
logger.Errorw("Failed the resource specific defaulter", zap.Error(err))
// Return the error message as-is to give the defaulter callback
@@ -562,7 +567,7 @@ func (ac *AdmissionController) mutate(ctx context.Context, req *admissionv1beta1
if newObj == nil {
return nil, errMissingNewObject
}
- if err := validate(ctx, oldObj, newObj); err != nil {
+ if err := validate(ctx, newObj); err != nil {
logger.Errorw("Failed the resource specific validation", zap.Error(err))
// Return the error message as-is to give the validation callback
// discretion over (our portion of) the message that the user sees.
From f8317dd0ead16253fcea1a61a749bb228708f117 Mon Sep 17 00:00:00 2001
From: akashrv <45154452+akashrv@users.noreply.github.com>
Date: Mon, 15 Apr 2019 15:27:00 -0700
Subject: [PATCH 22/76] Remove Istio dependency from Eventing (Part - 1)
(#1044)
* WIP
* WIP - In-memory working with E2E tests
* WIP - remove istio dependency from in-memory channel
* UTs pass, E2E tests pass with in-memory as well as kafka
* fixed uts that failed due to last K8s service change
* Removed unnecessary space from a line
* Adding istio annotation to test POD. This will be needed when running E2E
tests against channels other than in-memory
* Bug fix to set clusterIP of K8s service only when it is not of type ExternalName
* Updated code based on PR comments
* Updates based on PR comments
* Updates based on PR comments
* Fixed UTs
* Updated VENDOR_LICENSE
---
cmd/broker/ingress/main.go | 3 +-
cmd/controller/main.go | 16 +-
cmd/fanoutsidecar/main.go | 152 +++---
.../in-memory-channel/in-memory-channel.yaml | 42 +-
contrib/kafka/cmd/controller/main.go | 12 +-
contrib/kafka/main.go | 12 +-
.../reconcile_test.go | 2 +-
pkg/channelwatcher/channel_watcher.go | 59 +++
pkg/provisioners/channel_util.go | 74 ++-
pkg/provisioners/channel_util_test.go | 34 +-
.../inmemory/channel/controller.go | 29 +-
.../inmemory/channel/reconcile.go | 147 +-----
.../inmemory/channel/reconcile_test.go | 440 +-----------------
.../clusterchannelprovisioner/reconcile.go | 12 +-
.../reconcile_test.go | 67 ++-
pkg/provisioners/inmemory/controller/main.go | 2 +
pkg/provisioners/provisioner_util.go | 30 +-
pkg/provisioners/provisioner_util_test.go | 2 +-
.../v1alpha1/broker/resources/ingress.go | 2 +
.../multi_channel_fanout_handler.go | 24 +-
.../multi_channel_fanout_handler_test.go | 49 +-
pkg/sidecar/swappable/swappable_test.go | 24 +-
test/crd.go | 3 +
third_party/VENDOR-LICENSE | 34 --
24 files changed, 423 insertions(+), 848 deletions(-)
create mode 100644 pkg/channelwatcher/channel_watcher.go
diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go
index ea0094fba73..5317d44320c 100644
--- a/cmd/broker/ingress/main.go
+++ b/cmd/broker/ingress/main.go
@@ -42,10 +42,11 @@ import (
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.uber.org/zap"
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
crlog "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
var (
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index a508d7092a9..82851fc1bce 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -24,20 +24,13 @@ import (
"os"
"time"
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logconfig"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/channel"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/subscription"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/manager"
-
- // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
-
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/logconfig"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
"github.com/knative/pkg/logging"
@@ -46,9 +39,14 @@ import (
"github.com/knative/pkg/system"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
const (
diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go
index 59e8ce8892b..370289ffb5e 100644
--- a/cmd/fanoutsidecar/main.go
+++ b/cmd/fanoutsidecar/main.go
@@ -25,49 +25,47 @@ import (
"fmt"
"log"
"net/http"
- "strings"
"time"
- "github.com/knative/eventing/pkg/sidecar/configmap/filesystem"
- "github.com/knative/eventing/pkg/sidecar/configmap/watcher"
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/channelwatcher"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/sidecar/fanout"
+ "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
"github.com/knative/eventing/pkg/sidecar/swappable"
- "github.com/knative/eventing/pkg/utils"
- "github.com/knative/pkg/system"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
- "k8s.io/client-go/kubernetes"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-)
-
-const (
- defaultConfigMapName = "in-memory-channel-dispatcher-config-map"
-
- // The following are the only valid values of the config_map_noticer flag.
- cmnfVolume = "volume"
- cmnfWatcher = "watcher"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
var (
readTimeout = 1 * time.Minute
writeTimeout = 1 * time.Minute
- port int
- configMapNoticer string
- configMapNamespace string
- configMapName string
+ port int
+ channelProvisioners listFlags
)
-func init() {
- flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.")
- flag.StringVar(&configMapNoticer, "config_map_noticer", "", fmt.Sprintf("The system to notice changes to the ConfigMap. Valid values are: %s", configMapNoticerValues()))
- flag.StringVar(&configMapNamespace, "config_map_namespace", system.Namespace(), "The namespace of the ConfigMap that is watched for configuration.")
- flag.StringVar(&configMapName, "config_map_name", defaultConfigMapName, "The name of the ConfigMap that is watched for configuration.")
+type listFlags []string
+
+func (l *listFlags) String() string {
+ return ""
+}
+func (l *listFlags) Set(value string) error {
+ *l = append(*l, value)
+ return nil
}
-func configMapNoticerValues() string {
- return strings.Join([]string{cmnfVolume, cmnfWatcher}, ", ")
+func init() {
+ flag.IntVar(&port, "sidecar_port", -1, "The port to run the sidecar on.")
+ flag.Var(&channelProvisioners, "channel_provisioner", "The provisioner of the channels that will be watched.")
}
func main() {
@@ -84,14 +82,18 @@ func main() {
logger.Fatal("--sidecar_port flag must be set")
}
+ if len(channelProvisioners) < 1 {
+ logger.Fatal("--channel_provisioner must be specified")
+ }
+
sh, err := swappable.NewEmptyHandler(logger)
if err != nil {
logger.Fatal("Unable to create swappable.Handler", zap.Error(err))
}
- mgr, err := setupConfigMapNoticer(logger, sh.UpdateConfig)
+ mgr, err := setupChannelWatcher(logger, sh.UpdateConfig)
if err != nil {
- logger.Fatal("Unable to create configMap noticer.", zap.Error(err))
+ logger.Fatal("Unable to create channel watcher.", zap.Error(err))
}
s := &http.Server{
@@ -125,57 +127,87 @@ func main() {
}
}
-func setupConfigMapNoticer(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) {
+func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfig) (manager.Manager, error) {
mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{})
if err != nil {
- logger.Error("Error starting manager.", zap.Error(err))
+ logger.Error("Error creating new manager.", zap.Error(err))
return nil, err
}
-
- switch configMapNoticer {
- case cmnfVolume:
- err = setupConfigMapVolume(logger, mgr, configUpdated)
- case cmnfWatcher:
- err = setupConfigMapWatcher(logger, mgr, configUpdated)
- default:
- err = fmt.Errorf("need to provide the --config_map_noticer flag (valid values are %s)", configMapNoticerValues())
- }
- if err != nil {
+ if err = v1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+ logger.Error("Error while adding eventing scheme to manager.", zap.Error(err))
return nil, err
}
+ channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated))
return mgr, nil
}
-func setupConfigMapVolume(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error {
- cmn, err := filesystem.NewConfigMapWatcher(logger, filesystem.ConfigDir, configUpdated)
- if err != nil {
- logger.Error("Unable to create filesystem.ConifgMapWatcher", zap.Error(err))
- return err
- }
- if err = mgr.Add(cmn); err != nil {
- logger.Error("Unable to add the config map watcher", zap.Error(err))
- return err
+func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc {
+ return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error {
+ channels, err := listAllChannels(ctx, c)
+ if err != nil {
+ logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err))
+ return err
+ }
+ config := multiChannelFanoutConfig(channels)
+ return updateConfig(config)
}
- return nil
}
-func setupConfigMapWatcher(logger *zap.Logger, mgr manager.Manager, configUpdated swappable.UpdateConfig) error {
- kc, err := kubernetes.NewForConfig(mgr.GetConfig())
- if err != nil {
- return err
+func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) {
+ channels := make([]v1alpha1.Channel, 0)
+ for {
+ cl := &v1alpha1.ChannelList{}
+ opts := &client.ListOptions{
+ // Set Raw because if we need to get more than one page, then we will put the continue token
+ // into opts.Raw.Continue.
+ Raw: &metav1.ListOptions{},
+ }
+ if err := c.List(ctx, opts, cl); err != nil {
+ return nil, err
+ }
+ for _, c := range cl.Items {
+ if c.Status.IsReady() && shouldWatch(&c) {
+ channels = append(channels, c)
+ }
+ }
+ if cl.Continue != "" {
+ opts.Raw.Continue = cl.Continue
+ } else {
+ return channels, nil
+ }
}
+}
- cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, configUpdated)
- if err != nil {
- return err
+func shouldWatch(ch *v1alpha1.Channel) bool {
+ if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" {
+ for _, v := range channelProvisioners {
+ if v == ch.Spec.Provisioner.Name {
+ return true
+ }
+ }
}
+ return false
+}
- if err = mgr.Add(utils.NewBlockingStart(logger, cmw)); err != nil {
- logger.Error("Unable to add the config map watcher", zap.Error(err))
- return err
+func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config {
+ cc := make([]multichannelfanout.ChannelConfig, 0)
+ for _, c := range channels {
+ channelConfig := multichannelfanout.ChannelConfig{
+ Namespace: c.Namespace,
+ Name: c.Name,
+ HostName: c.Status.Address.Hostname,
+ }
+ if c.Spec.Subscribable != nil {
+ channelConfig.FanoutConfig = fanout.Config{
+ Subscriptions: c.Spec.Subscribable.Subscribers,
+ }
+ }
+ cc = append(cc, channelConfig)
+ }
+ return &multichannelfanout.Config{
+ ChannelConfigs: cc,
}
- return nil
}
// runnableServer is a small wrapper around http.Server so that it matches the manager.Runnable
diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml
index e0191da4081..d1b30298273 100644
--- a/config/provisioners/in-memory-channel/in-memory-channel.yaml
+++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml
@@ -62,7 +62,6 @@ rules:
- apiGroups:
- "" # Core API group.
resources:
- - configmaps
- services
verbs:
- get
@@ -83,24 +82,6 @@ rules:
- services
verbs:
- update
- - apiGroups:
- - "" # Core API Group.
- resources:
- - configmaps
- resourceNames:
- - in-memory-channel-dispatcher-config-map
- verbs:
- - update
- - apiGroups:
- - networking.istio.io
- resources:
- - virtualservices
- verbs:
- - get
- - list
- - watch
- - create
- - update
- apiGroups:
- "" # Core API Group.
resources:
@@ -168,9 +149,10 @@ metadata:
name: in-memory-channel-dispatcher
rules:
- apiGroups:
- - "" # Core API group.
+ - "eventing.knative.dev"
resources:
- - configmaps
+ - "channels"
+ - "channels/status"
verbs:
- get
- list
@@ -216,24 +198,10 @@ spec:
image: github.com/knative/eventing/cmd/fanoutsidecar
args:
- --sidecar_port=8080
- - --config_map_noticer=watcher
- - --config_map_namespace=knative-eventing
- - --config_map_name=in-memory-channel-dispatcher-config-map
+ - --channel_provisioner=in-memory
+ - --channel_provisioner=in-memory-channel
env:
- name: SYSTEM_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
-
----
-
-# Create the ConfigMap, because if we don't the dispatcher will flap when it first comes online and
-# this can cause the integration tests to fail.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: in-memory-channel-dispatcher-config-map
- namespace: knative-eventing
-data:
- multiChannelFanoutConfig: '{}'
diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go
index 37e45a43349..375361f4af3 100644
--- a/contrib/kafka/cmd/controller/main.go
+++ b/contrib/kafka/cmd/controller/main.go
@@ -4,20 +4,20 @@ import (
"flag"
"os"
+ provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller"
+ "github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
+ eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/provisioners"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-
- provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller"
- "github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
- eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/provisioners"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
// SchemeFunc adds types to a Scheme.
diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go
index 316f2dbd521..ed98481c20b 100644
--- a/contrib/kafka/main.go
+++ b/contrib/kafka/main.go
@@ -4,20 +4,20 @@ import (
"flag"
"os"
+ provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller"
+ "github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
+ eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/provisioners"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
- _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
-
- provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller"
- "github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
- eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/provisioners"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
const (
diff --git a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go
index e2f043d9231..57a70ade635 100644
--- a/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go
+++ b/contrib/natss/pkg/controller/clusterchannelprovisioner/reconcile_test.go
@@ -254,7 +254,7 @@ func makeK8sService() *corev1.Service {
Selector: provisioners.DispatcherLabels(Name),
Ports: []corev1.ServicePort{
{
- Name: "http",
+ Protocol: corev1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromInt(8080),
},
diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go
new file mode 100644
index 00000000000..b9a77670ce8
--- /dev/null
+++ b/pkg/channelwatcher/channel_watcher.go
@@ -0,0 +1,59 @@
+package channelwatcher
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logging"
+ "go.uber.org/zap"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error
+
+type reconciler struct {
+ client client.Client
+ logger *zap.Logger
+ handler WatchHandlerFunc
+}
+
+func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req)))
+ logging.FromContext(ctx).Info("New update for channel.")
+ if err := r.handler(ctx, r.client, req.NamespacedName); err != nil {
+ logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err))
+ return reconcile.Result{}, err
+ }
+ return reconcile.Result{}, nil
+}
+
+func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error {
+ c, err := controller.New("ChannelWatcher", mgr, controller.Options{
+ Reconciler: &reconciler{
+ client: mgr.GetClient(),
+ logger: logger,
+ handler: watchHandler,
+ },
+ })
+ if err != nil {
+ logger.Error("Unable to create controller for channelwatcher.", zap.Error(err))
+ return err
+ }
+
+ // Watch Channels.
+ err = c.Watch(&source.Kind{
+ Type: &v1alpha1.Channel{},
+ }, &handler.EnqueueRequestForObject{})
+ if err != nil {
+ logger.Error("Unable to watch Channels.", zap.Error(err), zap.Any("type", &v1alpha1.Channel{}))
+ return err
+ }
+ return nil
+}
diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index a4261fe8a1e..497dae5adc8 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -41,9 +41,14 @@ const (
// AddFinalizerResult is used indicate whether a finalizer was added or already present.
type AddFinalizerResult bool
+// RemoveFinalizerResult is used to indicate whether a finalizer was found and removed (FinalizerRemoved), or finalizer not found (FinalizerNotFound).
+type RemoveFinalizerResult bool
+
const (
- FinalizerAlreadyPresent AddFinalizerResult = false
- FinalizerAdded AddFinalizerResult = true
+ FinalizerAlreadyPresent AddFinalizerResult = false
+ FinalizerAdded AddFinalizerResult = true
+ FinalizerRemoved RemoveFinalizerResult = true
+ FinalizerNotFound RemoveFinalizerResult = false
)
// AddFinalizer adds finalizerName to the Object.
@@ -57,25 +62,47 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult {
return FinalizerAdded
}
-func RemoveFinalizer(o metav1.Object, finalizerName string) {
+func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult {
+ result := FinalizerNotFound
finalizers := sets.NewString(o.GetFinalizers()...)
- finalizers.Delete(finalizerName)
- o.SetFinalizers(finalizers.List())
+ if finalizers.Has(finalizerName) {
+ result = FinalizerRemoved
+ finalizers.Delete(finalizerName)
+ o.SetFinalizers(finalizers.List())
+ }
+ return result
+}
+
+// K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService
+type K8sServiceOption func(*corev1.Service) error
+
+// ExternalService is a functional option for CreateK8sService to create a K8s service of type ExternalName.
+func ExternalService(c *eventingv1alpha1.Channel) K8sServiceOption {
+ return func(svc *corev1.Service) error {
+ svc.Spec = corev1.ServiceSpec{
+ Type: corev1.ServiceTypeExternalName,
+ ExternalName: names.ServiceHostName(channelDispatcherServiceName(c.Spec.Provisioner.Name), system.Namespace()),
+ }
+ return nil
+ }
}
-func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) {
+func CreateK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) {
getSvc := func() (*corev1.Service, error) {
return getK8sService(ctx, client, c)
}
- return createK8sService(ctx, client, getSvc, newK8sService(c))
+ svc, err := newK8sService(c, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return createK8sService(ctx, client, getSvc, svc)
}
func getK8sService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*corev1.Service, error) {
list := &corev1.ServiceList{}
opts := &runtimeClient.ListOptions{
- Namespace: c.Namespace,
- // TODO After the full release start selecting on new set of labels by using k8sServiceLabels(c)
- LabelSelector: labels.SelectorFromSet(k8sOldServiceLabels(c)),
+ Namespace: c.Namespace,
+ LabelSelector: labels.SelectorFromSet(k8sServiceLabels(c)),
// Set Raw because if we need to get more than one page, then we will put the continue token
// into opts.Raw.Continue.
Raw: &metav1.ListOptions{},
@@ -107,12 +134,17 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g
} else if err != nil {
return nil, err
}
-
// spec.clusterIP is immutable and is set on existing services. If we don't set this
// to the same value, we will encounter an error while updating.
- svc.Spec.ClusterIP = current.Spec.ClusterIP
+ if svc.Spec.Type != corev1.ServiceTypeExternalName {
+ svc.Spec.ClusterIP = current.Spec.ClusterIP
+ }
if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) ||
- !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) {
+ !expectedLabelsPresent(current.ObjectMeta.Labels, svc.ObjectMeta.Labels) ||
+ // This DeepEqual is necessary to force update dispatcher services when upgrading from 0.5 to 0.6.
+ // Above DeepDerivative will not work because we have removed an optional field (name) from ports
+ // TODO: Remove this check in 0.7+
+ !equality.Semantic.DeepEqual(svc.Spec.Ports, current.Spec.Ports) {
current.Spec = svc.Spec
current.ObjectMeta.Labels = addExpectedLabels(current.ObjectMeta.Labels, svc.ObjectMeta.Labels)
err = client.Update(ctx, current)
@@ -248,8 +280,9 @@ func UpdateChannel(ctx context.Context, client runtimeClient.Client, u *eventing
// newK8sService creates a new Service for a Channel resource. It also sets the appropriate
// OwnerReferences on the resource so handleObject can discover the Channel resource that 'owns' it.
// As well as being garbage collected when the Channel is deleted.
-func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service {
- return &corev1.Service{
+func newK8sService(c *eventingv1alpha1.Channel, opts ...K8sServiceOption) (*corev1.Service, error) {
+ // Add annotations
+ svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: channelServiceName(c.ObjectMeta.Name),
Namespace: c.Namespace,
@@ -265,12 +298,19 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
- Name: PortName,
- Port: PortNumber,
+ Name: PortName,
+ Protocol: corev1.ProtocolTCP,
+ Port: PortNumber,
},
},
},
}
+ for _, opt := range opts {
+ if err := opt(svc); err != nil {
+ return nil, err
+ }
+ }
+ return svc, nil
}
// k8sOldServiceLabels returns a map with only old eventing channel and provisioner labels
diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go
index 6aded3735d3..848dda5d8f3 100644
--- a/pkg/provisioners/channel_util_test.go
+++ b/pkg/provisioners/channel_util_test.go
@@ -404,6 +404,35 @@ func TestAddFinalizer(t *testing.T) {
}
}
+func TestRemoveFinalizer(t *testing.T) {
+ testCases := map[string]struct {
+ expected RemoveFinalizerResult
+ }{
+ "Finalizer not found": {
+ expected: false,
+ },
+ "Finalizer removed successfully": {
+ expected: true,
+ },
+ }
+ finalizer := "test-finalizer"
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ c := getNewChannel()
+ if tc.expected {
+ c.Finalizers = []string{finalizer}
+ } else {
+ c.Finalizers = []string{}
+ }
+ actual := RemoveFinalizer(c, finalizer)
+
+ if diff := cmp.Diff(actual, tc.expected); diff != "" {
+ t.Errorf("unexpected error (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
+
func TestChannelNames(t *testing.T) {
testCases := []struct {
Name string
@@ -597,8 +626,9 @@ func makeK8sService() *corev1.Service {
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
- Name: PortName,
- Port: PortNumber,
+ Name: PortName,
+ Port: PortNumber,
+ Protocol: corev1.ProtocolTCP,
},
},
},
diff --git a/pkg/provisioners/inmemory/channel/controller.go b/pkg/provisioners/inmemory/channel/controller.go
index 7ff6128759a..88f0e96233f 100644
--- a/pkg/provisioners/inmemory/channel/controller.go
+++ b/pkg/provisioners/inmemory/channel/controller.go
@@ -18,11 +18,8 @@ package channel
import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "github.com/knative/pkg/system"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
@@ -33,18 +30,6 @@ const (
// controllerAgentName is the string used by this controller to identify
// itself when creating events.
controllerAgentName = "in-memory-channel-controller"
-
- // ConfigMapName is the name of the ConfigMap in the knative-eventing namespace that contains
- // the subscription information for all in-memory Channels. The Provisioner writes to it and the
- // Dispatcher reads from it.
- ConfigMapName = "in-memory-channel-dispatcher-config-map"
-)
-
-var (
- defaultConfigMapKey = types.NamespacedName{
- Namespace: system.Namespace(),
- Name: ConfigMapName,
- }
)
// ProvideController returns a Controller that represents the in-memory-channel Provisioner.
@@ -52,9 +37,8 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont
// Setup a new controller to Reconcile Channels that belong to this Cluster Provisioner
// (in-memory channels).
r := &reconciler{
- configMapKey: defaultConfigMapKey,
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
+ recorder: mgr.GetRecorder(controllerAgentName),
+ logger: logger,
}
c, err := controller.New(controllerAgentName, mgr, controller.Options{
Reconciler: r,
@@ -82,14 +66,5 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont
return nil, err
}
- // Watch the VirtualServices that are owned by Channels.
- err = c.Watch(&source.Kind{
- Type: &istiov1alpha3.VirtualService{},
- }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true})
- if err != nil {
- logger.Error("Unable to watch VirtualServices.", zap.Error(err))
- return nil, err
- }
-
return c, nil
}
diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go
index 53ed753244e..5d5f8392a82 100644
--- a/pkg/provisioners/inmemory/channel/reconcile.go
+++ b/pkg/provisioners/inmemory/channel/reconcile.go
@@ -21,7 +21,6 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
@@ -32,19 +31,14 @@ import (
util "github.com/knative/eventing/pkg/provisioners"
ccpcontroller "github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner"
"github.com/knative/eventing/pkg/reconciler/names"
- "github.com/knative/eventing/pkg/sidecar/configmap"
- "github.com/knative/eventing/pkg/sidecar/fanout"
- "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
)
const (
finalizerName = controllerAgentName
// Name of the corev1.Events emitted from the reconciliation process
- channelReconciled = "ChannelReconciled"
- channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
- channelConfigSyncFailed = "ChannelConfigSyncFailed"
- k8sServiceCreateFailed = "K8sServiceCreateFailed"
- virtualServiceCreateFailed = "VirtualServiceCreateFailed"
+ channelReconciled = "ChannelReconciled"
+ channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
+ k8sServiceCreateFailed = "K8sServiceCreateFailed"
// TODO after in-memory-channel is retired, asyncProvisionerName should be removed
defaultProvisionerName = "in-memory-channel"
)
@@ -53,8 +47,6 @@ type reconciler struct {
client client.Client
recorder record.EventRecorder
logger *zap.Logger
-
- configMapKey client.ObjectKey
}
// Verify the struct implements reconcile.Reconciler
@@ -93,8 +85,14 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}
logger.Info("Reconciling Channel")
- // Modify a copy, not the original.
- c = c.DeepCopy()
+ // Finalizer needs to be removed (even though no finalizers are added) to maintain backwards compatibility
+ // with v0.5 in which a finalizer was added. Or else channels will not get deleted after upgrading to 0.6+
+ if result := util.RemoveFinalizer(c, finalizerName); result == util.FinalizerRemoved {
+ r.client.Update(ctx, c)
+ logger.Info("Channel reconciled. Finalizer Removed")
+ r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q. Finalizer removed.", c.Name)
+ return reconcile.Result{Requeue: true}, nil
+ }
err = r.reconcile(ctx, c)
if err != nil {
@@ -106,7 +104,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name)
}
- if updateStatusErr := util.UpdateChannel(ctx, r.client, c); updateStatusErr != nil {
+ if updateStatusErr := r.client.Status().Update(ctx, c); updateStatusErr != nil {
logger.Info("Error updating Channel Status", zap.Error(updateStatusErr))
r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err)
return reconcile.Result{}, updateStatusErr
@@ -128,135 +126,20 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
c.Status.InitializeConditions()
- // We are syncing three things:
- // 1. The K8s Service to talk to this Channel.
- // 2. The Istio VirtualService to talk to this Channel.
- // 3. The configuration of all Channel subscriptions.
-
- // We always need to sync the Channel config, so do it first.
- if err := r.syncChannelConfig(ctx); err != nil {
- logger.Info("Error syncing the Channel config", zap.Error(err))
- r.recorder.Eventf(c, corev1.EventTypeWarning, channelConfigSyncFailed, "Failed to sync Channel config: %v", err)
- return err
- }
-
- if c.DeletionTimestamp != nil {
- // K8s garbage collection will delete the K8s service and VirtualService for this channel.
- // We use a finalizer to ensure the channel config has been synced.
- util.RemoveFinalizer(c, finalizerName)
- return nil
- }
-
- util.AddFinalizer(c, finalizerName)
-
- svc, err := util.CreateK8sService(ctx, r.client, c)
+ // We are syncing K8s Service to talk to this Channel.
+ svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c))
if err != nil {
logger.Info("Error creating the Channel's K8s Service", zap.Error(err))
r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err)
return err
}
- c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace))
- if c.Spec.Provisioner.Name == defaultProvisionerName {
- _, err = util.CreateVirtualService(ctx, r.client, c, svc)
- if err != nil {
- logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err))
- r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err)
- return err
- }
- } else {
- // We need to have a single dispatcher that is pointed at by _both_
- // ClusterChannelProvisioners. So fake the channel, by saying that its provisioner is the
- // one with the single dispatcher. The faked provisioner is used only to determine the
- // dispatcher Service's name.
- cCopy := c.DeepCopy()
- cCopy.Spec.Provisioner.Name = defaultProvisionerName
- _, err = util.CreateVirtualService(ctx, r.client, cCopy, svc)
- if err != nil {
- logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err))
- r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err)
- return err
- }
- }
+ c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace))
c.Status.MarkProvisioned()
return nil
}
-func (r *reconciler) syncChannelConfig(ctx context.Context) error {
- channels, err := r.listAllChannels(ctx)
- if err != nil {
- r.logger.Info("Unable to list channels", zap.Error(err))
- return err
- }
- config := multiChannelFanoutConfig(channels)
- return r.writeConfigMap(ctx, config)
-}
-
-func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error {
- logger := r.logger.With(zap.Any("configMap", r.configMapKey))
-
- updated, err := configmap.SerializeConfig(*config)
- if err != nil {
- r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config))
- return err
- }
-
- cm := &corev1.ConfigMap{}
- err = r.client.Get(ctx, r.configMapKey, cm)
- if errors.IsNotFound(err) {
- cm = r.createNewConfigMap(updated)
- err = r.client.Create(ctx, cm)
- }
- if err != nil {
- logger.Info("Unable to get/create ConfigMap", zap.Error(err))
- return err
- }
-
- if equality.Semantic.DeepEqual(cm.Data, updated) {
- // Nothing to update.
- return nil
- }
-
- cm.Data = updated
- return r.client.Update(ctx, cm)
-}
-
-func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap {
- return &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: r.configMapKey.Namespace,
- Name: r.configMapKey.Name,
- },
- Data: data,
- }
-}
-
-func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config {
- cc := make([]multichannelfanout.ChannelConfig, 0)
- for _, c := range channels {
- channelConfig := multichannelfanout.ChannelConfig{
- Namespace: c.Namespace,
- Name: c.Name,
- }
- if c.Spec.Subscribable != nil {
- // TODO After in-memory-channel is retired, this logic must be refactored.
- asyncHandler := false
- if c.Spec.Provisioner.Name != defaultProvisionerName {
- asyncHandler = true
- }
- channelConfig.FanoutConfig = fanout.Config{
- Subscriptions: c.Spec.Subscribable.Subscribers,
- AsyncHandler: asyncHandler,
- }
- }
- cc = append(cc, channelConfig)
- }
- return &multichannelfanout.Config{
- ChannelConfigs: cc,
- }
-}
-
func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) {
channels := make([]eventingv1alpha1.Channel, 0)
diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go
index 30b9b2ac27b..0b221854a41 100644
--- a/pkg/provisioners/inmemory/channel/reconcile_test.go
+++ b/pkg/provisioners/inmemory/channel/reconcile_test.go
@@ -18,27 +18,27 @@ package channel
import (
"context"
- "encoding/json"
"errors"
"fmt"
"testing"
- "github.com/google/go-cmp/cmp"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
util "github.com/knative/eventing/pkg/provisioners"
+ "github.com/knative/eventing/pkg/reconciler/names"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
- "github.com/knative/eventing/pkg/sidecar/configmap"
"github.com/knative/eventing/pkg/sidecar/fanout"
"github.com/knative/eventing/pkg/sidecar/multichannelfanout"
"github.com/knative/eventing/pkg/utils"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/pkg/system"
_ "github.com/knative/pkg/system/testing"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -56,8 +56,6 @@ const (
cmName = "test-config-map"
testErrorMessage = "test induced error"
-
- insertedByVerifyConfigMapData = "data inserted by verifyConfigMapData so that it can be WantPresent"
)
var (
@@ -183,11 +181,9 @@ var (
// map of events to set test cases' expectations easier
events = map[string]corev1.Event{
- channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal},
- channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning},
- channelConfigSyncFailed: {Reason: channelConfigSyncFailed, Type: corev1.EventTypeWarning},
- k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning},
- virtualServiceCreateFailed: {Reason: virtualServiceCreateFailed, Type: corev1.EventTypeWarning},
+ channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal},
+ channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning},
+ k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning},
}
)
@@ -251,98 +247,28 @@ func TestReconcile(t *testing.T) {
},
},
{
- Name: "Channel deleted - Channel config sync fails",
- InitialState: []runtime.Object{
- makeDeletingChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: errorListingChannels(),
- },
- WantPresent: []runtime.Object{
- // Finalizer has not been removed.
- makeDeletingChannel(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelConfigSyncFailed],
- },
- },
- {
- Name: "Channel deleted - finalizer removed",
+ Name: "Channel has finalizer (to test back compat with version <= 0.5, when finalizers were added)",
InitialState: []runtime.Object{
- makeDeletingChannel(),
+ makeChannelWithFinalizer(),
},
WantPresent: []runtime.Object{
- makeDeletingChannelWithoutFinalizer(),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- },
- {
- Name: "Channel config sync fails - can't list Channels",
- InitialState: []runtime.Object{
- makeChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: errorListingChannels(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelConfigSyncFailed],
- },
- },
- {
- Name: "Channel config sync fails - can't get ConfigMap",
- InitialState: []runtime.Object{
makeChannel(),
},
- Mocks: controllertesting.Mocks{
- MockGets: errorGettingConfigMap(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelConfigSyncFailed],
- },
- },
- {
- Name: "Channel config sync fails - can't create ConfigMap",
- InitialState: []runtime.Object{
- makeChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: errorCreatingConfigMap(),
- },
- WantErrMsg: testErrorMessage,
WantEvent: []corev1.Event{
- events[channelConfigSyncFailed],
- },
- },
- {
- Name: "Channel config sync fails - can't update ConfigMap",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: errorUpdatingConfigMap(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelConfigSyncFailed],
+ events[channelReconciled],
},
+ WantResult: reconcile.Result{Requeue: true},
},
{
Name: "K8s service get fails",
InitialState: []runtime.Object{
makeChannel(),
- makeConfigMap(),
},
Mocks: controllertesting.Mocks{
MockLists: errorListingK8sService(),
},
WantPresent: []runtime.Object{
- makeChannelWithFinalizer(),
+ makeChannel(),
},
WantErrMsg: testErrorMessage,
WantEvent: []corev1.Event{
@@ -353,99 +279,24 @@ func TestReconcile(t *testing.T) {
Name: "K8s service creation fails",
InitialState: []runtime.Object{
makeChannel(),
- makeConfigMap(),
},
Mocks: controllertesting.Mocks{
MockCreates: errorCreatingK8sService(),
},
WantPresent: []runtime.Object{
// TODO: This should have a useful error message saying that the K8s Service failed.
- makeChannelWithFinalizer(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[k8sServiceCreateFailed],
- },
- },
- {
- Name: "Virtual service get fails",
- InitialState: []runtime.Object{
makeChannel(),
- makeConfigMap(),
- makeK8sService(),
- makeVirtualService(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: errorListingVirtualService(),
- },
- WantPresent: []runtime.Object{
- // TODO: This should have a useful error message saying that the VirtualService
- // failed.
- makeChannelWithFinalizerAndAddress(),
},
WantErrMsg: testErrorMessage,
WantEvent: []corev1.Event{
- events[virtualServiceCreateFailed],
- },
- },
- {
- Name: "Virtual service creation fails",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- makeK8sService(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: errorCreatingVirtualService(),
- },
- WantPresent: []runtime.Object{
- // TODO: This should have a useful error message saying that the VirtualService
- // failed.
- makeChannelWithFinalizerAndAddress(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[virtualServiceCreateFailed],
- },
- },
- {
- Name: "Channel get for update fails",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- makeK8sService(),
- makeVirtualService(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: errorOnSecondChannelGet(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelReconciled], events[channelUpdateStatusFailed],
+ events[k8sServiceCreateFailed],
},
},
{
- Name: "Channel update fails",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- makeK8sService(),
- makeVirtualService(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: errorUpdatingChannel(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[channelReconciled], events[channelUpdateStatusFailed],
- },
- }, {
Name: "Channel status update fails",
InitialState: []runtime.Object{
makeChannel(),
- makeConfigMap(),
makeK8sService(),
- makeVirtualService(),
},
Mocks: controllertesting.Mocks{
MockStatusUpdates: errorUpdatingChannelStatus(),
@@ -454,83 +305,14 @@ func TestReconcile(t *testing.T) {
WantEvent: []corev1.Event{
events[channelReconciled], events[channelUpdateStatusFailed],
},
- }, {
- Name: "Channel reconcile successful - Channel list follows pagination",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: (&paginatedChannelsListStruct{channels: channels}).MockLists(),
- // This is more accurate to be in WantPresent, but we need to check JSON equality,
- // not string equality, so it can't be done in WantPresent. Instead, we verify
- // during the update call, swapping out the data and WantPresent with that inserted
- // data.
- MockUpdates: verifyConfigMapData(channelsConfig),
- },
- WantPresent: []runtime.Object{
- makeReadyChannel(),
- makeK8sService(),
- makeVirtualService(),
- makeConfigMapWithVerifyConfigMapData(),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- },
- {
- Name: "Channel reconcile successful - Channel has no subscribers",
- InitialState: []runtime.Object{
- makeChannel(),
- makeConfigMap(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: (&paginatedChannelsListStruct{channels: []eventingv1alpha1.Channel{
- {
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "high-consul",
- Name: "duarte",
- },
- Spec: eventingv1alpha1.ChannelSpec{
- Provisioner: &corev1.ObjectReference{
- Name: ccpName,
- },
- },
- },
- }}).MockLists(),
- // This is more accurate to be in WantPresent, but we need to check JSON equality,
- // not string equality, so it can't be done in WantPresent. Instead, we verify
- // during the update call, swapping out the data and WantPresent with that inserted
- // data.
- MockUpdates: verifyConfigMapData(multichannelfanout.Config{
- ChannelConfigs: []multichannelfanout.ChannelConfig{
- {
- Namespace: "high-consul",
- Name: "duarte",
- },
- },
- }),
- },
- WantPresent: []runtime.Object{
- makeReadyChannel(),
- makeK8sService(),
- makeVirtualService(),
- makeConfigMapWithVerifyConfigMapData(),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
},
{
Name: "Channel reconcile successful - Async channel",
- // VirtualService should have channel provisioner name
- // defaults to in-memory-channel but the service should match provisioner's service name
InitialState: []runtime.Object{
makeChannel("in-memory"),
},
Mocks: controllertesting.Mocks{},
WantPresent: []runtime.Object{
- makeVirtualService(),
makeK8sService("in-memory"),
},
WantEvent: []corev1.Event{
@@ -539,14 +321,11 @@ func TestReconcile(t *testing.T) {
},
{
Name: "Channel reconcile successful - Non Async channel",
- // VirtualService should have channel provisioner name
- // defaults to in-memory-channel
InitialState: []runtime.Object{
makeChannel(),
},
Mocks: controllertesting.Mocks{},
WantPresent: []runtime.Object{
- makeVirtualService(),
makeK8sService(),
},
WantEvent: []corev1.Event{
@@ -556,17 +335,12 @@ func TestReconcile(t *testing.T) {
}
for _, tc := range testCases {
- configMapKey := types.NamespacedName{
- Namespace: cmNamespace,
- Name: cmName,
- }
c := tc.GetClient()
recorder := tc.GetEventRecorder()
r := &reconciler{
- client: c,
- recorder: recorder,
- logger: zap.NewNop(),
- configMapKey: configMapKey,
+ client: c,
+ recorder: recorder,
+ logger: zap.NewNop(),
}
if tc.ReconcileKey == "" {
tc.ReconcileKey = fmt.Sprintf("/%s", cName)
@@ -607,19 +381,6 @@ func getProvisionerName(pn []string) string {
return provisionerName
}
-func makeChannelWithFinalizerAndAddress() *eventingv1alpha1.Channel {
- c := makeChannelWithFinalizer()
- c.Status.SetAddress(serviceAddress)
- return c
-}
-
-func makeReadyChannel() *eventingv1alpha1.Channel {
- // Ready channels have the finalizer and are Addressable.
- c := makeChannelWithFinalizerAndAddress()
- c.Status.MarkProvisioned()
- return c
-}
-
func makeChannelNilProvisioner() *eventingv1alpha1.Channel {
c := makeChannel()
c.Spec.Provisioner = nil
@@ -644,38 +405,6 @@ func makeChannelWithFinalizer() *eventingv1alpha1.Channel {
return c
}
-func makeDeletingChannel() *eventingv1alpha1.Channel {
- c := makeChannelWithFinalizer()
- c.DeletionTimestamp = &deletionTime
- return c
-}
-
-func makeDeletingChannelWithoutFinalizer() *eventingv1alpha1.Channel {
- c := makeDeletingChannel()
- c.Finalizers = nil
- return c
-}
-
-func makeConfigMap() *corev1.ConfigMap {
- return &corev1.ConfigMap{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "v1",
- Kind: "ConfigMap",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: cmNamespace,
- Name: cmName,
- },
- }
-}
-
-func makeConfigMapWithVerifyConfigMapData() *corev1.ConfigMap {
- cm := makeConfigMap()
- cm.Data = map[string]string{}
- cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData
- return cm
-}
-
func makeK8sService(pn ...string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
@@ -703,60 +432,8 @@ func makeK8sService(pn ...string) *corev1.Service {
},
},
Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: util.PortName,
- Port: util.PortNumber,
- },
- },
- },
- }
-}
-
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return &istiov1alpha3.VirtualService{
- TypeMeta: metav1.TypeMeta{
- APIVersion: istiov1alpha3.SchemeGroupVersion.String(),
- Kind: "VirtualService",
- },
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: fmt.Sprintf("%s-channel-", cName),
- Namespace: cNamespace,
- Labels: map[string]string{
- util.EventingChannelLabel: cName,
- util.OldEventingChannelLabel: cName,
- util.EventingProvisionerLabel: ccpName,
- util.OldEventingProvisionerLabel: ccpName,
- },
- OwnerReferences: []metav1.OwnerReference{
- {
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Channel",
- Name: cName,
- UID: cUID,
- Controller: &truePointer,
- BlockOwnerDeletion: &truePointer,
- },
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- serviceAddress,
- fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.DestinationWeight{{
- Destination: istiov1alpha3.Destination{
- Host: "in-memory-channel-dispatcher.knative-testing.svc." + utils.GetClusterDomainName(),
- Port: istiov1alpha3.PortSelector{
- Number: util.PortNumber,
- },
- }},
- }},
- },
+ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", getProvisionerName(pn)), system.Namespace()),
+ Type: corev1.ServiceTypeExternalName,
},
}
}
@@ -780,18 +457,6 @@ func errorGettingChannel() []controllertesting.MockGet {
},
}
}
-
-func errorGettingConfigMap() []controllertesting.MockGet {
- return []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.ConfigMap); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorListingK8sService() []controllertesting.MockList {
return []controllertesting.MockList{
func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -803,17 +468,6 @@ func errorListingK8sService() []controllertesting.MockList {
}
}
-func errorListingVirtualService() []controllertesting.MockList {
- return []controllertesting.MockList{
- func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualServiceList); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorListingChannels() []controllertesting.MockList {
return []controllertesting.MockList{
func(client.Client, context.Context, *client.ListOptions, runtime.Object) (controllertesting.MockHandled, error) {
@@ -822,17 +476,6 @@ func errorListingChannels() []controllertesting.MockList {
}
}
-func errorCreatingConfigMap() []controllertesting.MockCreate {
- return []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.ConfigMap); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorCreatingK8sService() []controllertesting.MockCreate {
return []controllertesting.MockCreate{
func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -844,17 +487,6 @@ func errorCreatingK8sService() []controllertesting.MockCreate {
}
}
-func errorCreatingVirtualService() []controllertesting.MockCreate {
- return []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualService); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorUpdatingChannel() []controllertesting.MockUpdate {
return []controllertesting.MockUpdate{
func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -877,17 +509,6 @@ func errorUpdatingChannelStatus() []controllertesting.MockStatusUpdate {
}
}
-func errorUpdatingConfigMap() []controllertesting.MockUpdate {
- return []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.ConfigMap); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
type paginatedChannelsListStruct struct {
channels []eventingv1alpha1.Channel
}
@@ -911,28 +532,3 @@ func (p *paginatedChannelsListStruct) MockLists() []controllertesting.MockList {
},
}
}
-
-func verifyConfigMapData(expected multichannelfanout.Config) []controllertesting.MockUpdate {
- return []controllertesting.MockUpdate{
- func(innerClient client.Client, ctx context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if cm, ok := obj.(*corev1.ConfigMap); ok {
- s := cm.Data[configmap.MultiChannelFanoutConfigKey]
- c := multichannelfanout.Config{}
- err := json.Unmarshal([]byte(s), &c)
- if err != nil {
- return controllertesting.Handled,
- fmt.Errorf("test is unable to unmarshal ConfigMap data: %v", err)
- }
- if diff := cmp.Diff(c, expected); diff != "" {
- return controllertesting.Handled,
- fmt.Errorf("test got unwanted ChannelsConfig (-want +got) %s", diff)
- }
- // Verified it is correct, now so that we can verify this actually occurred, swap
- // out the data with a known value for later comparison.
- cm.Data[configmap.MultiChannelFanoutConfigKey] = insertedByVerifyConfigMapData
- return controllertesting.Handled, innerClient.Update(ctx, obj)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go
index 5e79fc3c802..678c544d46a 100644
--- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go
+++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile.go
@@ -22,6 +22,7 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -153,7 +154,7 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste
return nil
}
- svc, err := util.CreateDispatcherService(ctx, r.client, ccp)
+ svc, err := util.CreateDispatcherService(ctx, r.client, ccp, setDispatcherServiceSelector())
if err != nil {
logger.Info("Error creating the ClusterChannelProvisioner's K8s Service", zap.Error(err))
@@ -179,6 +180,15 @@ func (r *reconciler) reconcile(ctx context.Context, ccp *eventingv1alpha1.Cluste
return nil
}
+// Since there are two provisioners "in-memory" and "in-memory-channel" but one single dispatcher service deployment,
+// update the label of the K8s service to always point at the same dispatcher service deployment
+func setDispatcherServiceSelector() util.ServiceOption {
+ return func(svc *v1.Service) error {
+ svc.Spec.Selector = util.DispatcherLabels("in-memory-channel")
+ return nil
+ }
+}
+
func (r *reconciler) deleteOldDispatcherService(ctx context.Context, ccp *eventingv1alpha1.ClusterChannelProvisioner) error {
svcName := fmt.Sprintf("%s-clusterbus", ccp.Name)
svcKey := types.NamespacedName{
diff --git a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go
index e4ff44abb9b..9d8934d0c61 100644
--- a/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go
+++ b/pkg/provisioners/inmemory/clusterchannelprovisioner/reconcile_test.go
@@ -40,10 +40,11 @@ import (
)
const (
- ccpUID = "test-uid"
- testErrorMessage = "test-induced-error"
- testNS = "test-ns"
- Name = "in-memory-channel"
+ ccpUID = "test-uid"
+ testErrorMessage = "test-induced-error"
+ testNS = "test-ns"
+ inMemoryChannelName = "in-memory-channel"
+ inMemoryName = "in-memory"
)
var (
@@ -96,7 +97,7 @@ func TestIsControlled(t *testing.T) {
"wrong namespace": {
ref: &corev1.ObjectReference{
Namespace: "other",
- Name: Name,
+ Name: inMemoryName,
},
isControlled: false,
},
@@ -108,7 +109,7 @@ func TestIsControlled(t *testing.T) {
},
"is controlled": {
ref: &corev1.ObjectReference{
- Name: Name,
+ Name: inMemoryName,
},
isControlled: true,
},
@@ -143,7 +144,7 @@ func TestReconcile(t *testing.T) {
&eventingv1alpha1.ClusterChannelProvisioner{
ObjectMeta: metav1.ObjectMeta{
Namespace: "not empty string",
- Name: Name,
+ Name: inMemoryName,
},
},
},
@@ -240,6 +241,20 @@ func TestReconcile(t *testing.T) {
events[ccpReconciled],
},
},
+ {
+ Name: "Create dispatcher succeeds - in-memory-Channel",
+ ReconcileKey: inMemoryChannelName,
+ InitialState: []runtime.Object{
+ makeClusterChannelProvisionerOld(),
+ },
+ WantPresent: []runtime.Object{
+ makeReadyClusterChannelProvisionerOld(),
+ makeK8sServiceOld(),
+ },
+ WantEvent: []corev1.Event{
+ events[ccpReconciled],
+ },
+ },
{
Name: "Create dispatcher succeeds - request is namespace-scoped",
InitialState: []runtime.Object{
@@ -249,7 +264,7 @@ func TestReconcile(t *testing.T) {
makeReadyClusterChannelProvisioner(),
makeK8sService(),
},
- ReconcileKey: fmt.Sprintf("%s/%s", testNS, Name),
+ ReconcileKey: fmt.Sprintf("%s/%s", testNS, inMemoryName),
WantEvent: []corev1.Event{
events[ccpReconciled],
},
@@ -297,13 +312,19 @@ func TestReconcile(t *testing.T) {
logger: zap.NewNop(),
}
if tc.ReconcileKey == "" {
- tc.ReconcileKey = fmt.Sprintf("/%s", Name)
+ tc.ReconcileKey = fmt.Sprintf("/%s", inMemoryName)
}
tc.IgnoreTimes = true
t.Run(tc.Name, tc.Runner(t, r, c, recorder))
}
}
+func makeClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner {
+ ccp := makeClusterChannelProvisioner()
+ ccp.SetName(inMemoryChannelName)
+ return ccp
+}
+
func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner {
return &eventingv1alpha1.ClusterChannelProvisioner{
TypeMeta: metav1.TypeMeta{
@@ -311,7 +332,7 @@ func makeClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner
Kind: "ClusterChannelProvisioner",
},
ObjectMeta: metav1.ObjectMeta{
- Name: Name,
+ Name: inMemoryName,
UID: ccpUID,
},
Spec: eventingv1alpha1.ClusterChannelProvisionerSpec{},
@@ -328,6 +349,12 @@ func makeReadyClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvis
return ccp
}
+func makeReadyClusterChannelProvisionerOld() *eventingv1alpha1.ClusterChannelProvisioner {
+ ccp := makeReadyClusterChannelProvisioner()
+ ccp.Name = inMemoryChannelName
+ return ccp
+}
+
func makeDeletingClusterChannelProvisioner() *eventingv1alpha1.ClusterChannelProvisioner {
ccp := makeClusterChannelProvisioner()
ccp.DeletionTimestamp = &deletionTime
@@ -342,35 +369,44 @@ func makeK8sService() *corev1.Service {
},
ObjectMeta: metav1.ObjectMeta{
Namespace: system.Namespace(),
- Name: fmt.Sprintf("%s-dispatcher", Name),
+ Name: fmt.Sprintf("%s-dispatcher", inMemoryName),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
Kind: "ClusterChannelProvisioner",
- Name: Name,
+ Name: inMemoryName,
UID: ccpUID,
Controller: &truePointer,
BlockOwnerDeletion: &truePointer,
},
},
- Labels: util.DispatcherLabels(Name),
+ Labels: util.DispatcherLabels(inMemoryName),
},
Spec: corev1.ServiceSpec{
- Selector: util.DispatcherLabels(Name),
+ Selector: util.DispatcherLabels(inMemoryName),
Ports: []corev1.ServicePort{
{
- Name: "http",
Port: 80,
TargetPort: intstr.FromInt(8080),
+ Protocol: corev1.ProtocolTCP,
},
},
},
}
}
+func makeK8sServiceOld() *corev1.Service {
+ svc := makeK8sService()
+ svc.SetName(fmt.Sprintf("%s-dispatcher", inMemoryChannelName))
+ svc.GetOwnerReferences()[0].Name = inMemoryChannelName
+ svc.SetLabels(util.DispatcherLabels(inMemoryChannelName))
+ svc.Spec.Selector = util.DispatcherLabels(inMemoryChannelName)
+ return svc
+}
+
func makeOldK8sService() *corev1.Service {
svc := makeK8sService()
- svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", Name)
+ svc.ObjectMeta.Name = fmt.Sprintf("%s-clusterbus", inMemoryName)
return svc
}
diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go
index d8da2d062b4..2b09c992b4f 100644
--- a/pkg/provisioners/inmemory/controller/main.go
+++ b/pkg/provisioners/inmemory/controller/main.go
@@ -29,6 +29,8 @@ import (
"go.uber.org/zap"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
func main() {
diff --git a/pkg/provisioners/provisioner_util.go b/pkg/provisioners/provisioner_util.go
index 4afe9d4aea0..0a3653df75f 100644
--- a/pkg/provisioners/provisioner_util.go
+++ b/pkg/provisioners/provisioner_util.go
@@ -19,7 +19,10 @@ import (
 "github.com/knative/pkg/system"
 )
-func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner) (*corev1.Service, error) {
+// ServiceOption can be used to optionally modify the default K8s Service that CreateDispatcherService creates for the dispatcher.
+type ServiceOption func(*corev1.Service) error
+
+func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) {
svcKey := types.NamespacedName{
Namespace: system.Namespace(),
Name: channelDispatcherServiceName(ccp.Name),
@@ -29,7 +33,12 @@ func CreateDispatcherService(ctx context.Context, client runtimeClient.Client, c
err := client.Get(ctx, svcKey, svc)
return svc, err
}
- return createK8sService(ctx, client, getSvc, newDispatcherService(ccp))
+ svc, err := newDispatcherService(ccp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return createK8sService(ctx, client, getSvc, svc)
}
func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeClient.Client, u *eventingv1alpha1.ClusterChannelProvisioner) error {
@@ -50,9 +59,9 @@ func UpdateClusterChannelProvisionerStatus(ctx context.Context, client runtimeCl
// newDispatcherService creates a new Service for a ClusterChannelProvisioner resource. It also sets
// the appropriate OwnerReferences on the resource so handleObject can discover
// the ClusterChannelProvisioner resource that 'owns' it.
-func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *corev1.Service {
+func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner, opts ...ServiceOption) (*corev1.Service, error) {
labels := DispatcherLabels(ccp.Name)
- return &corev1.Service{
+ svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: channelDispatcherServiceName(ccp.Name),
Namespace: system.Namespace(),
@@ -69,13 +78,24 @@ func newDispatcherService(ccp *eventingv1alpha1.ClusterChannelProvisioner) *core
Selector: labels,
Ports: []corev1.ServicePort{
{
- Name: "http",
+ // There is a bug in Istio where named port doesn't work when connecting using an ExternalName service
+ // Refer to https://github.com/istio/istio/issues/13193 for more details.
+ // TODO: Uncomment Name: "http" when Istio fixes the issue.
+ // Name: "http",
Port: 80,
+ Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(8080),
},
},
},
}
+
+ for _, opt := range opts {
+ if err := opt(svc); err != nil {
+ return nil, err
+ }
+ }
+ return svc, nil
}
func DispatcherLabels(ccpName string) map[string]string {
diff --git a/pkg/provisioners/provisioner_util_test.go b/pkg/provisioners/provisioner_util_test.go
index cdf2eb724e6..fcd6a9dafe1 100644
--- a/pkg/provisioners/provisioner_util_test.go
+++ b/pkg/provisioners/provisioner_util_test.go
@@ -180,9 +180,9 @@ func makeDispatcherService() *corev1.Service {
Selector: DispatcherLabels(clusterChannelProvisionerName),
Ports: []corev1.ServicePort{
{
- Name: "http",
Port: 80,
TargetPort: intstr.FromInt(8080),
+ Protocol: corev1.ProtocolTCP,
},
},
},
diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
index f83a991a7b1..3bde11755e9 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
@@ -58,6 +58,8 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: ingressLabels(args.Broker),
+ // TODO: Remove this annotation once all channels stop using istio virtual service
+ // https://github.com/knative/eventing/issues/294
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
},
diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
index 8872b7026ac..282a1c0985d 100644
--- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
+++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
@@ -30,7 +30,6 @@ import (
"net/http"
"github.com/google/go-cmp/cmp"
- "github.com/knative/eventing/pkg/provisioners"
"github.com/knative/eventing/pkg/sidecar/fanout"
"go.uber.org/zap"
)
@@ -45,27 +44,19 @@ type Config struct {
type ChannelConfig struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
+ HostName string `json:"hostname"`
FanoutConfig fanout.Config `json:"fanoutConfig"`
}
-// MakeChannelKey creates the key used for this Channel in the Handler's handlers map.
-func makeChannelKey(namespace, name string) string {
- return fmt.Sprintf("%s/%s", namespace, name)
-}
-
-// makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around
-// MakeChannelKey.
+// makeChannelKeyFromConfig creates the channel key for a given channelConfig,
+// which is the Channel's host name.
func makeChannelKeyFromConfig(config ChannelConfig) string {
- return makeChannelKey(config.Namespace, config.Name)
+ return config.HostName
}
// getChannelKey extracts the channel key from the given HTTP request.
-func getChannelKey(r *http.Request) (string, error) {
- cr, err := provisioners.ParseChannel(r.Host)
- if err != nil {
- return "", err
- }
- return makeChannelKey(cr.Namespace, cr.Name), nil
+func getChannelKey(r *http.Request) string {
+ return r.Host
}
// Handler is an http.Handler that introspects the incoming request to determine what Channel it is
@@ -114,12 +105,7 @@ func (h *Handler) CopyWithNewConfig(conf Config) (*Handler, error) {
// ServeHTTP delegates the actual handling of the request to a fanout.Handler, based on the
// request's channel key.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- channelKey, err := getChannelKey(r)
- if err != nil {
- h.logger.Error("Unable to extract channelKey", zap.Error(err))
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
+ channelKey := getChannelKey(r)
fh, ok := h.handlers[channelKey]
if !ok {
h.logger.Error("Unable to find a handler for request", zap.String("channelKey", channelKey))
diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go
index e6c9c30d048..32b86bdc84a 100644
--- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go
+++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler_test.go
@@ -34,33 +34,6 @@ const (
replaceDomain = "replaceDomain"
)
-func TestMakeChannelKey(t *testing.T) {
- testCases := []struct {
- namespace string
- name string
- key string
- }{
- {
- namespace: "default",
- name: "channel",
- key: "default/channel",
- },
- {
- namespace: "foo",
- name: "bar",
- key: "foo/bar",
- },
- }
- for _, tc := range testCases {
- name := fmt.Sprintf("%s, %s -> %s", tc.namespace, tc.name, tc.key)
- t.Run(name, func(t *testing.T) {
- if key := makeChannelKey(tc.namespace, tc.name); key != tc.key {
- t.Errorf("Unexpected ChannelKey. Expected '%v'. Actual '%v'", tc.key, key)
- }
- })
- }
-}
-
func TestNewHandler(t *testing.T) {
testCases := []struct {
name string
@@ -72,16 +45,14 @@ func TestNewHandler(t *testing.T) {
config: Config{
ChannelConfigs: []ChannelConfig{
{
- Namespace: "default",
- Name: "duplicate",
+ HostName: "duplicatekey",
},
{
- Namespace: "default",
- Name: "duplicate",
+ HostName: "duplicatekey",
},
},
},
- createErr: "duplicate channel key: default/duplicate",
+ createErr: "duplicate channel key: duplicatekey",
},
}
@@ -241,8 +212,9 @@ func TestServeHTTP(t *testing.T) {
config: Config{
ChannelConfigs: []ChannelConfig{
{
- Namespace: "default",
- Name: "first-channel",
+ Namespace: "ns",
+ Name: "name",
+ HostName: "first-channel.default",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -261,8 +233,10 @@ func TestServeHTTP(t *testing.T) {
config: Config{
ChannelConfigs: []ChannelConfig{
{
- Namespace: "default",
- Name: "first-channel",
+
+ Namespace: "ns",
+ Name: "name",
+ HostName: "first-channel.default",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -274,6 +248,7 @@ func TestServeHTTP(t *testing.T) {
{
Namespace: "default",
Name: "second-channel",
+ HostName: "second-channel.default",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -303,7 +278,7 @@ func TestServeHTTP(t *testing.T) {
h, err := NewHandler(zap.NewNop(), tc.config)
if err != nil {
- t.Errorf("Unexpected NewHandler error: '%v'", err)
+ t.Fatalf("Unexpected NewHandler error: '%v'", err)
}
r := requestWithChannelKey(tc.key)
diff --git a/pkg/sidecar/swappable/swappable_test.go b/pkg/sidecar/swappable/swappable_test.go
index 7ee97d00955..b4cc0daa872 100644
--- a/pkg/sidecar/swappable/swappable_test.go
+++ b/pkg/sidecar/swappable/swappable_test.go
@@ -30,9 +30,8 @@ import (
)
const (
- namespace = "default"
- name = "channel1"
replaceDomain = "replaceDomain"
+ hostName = "a.b.c.d"
)
func TestHandler(t *testing.T) {
@@ -44,8 +43,7 @@ func TestHandler(t *testing.T) {
{
ChannelConfigs: []multichannelfanout.ChannelConfig{
{
- Namespace: namespace,
- Name: name,
+ HostName: hostName,
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -59,8 +57,7 @@ func TestHandler(t *testing.T) {
{
ChannelConfigs: []multichannelfanout.ChannelConfig{
{
- Namespace: namespace,
- Name: name,
+ HostName: hostName,
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -96,8 +93,7 @@ func TestHandler_InvalidConfigChange(t *testing.T) {
initialConfig: multichannelfanout.Config{
ChannelConfigs: []multichannelfanout.ChannelConfig{
{
- Namespace: namespace,
- Name: name,
+ HostName: hostName,
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -112,12 +108,10 @@ func TestHandler_InvalidConfigChange(t *testing.T) {
-// Duplicate (namespace, name).
+// Duplicate HostName.
ChannelConfigs: []multichannelfanout.ChannelConfig{
{
- Namespace: namespace,
- Name: name,
+ HostName: hostName,
},
{
- Namespace: namespace,
- Name: name,
+ HostName: hostName,
},
},
},
@@ -183,7 +177,7 @@ func updateConfigAndTest(t *testing.T, h *Handler, config multichannelfanout.Con
func assertRequestAccepted(t *testing.T, h *Handler) {
w := httptest.NewRecorder()
- h.ServeHTTP(w, makeRequest(namespace, name))
+ h.ServeHTTP(w, makeRequest(hostName))
if w.Code != http.StatusAccepted {
t.Errorf("Unexpected response code. Expected 202. Actual %v", w.Code)
}
@@ -196,8 +190,8 @@ func (*successHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
_ = r.Body.Close()
}
-func makeRequest(namespace, name string) *http.Request {
- r := httptest.NewRequest("POST", fmt.Sprintf("http://%s.%s/", name, namespace), strings.NewReader(""))
+func makeRequest(hostName string) *http.Request {
+ r := httptest.NewRequest("POST", fmt.Sprintf("http://%s/", hostName), strings.NewReader(""))
return r
}
diff --git a/test/crd.go b/test/crd.go
index efbab8f3888..53864c882cc 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -31,6 +31,9 @@ const (
// ClusterChannelProvisioner returns a ClusterChannelProvisioner for a given name.
func ClusterChannelProvisioner(name string) *corev1.ObjectReference {
+ if name == "" {
+ return nil
+ }
return pkgTest.CoreV1ObjectReference("ClusterChannelProvisioner", eventsApiVersion, name)
}
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index e8d7037e247..697031fbfb7 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -627,40 +627,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-===========================================================
-Import: github.com/knative/eventing/vendor/github.com/fsnotify/fsnotify
-
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
===========================================================
Import: github.com/knative/eventing/vendor/github.com/ghodss/yaml
From c412df6ab257f36cf4538f4d9c015af8b2394cdb Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 15:26:23 -0700
Subject: [PATCH 23/76] Expand the baseline attributes available
Baseline attributes now include ID and Subject, which is a new field
being standardized for 0.3.
The SDK now has common methods for retrieving ID and Time attributes among
others. There's also a single top-level cloudevents package that aliases
all the types in other packages.
---
Gopkg.lock | 5 +-
pkg/broker/cel.go | 27 +++--
.../dev_knative/cloud_events_context.pb.go | 67 ++++++++---
.../dev_knative/cloud_events_context.proto | 17 ++-
vendor/github.com/cloudevents/sdk-go/alias.go | 111 ++++++++++++++++++
5 files changed, 191 insertions(+), 36 deletions(-)
create mode 100644 vendor/github.com/cloudevents/sdk-go/alias.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 1bb1d2cef1a..33be1a8025c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -66,9 +66,10 @@
version = "v2.1.15"
[[projects]]
- digest = "1:fa1c3e6de410f74eb102fd927c838a66feb5b825fdf63d0e82cbbfd1a16db8a1"
+ digest = "1:13e0fa926561dc81e7229d65f5c0aa2cc74b4020d2d3da416fff4ee0946545da"
name = "github.com/cloudevents/sdk-go"
packages = [
+ ".",
"pkg/cloudevents",
"pkg/cloudevents/client",
"pkg/cloudevents/codec",
@@ -1278,6 +1279,7 @@
"cloud.google.com/go/pubsub",
"github.com/Shopify/sarama",
"github.com/bsm/sarama-cluster",
+ "github.com/cloudevents/sdk-go",
"github.com/cloudevents/sdk-go/pkg/cloudevents",
"github.com/cloudevents/sdk-go/pkg/cloudevents/client",
"github.com/cloudevents/sdk-go/pkg/cloudevents/context",
@@ -1285,6 +1287,7 @@
"github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
"github.com/golang/protobuf/jsonpb",
+ "github.com/golang/protobuf/proto",
"github.com/golang/protobuf/ptypes/struct",
"github.com/google/cel-go/cel",
"github.com/google/cel-go/checker/decls",
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index aa75f9c1796..cd8f782b598 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go"
"github.com/golang/protobuf/jsonpb"
structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/google/cel-go/cel"
@@ -58,19 +58,20 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
}
vars := map[string]interface{}{}
- // Set baseline context fields
- dmt, err := event.Context.GetDataMediaType()
- if err != nil {
- r.logger.Error("Failed to parse data media type", zap.Error(err))
- }
-
+ // Set baseline context attributes. The attributes available may not be
+ // exactly the same as the attributes defined in the current version of the
+ // CloudEvents spec.
ceCtx := &celprotos.CloudEventsContext{
- Specversion: event.Context.GetSpecVersion(),
- Type: event.Context.GetType(),
- Source: event.Context.GetSource(),
- Schemaurl: event.Context.GetSchemaURL(),
- Datamediatype: dmt,
- Datacontenttype: event.Context.GetDataContentType(),
+ Specversion: event.SpecVersion(),
+ Type: event.Type(),
+ Source: event.Source(),
+ Subject: event.Subject(),
+ Id: event.ID(),
+ // TODO Time. Should this be a string or a (cel-native) protobuf timestamp?
+ Schemaurl: event.SchemaURL(),
+ Datacontenttype: event.DataContentType(),
+ Datamediatype: event.DataMediaType(),
+ Datacontentencoding: event.DataContentEncoding(),
}
vars[CELVarKeyContext] = ceCtx
diff --git a/pkg/broker/dev_knative/cloud_events_context.pb.go b/pkg/broker/dev_knative/cloud_events_context.pb.go
index 03d3135614b..27d0e119ce9 100644
--- a/pkg/broker/dev_knative/cloud_events_context.pb.go
+++ b/pkg/broker/dev_knative/cloud_events_context.pb.go
@@ -20,13 +20,20 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+// CloudEventsContext defines the standard attributes that are always available
+// in the CEL evaluation environment. For compatibility reasons, these
+// attributes may not be exactly the same as the attributes in the current
+// version of the CloudEvents spec.
type CloudEventsContext struct {
Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
- Schemaurl string `protobuf:"bytes,4,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
- Datamediatype string `protobuf:"bytes,5,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
- Datacontenttype string `protobuf:"bytes,6,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
+ Subject string `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"`
+ Id string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
+ Schemaurl string `protobuf:"bytes,7,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
+ Datacontenttype string `protobuf:"bytes,8,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
+ Datamediatype string `protobuf:"bytes,9,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
+ Datacontentencoding string `protobuf:"bytes,10,opt,name=datacontentencoding,proto3" json:"datacontentencoding,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -78,6 +85,20 @@ func (m *CloudEventsContext) GetSource() string {
return ""
}
+func (m *CloudEventsContext) GetSubject() string {
+ if m != nil {
+ return m.Subject
+ }
+ return ""
+}
+
+func (m *CloudEventsContext) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
+
func (m *CloudEventsContext) GetSchemaurl() string {
if m != nil {
return m.Schemaurl
@@ -85,6 +106,13 @@ func (m *CloudEventsContext) GetSchemaurl() string {
return ""
}
+func (m *CloudEventsContext) GetDatacontenttype() string {
+ if m != nil {
+ return m.Datacontenttype
+ }
+ return ""
+}
+
func (m *CloudEventsContext) GetDatamediatype() string {
if m != nil {
return m.Datamediatype
@@ -92,9 +120,9 @@ func (m *CloudEventsContext) GetDatamediatype() string {
return ""
}
-func (m *CloudEventsContext) GetDatacontenttype() string {
+func (m *CloudEventsContext) GetDatacontentencoding() string {
if m != nil {
- return m.Datacontenttype
+ return m.Datacontentencoding
}
return ""
}
@@ -106,17 +134,20 @@ func init() {
func init() { proto.RegisterFile("cloud_events_context.proto", fileDescriptor_a3df9ee1ac825df6) }
var fileDescriptor_a3df9ee1ac825df6 = []byte{
- // 192 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0x4d, 0x6a, 0xc3, 0x30,
- 0x10, 0x85, 0x71, 0xeb, 0x1a, 0x3c, 0xa6, 0x14, 0x66, 0x51, 0x44, 0xe9, 0xc2, 0x94, 0x2e, 0xbc,
- 0xf2, 0xa6, 0x47, 0x30, 0xbd, 0x40, 0x2e, 0x60, 0x14, 0x69, 0x20, 0x26, 0xb6, 0x64, 0xa4, 0xb1,
- 0x48, 0x6e, 0x99, 0x23, 0x05, 0x4f, 0x02, 0xf9, 0xd9, 0xcd, 0x7c, 0xef, 0xe3, 0xc1, 0x83, 0x2f,
- 0x33, 0xfa, 0xc5, 0xf6, 0x94, 0xc8, 0x71, 0xec, 0x8d, 0x77, 0x4c, 0x07, 0x6e, 0xe7, 0xe0, 0xd9,
- 0x63, 0x65, 0x29, 0xb5, 0x7b, 0xa7, 0x79, 0x48, 0xf4, 0x73, 0xca, 0x00, 0xbb, 0xd5, 0xfd, 0x17,
- 0xb5, 0xbb, 0x98, 0x58, 0x43, 0x15, 0x67, 0x32, 0x89, 0x42, 0x1c, 0xbc, 0x53, 0x59, 0x9d, 0x35,
- 0xe5, 0xe6, 0x1e, 0x21, 0x42, 0xce, 0xc7, 0x99, 0xd4, 0x8b, 0x44, 0x72, 0xe3, 0x27, 0x14, 0xd1,
- 0x2f, 0xc1, 0x90, 0x7a, 0x15, 0x7a, 0xfd, 0xf0, 0x1b, 0xca, 0x68, 0x76, 0x34, 0xe9, 0x25, 0x8c,
- 0x2a, 0x97, 0xe8, 0x06, 0xf0, 0x17, 0xde, 0xad, 0x66, 0x3d, 0x91, 0x1d, 0xb4, 0x54, 0xbe, 0x89,
- 0xf1, 0x08, 0xb1, 0x81, 0x8f, 0x15, 0xc8, 0x14, 0xc7, 0xe2, 0x15, 0xe2, 0x3d, 0xe3, 0x6d, 0x21,
- 0x33, 0xff, 0xce, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21, 0xcb, 0xd7, 0x29, 0x04, 0x01, 0x00, 0x00,
+ // 236 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x03, 0x31,
+ 0x10, 0xc6, 0xe9, 0xba, 0xee, 0x76, 0xa7, 0xf8, 0x87, 0x11, 0x64, 0x10, 0x0f, 0x45, 0x3c, 0xf4,
+ 0x54, 0x04, 0x1f, 0xa1, 0x78, 0xf1, 0xe8, 0x0b, 0x94, 0x34, 0x19, 0x34, 0xda, 0x26, 0xcb, 0x66,
+ 0x12, 0xf4, 0xb5, 0x7c, 0x42, 0xe9, 0xa8, 0xb8, 0x8a, 0xb7, 0x7c, 0xbf, 0xef, 0x17, 0x86, 0x19,
+ 0xb8, 0xb0, 0xdb, 0x98, 0xdd, 0x9a, 0x0b, 0x07, 0x49, 0x6b, 0x1b, 0x83, 0xf0, 0xab, 0x2c, 0xfb,
+ 0x21, 0x4a, 0xc4, 0x99, 0xe3, 0xb2, 0x7c, 0x09, 0x46, 0x7c, 0xe1, 0xab, 0xf7, 0x0a, 0x70, 0xb5,
+ 0x77, 0xef, 0x54, 0x5d, 0x7d, 0x9a, 0x38, 0x87, 0x59, 0xea, 0xd9, 0x16, 0x1e, 0x92, 0x8f, 0x81,
+ 0x26, 0xf3, 0xc9, 0xa2, 0x7b, 0x18, 0x23, 0x44, 0xa8, 0xe5, 0xad, 0x67, 0xaa, 0xb4, 0xd2, 0x37,
+ 0x9e, 0x43, 0x93, 0x62, 0x1e, 0x2c, 0xd3, 0x81, 0xd2, 0xaf, 0x84, 0x04, 0x6d, 0xca, 0x9b, 0x67,
+ 0xb6, 0x42, 0xb5, 0x16, 0xdf, 0x11, 0x8f, 0xa1, 0xf2, 0x8e, 0x0e, 0x15, 0x56, 0xde, 0xe1, 0x25,
+ 0x74, 0xc9, 0x3e, 0xf1, 0xce, 0xe4, 0x61, 0x4b, 0xad, 0xe2, 0x1f, 0x80, 0x0b, 0x38, 0x71, 0x46,
+ 0x8c, 0xae, 0x13, 0x44, 0xc7, 0x4f, 0xd5, 0xf9, 0x8b, 0xf1, 0x1a, 0x8e, 0xf6, 0x68, 0xc7, 0xce,
+ 0x1b, 0xf5, 0x3a, 0xf5, 0x7e, 0x43, 0xbc, 0x81, 0xb3, 0xd1, 0x47, 0x0e, 0x36, 0x3a, 0x1f, 0x1e,
+ 0x09, 0xd4, 0xfd, 0xaf, 0xba, 0xaf, 0xa7, 0xcd, 0x69, 0xbb, 0x69, 0xf4, 0x90, 0xb7, 0x1f, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0xe0, 0x00, 0x3a, 0xb0, 0x66, 0x01, 0x00, 0x00,
}
diff --git a/pkg/broker/dev_knative/cloud_events_context.proto b/pkg/broker/dev_knative/cloud_events_context.proto
index fda378d5027..d1ef0ee7340 100644
--- a/pkg/broker/dev_knative/cloud_events_context.proto
+++ b/pkg/broker/dev_knative/cloud_events_context.proto
@@ -2,11 +2,20 @@ syntax = "proto3";
package dev.knative;
+// CloudEventsContext defines the standard attributes that are always available
+// in the CEL evaluation environment. For compatibility reasons, these
+// attributes may not be exactly the same as the attributes in the current
+// version of the CloudEvents spec.
message CloudEventsContext {
string specversion = 1;
string type = 2;
string source = 3;
- string schemaurl = 4;
- string datamediatype = 5;
- string datacontenttype = 6;
-}
\ No newline at end of file
+ string subject = 4;
+ string id = 5;
+ // Reserve field 6 for time. Unclear how to represent this in CEL.
+ reserved 6;
+ string schemaurl = 7;
+ string datacontenttype = 8;
+ string datamediatype = 9;
+ string datacontentencoding = 10;
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/alias.go b/vendor/github.com/cloudevents/sdk-go/alias.go
new file mode 100644
index 00000000000..2450f309f68
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/alias.go
@@ -0,0 +1,111 @@
+package cloudevents
+
+// Package cloudevents alias' common functions and types to improve discoverability and reduce
+// the number of imports for simple HTTP clients.
+
+import (
+ "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/context"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+)
+
+// Client
+
+type Client = client.Client
+
+// Event
+
+type Event = cloudevents.Event
+type EventResponse = cloudevents.EventResponse
+
+// Context
+
+type EventContext = cloudevents.EventContext
+type EventContextV01 = cloudevents.EventContextV01
+type EventContextV02 = cloudevents.EventContextV02
+type EventContextV03 = cloudevents.EventContextV03
+
+// Custom Types
+
+type Timestamp = types.Timestamp
+type URLRef = types.URLRef
+
+// HTTP Transport
+
+type HTTPTransport = http.Transport
+type HTTPTransportContext = http.TransportContext
+type HTTPTransportResponseContext = http.TransportResponseContext
+type HTTPEncoding = http.Encoding
+
+var (
+ // ContentType Helpers
+
+ StringOfApplicationJSON = cloudevents.StringOfApplicationJSON
+ StringOfApplicationXML = cloudevents.StringOfApplicationXML
+ StringOfApplicationCloudEventsJSON = cloudevents.StringOfApplicationCloudEventsJSON
+ StringOfApplicationCloudEventsBatchJSON = cloudevents.StringOfApplicationCloudEventsBatchJSON
+ StringOfBase64 = cloudevents.StringOfBase64
+
+ Base64 = cloudevents.Base64
+
+ // Client Creation
+
+ NewClient = client.New
+ NewDefaultClient = client.NewDefault
+
+ // Client Options
+
+ WithEventDefaulter = client.WithEventDefaulter
+ WithUUIDs = client.WithUUIDs
+ WithTimeNow = client.WithTimeNow
+
+ // Event Creation
+
+ NewEvent = cloudevents.New
+ VersionV01 = cloudevents.CloudEventsVersionV01
+ VersionV02 = cloudevents.CloudEventsVersionV02
+ VersionV03 = cloudevents.CloudEventsVersionV03
+
+ // Context
+
+ ContextWithTarget = context.WithTarget
+ TargetFromContext = context.TargetFrom
+
+ // Custom Types
+
+ ParseTimestamp = types.ParseTimestamp
+ ParseURLRef = types.ParseURLRef
+
+ // HTTP Transport
+
+ NewHTTPTransport = http.New
+
+ // HTTP Transport Options
+
+ WithTarget = http.WithTarget
+ WithMethod = http.WithMethod
+ WitHHeader = http.WithHeader
+ WithShutdownTimeout = http.WithShutdownTimeout
+ WithEncoding = http.WithEncoding
+ WithBinaryEncoding = http.WithBinaryEncoding
+ WithStructuredEncoding = http.WithStructuredEncoding
+ WithPort = http.WithPort
+ WithPath = http.WithPath
+
+ // HTTP Context
+
+ HTTPTransportContextFrom = http.TransportContextFrom
+ ContextWithHeader = http.ContextWithHeader
+
+ // HTTP Transport Encodings
+
+ HTTPBinaryV01 = http.BinaryV01
+ HTTPStructuredV01 = http.StructuredV01
+ HTTPBinaryV02 = http.BinaryV02
+ HTTPStructuredV02 = http.StructuredV02
+ HTTPBinaryV03 = http.BinaryV03
+ HTTPStructuredV03 = http.StructuredV03
+ HTTPBatchedV03 = http.BatchedV03
+)
From ebe43f171ee227e86c9b069e6749578bca89b65b Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 15:30:09 -0700
Subject: [PATCH 24/76] Clarify multiple filter strategy behavior
If multiple filter strategies are used, only one is used and others are
ignored. Currently the precedence is CEL, SourceAndType.
---
pkg/broker/receiver.go | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index 000408c622b..e6ddcc21cde 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -212,16 +212,20 @@ func (r *Receiver) getTrigger(ctx context.Context, ref provisioners.ChannelRefer
// shouldSendMessage determines whether message 'm' should be sent based on the triggerSpec 'ts'.
-// Currently it supports exact matching on type and/or source of events.
+// Currently it supports CEL filtering and exact matching on type/source.
+//
+// Only one of the filter strategies is used to evaluate the event. The current
+// precedence is: CEL, SourceAndType.
+//
+// If no filter strategy is present, shouldSendMessage returns true.
+//
// TODO this should allow returning error so the errors can be surfaced to the
-// trigger
+// trigger.
func (r *Receiver) shouldSendMessage(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) bool {
if ts.Filter == nil {
r.logger.Error("No filter specified")
return false
}
- // TODO what should happen if multiple filter types are specified? OR? AND?
- // precedence rules?
if ts.Filter.CEL != nil {
pass, err := r.filterEventByCEL(ts, event)
if err != nil {
From acbf5bf12bb561e90501b2a774543c3ed762761d Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Mon, 15 Apr 2019 15:31:35 -0700
Subject: [PATCH 25/76] Use DataMediaType to check for parseable data
DataMediaType is the content type with content type options like charset
removed.
---
pkg/broker/cel.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index cd8f782b598..805da83e817 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -121,7 +121,7 @@ func ceParsedExtensionsStruct(ext map[string]interface{}) (*structpb.Struct, err
func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
// TODO CloudEvents SDK might have a better way to do this with data codecs
- if event.Context.GetDataContentType() == "application/json" {
+ if event.DataMediaType() == "application/json" {
var decodedData map[string]interface{}
err := event.DataAs(&decodedData)
if err != nil {
From 6cd376b506782b80ccaf4d2600974d7e33a2d098 Mon Sep 17 00:00:00 2001
From: chaodaiG <45011425+chaodaiG@users.noreply.github.com>
Date: Mon, 15 Apr 2019 16:02:00 -0700
Subject: [PATCH 26/76] update test-infra (#1056)
---
Gopkg.lock | 4 +-
.../knative/test-infra/scripts/README.md | 2 +
.../knative/test-infra/scripts/e2e-tests.sh | 74 +++++++++++++++----
.../knative/test-infra/scripts/library.sh | 15 ++--
4 files changed, 72 insertions(+), 23 deletions(-)
diff --git a/Gopkg.lock b/Gopkg.lock
index 14b73872dea..60e009ffec6 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -394,14 +394,14 @@
[[projects]]
branch = "master"
- digest = "1:921c8888ac7bf7240bc5473d029201b3eb19f9592c7647c4c29f96b6c901b1bb"
+ digest = "1:f3c8dc03bb88bca90c59550e06b0f27a6ca489b1747ac8e5153686a6752e4d74"
name = "github.com/knative/test-infra"
packages = [
"scripts",
"tools/dep-collector",
]
pruneopts = "UT"
- revision = "4ce16d390c55c71290f3b285bdfe37a1c5490304"
+ revision = "3a09cd7f5428743509941d116fdee644041a3507"
[[projects]]
digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
diff --git a/vendor/github.com/knative/test-infra/scripts/README.md b/vendor/github.com/knative/test-infra/scripts/README.md
index 07398fb7fed..b33543080e9 100644
--- a/vendor/github.com/knative/test-infra/scripts/README.md
+++ b/vendor/github.com/knative/test-infra/scripts/README.md
@@ -108,8 +108,10 @@ This is a helper script for Knative E2E test scripts. To use it:
if the default values don't fit your needs:
- `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`.
+ - `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test cluster creation in case of stockout. Defaults to `us-west1 us-east1`.
- `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a regional
cluster).
+ - `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test cluster creation in case of stockout. If defined, `E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none.
- `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to `n1-standard-4}`.
- `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when autoscaling,
defaults to 1.
diff --git a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
index c2328e7b8d5..e0c949cc43d 100755
--- a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
+++ b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Copyright 2018 The Knative Authors
+# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -38,9 +38,15 @@ function build_resource_name() {
# Test cluster parameters
# Configurable parameters
-readonly E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1}
+# export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess
+export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1}
# By default we use regional clusters.
-readonly E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-}
+export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-}
+
+# Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region
+readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1}
+readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-}
+
readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4}
readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod}
readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta}
@@ -154,20 +160,15 @@ function create_test_cluster() {
cluster_setup || fail_test "cluster setup failed"
fi
- header "Creating test cluster"
-
echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes."
# Smallest cluster required to run the end-to-end-tests
- local geoflag="--gcp-region=${E2E_CLUSTER_REGION}"
- [[ -n "${E2E_CLUSTER_ZONE}" ]] && geoflag="--gcp-zone=${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
local CLUSTER_CREATION_ARGS=(
--gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${EXTRA_CLUSTER_CREATION_FLAGS[@]}"
--gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
--provider=gke
--deployment=gke
--cluster="${E2E_CLUSTER_NAME}"
- ${geoflag}
--gcp-network="${E2E_NETWORK_NAME}"
--gke-environment="${E2E_GKE_ENVIRONMENT}"
--gke-command-group="${E2E_GKE_COMMAND_GROUP}"
@@ -198,11 +199,7 @@ function create_test_cluster() {
local extra_flags=()
# If using boskos, save time and let it tear down the cluster
(( ! IS_BOSKOS )) && extra_flags+=(--down)
- # Don't fail test for kubetest, as it might incorrectly report test failure
- # if teardown fails (for details, see success() below)
- set +o errexit
- run_go_tool k8s.io/test-infra/kubetest \
- kubetest "${CLUSTER_CREATION_ARGS[@]}" \
+ create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \
--up \
--extract "${E2E_CLUSTER_VERSION}" \
--gcp-node-image "${SERVING_GKE_IMAGE}" \
@@ -215,12 +212,55 @@ function create_test_cluster() {
set +o errexit
function_exists cluster_teardown && cluster_teardown
delete_leaked_network_resources
- local result="$(cat ${TEST_RESULT_FILE})"
+ local result=$(get_test_return_code)
echo "Artifacts were written to ${ARTIFACTS}"
echo "Test result code is ${result}"
exit ${result}
}
+# Retry backup regions/zones if cluster creation fails due to stockout.
+# Parameters: $1..$n - any kubetest flags other than geo flag.
+function create_test_cluster_with_retries() {
+ local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log
+ # zone_not_provided is a placeholder for e2e_cluster_zone to make for loop below work
+ local zone_not_provided="zone_not_provided"
+
+ local e2e_cluster_regions=(${E2E_CLUSTER_REGION})
+ local e2e_cluster_zones=(${E2E_CLUSTER_ZONE})
+
+ if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then
+ e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES})
+ elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then
+ e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS})
+ e2e_cluster_zones=(${zone_not_provided})
+ else
+ echo "No backup region/zone set, cluster creation will fail in case of stockout"
+ fi
+
+ for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do
+ for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do
+ E2E_CLUSTER_REGION=${e2e_cluster_region}
+ E2E_CLUSTER_ZONE=${e2e_cluster_zone}
+ [[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE=""
+
+ local geoflag="--gcp-region=${E2E_CLUSTER_REGION}"
+ [[ -n "${E2E_CLUSTER_ZONE}" ]] && geoflag="--gcp-zone=${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
+
+ header "Creating test cluster in $E2E_CLUSTER_REGION $E2E_CLUSTER_ZONE"
+ # Don't fail test for kubetest, as it might incorrectly report test failure
+ # if teardown fails (for details, see success() below)
+ set +o errexit
+ { run_go_tool k8s.io/test-infra/kubetest \
+ kubetest "$@" ${geoflag}; } 2>&1 | tee ${cluster_creation_log}
+
+ # Exit if test succeeded
+ [[ "$(get_test_return_code)" == "0" ]] && return
+ # If test failed not because of cluster creation stockout, return
+ [[ -z "$(grep -Eio 'does not have enough resources to fulfill the request' ${cluster_creation_log})" ]] && return
+ done
+ done
+}
+
# Setup the test cluster for running the tests.
function setup_test_cluster() {
# Fail fast during setup.
@@ -263,6 +303,12 @@ function setup_test_cluster() {
fi
}
+# Gets the exit code of the test script.
+# For more details, see set_test_return_code().
+function get_test_return_code() {
+ echo $(cat ${TEST_RESULT_FILE})
+}
+
# Set the return code that the test script will return.
# Parameters: $1 - return code (0-255)
function set_test_return_code() {
diff --git a/vendor/github.com/knative/test-infra/scripts/library.sh b/vendor/github.com/knative/test-infra/scripts/library.sh
index 89a93c91aa9..3a2cafa6e27 100755
--- a/vendor/github.com/knative/test-infra/scripts/library.sh
+++ b/vendor/github.com/knative/test-infra/scripts/library.sh
@@ -146,22 +146,23 @@ function wait_until_pods_running() {
return 1
}
-# Waits until all batch job pods are running in the given namespace.
+# Waits until all batch jobs complete in the given namespace.
# Parameters: $1 - namespace.
function wait_until_batch_job_complete() {
- echo -n "Waiting until all batch job pods in namespace $1 run to completion."
+ echo -n "Waiting until all batch jobs in namespace $1 run to completion."
for i in {1..150}; do # timeout after 5 minutes
- local pods="$(kubectl get pods --selector=job-name --no-headers -n $1 2>/dev/null | grep -v '^[[:space:]]*$')"
- # All pods must be complete
- local not_complete=$(echo "${pods}" | grep -v Completed | wc -l)
+ local jobs=$(kubectl get jobs -n $1 --no-headers \
+ -ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}')
+ # All jobs must be complete
+ local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l)
if [[ ${not_complete} -eq 0 ]]; then
- echo -e "\nAll pods are complete:\n${pods}"
+ echo -e "\nAll jobs are complete:\n${jobs}"
return 0
fi
echo -n "."
sleep 2
done
- echo -e "\n\nERROR: timeout waiting for pods to complete\n${pods}"
+ echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}"
return 1
}
From 3b06453de917c7487a89eb7b474199298309529b Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Mon, 15 Apr 2019 16:28:00 -0700
Subject: [PATCH 27/76] Fix errors in test readme (#1046)
* fix errors in test readme
* use prettier to format
* fix typos
* fix CR issues
* Do not bother to change the index
---
test/README.md | 113 ++++++++++++++++++++++++++++---------------------
1 file changed, 65 insertions(+), 48 deletions(-)
diff --git a/test/README.md b/test/README.md
index 1915e30622d..4dc7b55d14b 100644
--- a/test/README.md
+++ b/test/README.md
@@ -6,36 +6,74 @@ This directory contains tests and testing docs for `Knative Eventing`.
they test
- [End-to-end tests](#running-end-to-end-tests) reside in [`/test/e2e`](./e2e)
-## Running unit tests
+## Running tests with scripts
-Use `go test`:
+### Presubmit tests
+
+[`presubmit-tests.sh`](./presubmit-tests.sh) is the entry point for the tests
+before code submission.
+
+You can run it simply with:
```shell
-go test -v ./pkg/...
+test/presubmit-tests.sh
```
-_By default `go test` will not run [the e2e tests](#running-end-to-end-tests),
-which need [`-tags=e2e`](#running-end-to-end-tests) to be enabled._
+_By default, this script will run `build tests`, `unit tests` and
+`integration tests`._ If you only want to run one type of tests, you can run
+this script with corresponding flags like below:
-## Presubmit tests
+```shell
+test/presubmit-tests.sh --build-tests
+test/presubmit-tests.sh --unit-tests
+test/presubmit-tests.sh --integration-tests
+```
-[`presubmit-tests.sh`](./presubmit-tests.sh) is the entry point for the
-[end-to-end tests](/test/e2e).
+_Note that if the tests you are running include `integration tests`, it will
+create a new GKE cluster in project `$PROJECT_ID`, start Knative Serving and
+Eventing system, upload test images to `$KO_DOCKER_REPO`, and run all
+`e2e-*tests.sh` scripts under [`test`](.). After the tests finish, it will
+delete the cluster._
-This script, and consequently, the e2e tests will be run before every code
-submission. You can run these tests manually with:
+### E2E tests
+
+[`e2e-tests.sh`](./e2e-tests.sh) is the entry point for running all e2e tests.
+
+You can run it simply with:
```shell
-test/presubmit-tests.sh
+test/e2e-tests.sh
+```
+
+_By default, it will create a new GKE cluster in project `$PROJECT_ID`, start
+Knative Serving and Eventing system, upload test images to `$KO_DOCKER_REPO`,
+and run the end-to-end tests. After the tests finish, it will delete the
+cluster._
+
+If you have already created your own Kubernetes cluster but haven't installed
+Knative, you can run with `test/e2e-tests.sh --run-tests`.
+
+If you have set up a running environment that meets
+[the e2e test environment requirements](#environment-requirements), you can run
+with `test/e2e-tests.sh --run-tests --skip-knative-setup`.
+
+## Running tests with `go test` command
+
+### Running unit tests
+
+You can also use `go test` command to run unit tests:
+
+```shell
+go test -v ./pkg/...
```
-_Note that to run `presubmit-tests.sh` or `e2e-tests.sh` scripts, you need to
-have a running environment that meets
-[the e2e test environment requirements](#environment-requirements)_
+_By default `go test` will not run [the e2e tests](#running-end-to-end-tests),
+which need [`-tags=e2e`](#running-end-to-end-tests) to be enabled._
-## Running end-to-end tests
+### Running end-to-end tests
-To run [the e2e tests](./e2e), you need to have a running environment that meets
+To run [the e2e tests](./e2e) with `go test` command, you need to have a running
+environment that meets
[the e2e test environment requirements](#environment-requirements), and you need
to specify the build tag `e2e`.
@@ -43,50 +81,29 @@ to specify the build tag `e2e`.
go test -v -tags=e2e -count=1 ./test/e2e
```
-### One test case
+#### One test case
-To run one e2e test case, e.g. TestKubernetesEvents, use
-[the `-run` flag with `go test`](https://golang.org/cmd/go/#hdr-Testing_flags):
+To run one e2e test case, e.g. TestSingleBinaryEvent, use the
+[-run](https://golang.org/cmd/go/#hdr-Testing_flags) flag with `go test`:
```bash
-go test -v -tags=e2e -count=1 ./test/e2e -run ^TestKubernetesEvents$
+go test -v -tags=e2e -count=1 ./test/e2e -run ^TestSingleBinaryEvent$
```
-### Environment requirements
+## Environment requirements
There's couple of things you need to install before running e2e tests locally.
-1. `kubetest` installed:
-
- ```bash
- go get -u k8s.io/test-infra/kubetest
- ```
-
-1. [A running `Knative Serving` cluster.]
+1. A running [Knative](https://www.knative.dev/docs/install/) cluster
1. A docker repo containing [the test images](#test-images)
-Simply run the `./test/e2e-tests.sh` script. It will create a GKE cluster,
-install Knative Serving stack with Istio, upload test images to your Docker repo
-and run the end-to-end tests against the Knative Eventing built from source.
-
-If you already have the `*_OVERRIDE` environment variables set, call the script
-with the `--run-tests` argument and it will use the cluster and run the tests.
-Note that this requires you to have Serving and Istio installed and configured
-to your particular configuration setup. Knative Eventing will still built and
-deployed from source.
-
-Otherwise, calling this script without arguments will create a new cluster in
-project `$PROJECT_ID`, start Knative Serving and the eventing system, upload
-test images, run the tests and delete the cluster. In this case, it's required
-that `$KO_DOCKER_REPO` points to a valid writable docker repo.
-
## Test images
### Building the test images
-Note: this is only required when you run e2e tests locally with `go test`
+_Note: this is only required when you run e2e tests locally with `go test`
commands. Running tests through e2e-tests.sh will publish the images
-automatically.
+automatically._
The [`upload-test-images.sh`](./upload-test-images.sh) script can be used to
build and push the test images used by the e2e tests. It requires:
@@ -110,9 +127,9 @@ deployed in GCR.
New test images should be placed in `./test/test_images`. For each image create
a new sub-folder and include a Go file that will be an entry point to the
-application. This Go file should use the package "main" and include the function
-main(). It is a good practice to include a readme file as well. When uploading
-test images, `ko` will build an image from this folder.
+application. This Go file should use the package `main` and include the function
+`main()`. It is a good practice to include a `readme` file as well. When
+uploading test images, `ko` will build an image from this folder.
## Flags
From 11b760e0408c987047f3b50a1f4665c5314fa636 Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Wed, 17 Apr 2019 11:02:28 -0700
Subject: [PATCH 28/76] Run e2e tests in parallel and refactoring (#1042)
* formatting the log in setup and teardown
* formatting the log in setup and teardown
* run the e2e test cases in parallel
* fix compile errors
* labeled the wrong namespace
* resolve the race condition
* fix the namespace error
* fix namespace error
* fix namespace error
* use the default testing namespace everywhere
* Now the test cases are able to run in parallel
* labeled the wrong namespace
* pass the namespace through the command line
* remove the namespace parameter in the function of creating Broker
* use namespace in the flag
'
* still use pkgTest.Flags.Namespace
* still need to print out the resource when cleaning up
* creating one namespace for each test case and refactoring
* fix the build failure and remove useless logs
* minor change
* minor change
* Update README
* fix README
* fix the ko delete stuck problem
* mistakenly deleted something...
* fix build error..
* remove redundant logs
* wait for single pod running instead of all
* resolve CR issues
* fix CR issue
* solve CR issues
* boolean flag cannot use =
* log.Fatal when error happening in flag parsing
* make logger a global variable
---
test/README.md | 27 +++-
test/builders.go | 2 +-
test/clients.go | 5 +-
test/crd.go | 9 +-
test/e2e-tests.sh | 32 ++--
test/e2e/broker_trigger_test.go | 27 ++--
test/e2e/channel_chain_test.go | 29 +---
test/e2e/e2e.go | 214 +++++++++++++++-----------
test/e2e/event_transformation_test.go | 30 +---
test/e2e/main_test.go | 56 +++++++
test/e2e/single_event_test.go | 55 +++----
test/e2e_flags.go | 76 ++++++++-
12 files changed, 341 insertions(+), 221 deletions(-)
create mode 100644 test/e2e/main_test.go
diff --git a/test/README.md b/test/README.md
index 4dc7b55d14b..9e30d974359 100644
--- a/test/README.md
+++ b/test/README.md
@@ -78,18 +78,39 @@ environment that meets
to specify the build tag `e2e`.
```bash
-go test -v -tags=e2e -count=1 ./test/e2e
+go test -v -tags=e2e -count=1 ./test/e2e -run ^TestMain$ -runFromMain=true
+```
+
+By default, it will run all tests configured for the default
+`ClusterChannelProvisioner` in `main_test.go`.
+
+If you want to run tests against other `ClusterChannelProvisioners`, you can
+specify them through `-clusterChannelProvisioners`.
+
+```bash
+go test -v -tags=e2e -count=1 ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,gcp-pubsub
```
#### One test case
-To run one e2e test case, e.g. TestSingleBinaryEvent, use the
-[-run](https://golang.org/cmd/go/#hdr-Testing_flags) flag with `go test`:
+To run one e2e test case, e.g. `TestSingleBinaryEvents`, use
+[the `-run` flag with `go test`](https://golang.org/cmd/go/#hdr-Testing_flags):
```bash
go test -v -tags=e2e -count=1 ./test/e2e -run ^TestSingleBinaryEvent$
```
+By default, it will run the test against the default
+`ClusterChannelProvisioner`.
+
+If you want to run it against another `ClusterChannelProvisioner`, you can
+specify it through `-clusterChannelProvisioners`. Note that you can only specify
+one `ClusterChannelProvisioner` if you are not running from `TestMain`.
+
+```bash
+go test -v -tags=e2e -count=1 ./test/e2e -run ^TestSingleBinaryEvent$ -clusterChannelProvisioners=in-memory-channel
+```
+
## Environment requirements
There's couple of things you need to install before running e2e tests locally.
diff --git a/test/builders.go b/test/builders.go
index 23d7c6538e3..4ec5e6bcc56 100644
--- a/test/builders.go
+++ b/test/builders.go
@@ -19,7 +19,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// Builder for trigger objects.
+// TriggerBuilder is the builder for trigger objects.
type TriggerBuilder struct {
*eventingv1alpha1.Trigger
}
diff --git a/test/clients.go b/test/clients.go
index 23520a25aa3..5ef37beec56 100644
--- a/test/clients.go
+++ b/test/clients.go
@@ -31,9 +31,8 @@ type Clients struct {
}
// NewClients instantiates and returns several clientsets required for making request to the
-// cluster specified by the combination of clusterName and configPath. Clients can
-// make requests within namespace.
-func NewClients(configPath string, clusterName string, namespace string) (*Clients, error) {
+// cluster specified by the combination of clusterName and configPath.
+func NewClients(configPath string, clusterName string) (*Clients, error) {
clients := &Clients{}
cfg, err := test.BuildClientConfig(configPath, clusterName)
if err != nil {
diff --git a/test/crd.go b/test/crd.go
index 53864c882cc..5fc3816e109 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -26,7 +26,7 @@ import (
)
const (
- eventsApiVersion = "eventing.knative.dev/v1alpha1"
+ eventsAPIVersion = "eventing.knative.dev/v1alpha1"
)
// ClusterChannelProvisioner returns a ClusterChannelProvisioner for a given name.
@@ -34,12 +34,12 @@ func ClusterChannelProvisioner(name string) *corev1.ObjectReference {
if name == "" {
return nil
}
- return pkgTest.CoreV1ObjectReference("ClusterChannelProvisioner", eventsApiVersion, name)
+ return pkgTest.CoreV1ObjectReference("ClusterChannelProvisioner", eventsAPIVersion, name)
}
// ChannelRef returns an ObjectReference for a given Channel Name.
func ChannelRef(name string) *corev1.ObjectReference {
- return pkgTest.CoreV1ObjectReference("Channel", eventsApiVersion, name)
+ return pkgTest.CoreV1ObjectReference("Channel", eventsAPIVersion, name)
}
// Channel returns a Channel with the specified provisioner.
@@ -65,7 +65,7 @@ func SubscriberSpecForService(name string) *v1alpha1.SubscriberSpec {
// ReplyStrategyForChannel returns a ReplyStrategy for a given Channel.
func ReplyStrategyForChannel(name string) *v1alpha1.ReplyStrategy {
return &v1alpha1.ReplyStrategy{
- Channel: pkgTest.CoreV1ObjectReference("Channel", eventsApiVersion, name),
+ Channel: pkgTest.CoreV1ObjectReference("Channel", eventsAPIVersion, name),
}
}
@@ -111,6 +111,7 @@ type TypeAndSource struct {
Source string
}
+// CloudEvent related constants.
const (
CloudEventEncodingBinary = "binary"
CloudEventEncodingStructured = "structured"
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 4051b5d0283..b0d2b0f86b7 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -26,25 +26,28 @@
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
-# Names of the Resources used in the tests.
-# Currently this namespace must be the same as the namespace specified in
-# test/e2e/e2e.go.
-readonly E2E_TEST_NAMESPACE=e2etest-knative-eventing
-
# Helper functions.
+# Setup the Knative environment for running tests
function knative_setup() {
+ # Install the latest stable Knative/serving in the current cluster
start_latest_knative_serving || return 1
+
+ # Install the latest Knative/eventing in the current cluster
+ echo ">> Starting Knative Eventing"
+ echo "Installing Knative Eventing"
ko apply -f config/ || return 1
- wait_until_pods_running knative-eventing || fail_test "Eventing did not come up (1)"
+ wait_until_pods_running knative-eventing || fail_test "Knative Eventing did not come up"
- subheader "Standing up In-Memory ClusterChannelProvisioner"
+ echo "Installing In-Memory ClusterChannelProvisioner"
ko apply -f config/provisioners/in-memory-channel/in-memory-channel.yaml || return 1
- wait_until_pods_running knative-eventing || fail_test "Eventing did not come up (2)"
+ wait_until_pods_running knative-eventing || fail_test "Failed to install the In-Memory ClusterChannelProvisioner"
}
function knative_teardown() {
- ko delete --ignore-not-found=true -f config/
+ echo ">> Stopping Knative Eventing"
+ echo "Uninstalling Knative Eventing"
+ ko delete --ignore-not-found=true --now --timeout 60s -f config/
wait_until_object_does_not_exist namespaces knative-eventing
@@ -54,18 +57,11 @@ function knative_teardown() {
# Setup resources common to all eventing tests
function test_setup() {
- kubectl create namespace ${E2E_TEST_NAMESPACE} || return 1
# Publish test images
+ echo ">> Publishing test images"
$(dirname $0)/upload-test-images.sh e2e || fail_test "Error uploading test images"
}
-function test_teardown() {
- # Delete the test namespace
- echo "Deleting namespace $E2E_TEST_NAMESPACE"
- kubectl --ignore-not-found=true delete namespace ${E2E_TEST_NAMESPACE}
- wait_until_object_does_not_exist namespaces ${E2E_TEST_NAMESPACE}
-}
-
function dump_extra_cluster_state() {
# Collecting logs from all knative's eventing pods
echo "============================================================"
@@ -87,6 +83,6 @@ function dump_extra_cluster_state() {
initialize $@
-go_test_e2e -timeout=20m ./test/e2e || fail_test
+go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel || fail_test
success
diff --git a/test/e2e/broker_trigger_test.go b/test/e2e/broker_trigger_test.go
index 33464ac6e64..ea9c67ec502 100644
--- a/test/e2e/broker_trigger_test.go
+++ b/test/e2e/broker_trigger_test.go
@@ -33,7 +33,7 @@ import (
)
const (
- defaultBrokerName = "default"
+ brokerName = test.DefaultBrokerName
waitForFilterPodRunning = 30 * time.Second
selectorKey = "end2end-test-broker-trigger"
@@ -56,34 +56,26 @@ type eventReceiver struct {
// and sends different events to the broker's address. Finally, it verifies that only
// the appropriate events are routed to the subscribers.
func TestDefaultBrokerWithManyTriggers(t *testing.T) {
- clients, cleaner := Setup(t, t.Logf)
-
- // Verify namespace exists.
- ns, cleanupNS := CreateNamespaceIfNeeded(t, clients, t.Logf)
-
- defer cleanupNS()
- defer TearDown(clients, cleaner, t.Logf)
+ clients, ns, _, cleaner := Setup(t, true, t.Logf)
+ defer TearDown(clients, ns, cleaner, t.Logf)
t.Logf("Labeling namespace %s", ns)
-
// Label namespace so that it creates the default broker.
- err := LabelNamespace(clients, t.Logf, map[string]string{"knative-eventing-injection": "enabled"})
+ err := LabelNamespace(clients, ns, map[string]string{"knative-eventing-injection": "enabled"}, t.Logf)
if err != nil {
t.Fatalf("Error annotating namespace: %v", err)
}
-
t.Logf("Namespace %s annotated", ns)
// Wait for default broker ready.
t.Logf("Waiting for default broker to be ready")
- defaultBroker := test.Broker(defaultBrokerName, ns)
+ defaultBroker := test.Broker(brokerName, ns)
err = WaitForBrokerReady(clients, defaultBroker)
if err != nil {
t.Fatalf("Error waiting for default broker to become ready: %v", err)
}
defaultBrokerUrl := fmt.Sprintf("http://%s", defaultBroker.Status.Address.Hostname)
-
t.Logf("Default broker ready: %q", defaultBrokerUrl)
// These are the event types and sources that triggers will listen to, as well as the selectors
@@ -142,7 +134,7 @@ func TestDefaultBrokerWithManyTriggers(t *testing.T) {
EventSource(event.typeAndSource.Source).
// Don't need to set the broker as we use the default one
// but wanted to be more explicit.
- Broker(defaultBrokerName).
+ Broker(brokerName).
SubscriberSvc(subscriberName).
Build()
err := CreateTrigger(clients, trigger, t.Logf, cleaner)
@@ -156,7 +148,7 @@ func TestDefaultBrokerWithManyTriggers(t *testing.T) {
t.Logf("Waiting for triggers to become ready")
// Wait for all of the triggers in the namespace to be ready.
- if err := WaitForAllTriggersReady(clients, t.Logf, ns); err != nil {
+ if err := WaitForAllTriggersReady(clients, ns, t.Logf); err != nil {
t.Fatalf("Error waiting for triggers to become ready: %v", err)
}
@@ -222,13 +214,14 @@ func TestDefaultBrokerWithManyTriggers(t *testing.T) {
for _, event := range eventsToReceive {
subscriberPodName := name("dumper", event.typeAndSource.Type, event.typeAndSource.Source)
subscriberPod := subscriberPods[subscriberPodName]
+ subscriberContainerName := subscriberPod.Spec.Containers[0].Name
t.Logf("Dumper %q expecting %q", subscriberPodName, strings.Join(expectedEvents[subscriberPodName], ","))
- if err := WaitForLogContents(clients, t.Logf, subscriberPodName, subscriberPod.Spec.Containers[0].Name, ns, expectedEvents[subscriberPodName]); err != nil {
+ if err := WaitForLogContents(clients, t.Logf, subscriberPodName, subscriberContainerName, ns, expectedEvents[subscriberPodName]); err != nil {
t.Fatalf("Event(s) not found in logs of subscriber pod %q: %v", subscriberPodName, err)
}
// At this point all the events should have been received in the pod.
// We check whether we find unexpected events. If so, then we fail.
- found, err := FindAnyLogContents(clients, t.Logf, subscriberPodName, subscriberPod.Spec.Containers[0].Name, ns, unexpectedEvents[subscriberPodName])
+ found, err := FindAnyLogContents(clients, t.Logf, subscriberPodName, subscriberContainerName, ns, unexpectedEvents[subscriberPodName])
if err != nil {
t.Fatalf("Failed querying to find log contents in pod %q: %v", subscriberPodName, err)
}
diff --git a/test/e2e/channel_chain_test.go b/test/e2e/channel_chain_test.go
index a0ce7630231..cfa43f4e1e2 100644
--- a/test/e2e/channel_chain_test.go
+++ b/test/e2e/channel_chain_test.go
@@ -33,35 +33,25 @@ EventSource ---> Channel ---> Subscriptions ---> Channel ---> Subscriptions --->
*/
func TestChannelChain(t *testing.T) {
- if test.EventingFlags.Provisioner == "" {
- t.Fatal("ClusterChannelProvisioner must be set to a non-empty string. Either do not specify --clusterChannelProvisioner or set to something other than the empty string")
- }
-
const (
senderName = "e2e-channelchain-sender"
loggerPodName = "e2e-channelchain-logger-pod"
)
channelNames := [2]string{"e2e-channelchain1", "e2e-channelchain2"}
// subscriptionNames1 corresponds to Subscriptions on channelNames[0]
- subscriptionNames1 := [2]string{"e2e-complexscen-subs11", "e2e-complexscen-subs12"}
+ subscriptionNames1 := [2]string{"e2e-channelchain-subs11", "e2e-channelchain-subs12"}
// subscriptionNames2 corresponds to Subscriptions on channelNames[1]
- subscriptionNames2 := [1]string{"e2e-complexscen-subs21"}
-
- clients, cleaner := Setup(t, t.Logf)
- // verify namespace
- ns, cleanupNS := CreateNamespaceIfNeeded(t, clients, t.Logf)
- defer cleanupNS()
+ subscriptionNames2 := [1]string{"e2e-channelchain-subs21"}
- // TearDown() needs to be deferred after cleanupNS(). Otherwise the namespace is deleted and all
- // resources in it. So when TearDown() runs, it spews a lot of not found errors.
- defer TearDown(clients, cleaner, t.Logf)
+ clients, ns, provisioner, cleaner := Setup(t, true, t.Logf)
+ defer TearDown(clients, ns, cleaner, t.Logf)
// create loggerPod and expose it as a service
t.Logf("creating logger pod")
selector := map[string]string{"e2etest": string(uuid.NewUUID())}
loggerPod := test.EventLoggerPod(loggerPodName, ns, selector)
loggerSvc := test.Service(loggerPodName, ns, selector)
- loggerPod, err := CreatePodAndServiceReady(clients, loggerPod, loggerSvc, ns, t.Logf, cleaner)
+ loggerPod, err := CreatePodAndServiceReady(clients, loggerPod, loggerSvc, t.Logf, cleaner)
if err != nil {
t.Fatalf("Failed to create logger pod and service, and get them ready: %v", err)
}
@@ -70,8 +60,7 @@ func TestChannelChain(t *testing.T) {
t.Logf("Creating Channel and Subscription")
channels := make([]*v1alpha1.Channel, 0)
for _, channelName := range channelNames {
- channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(test.EventingFlags.Provisioner))
- t.Logf("channel: %#v", channel)
+ channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(provisioner))
channels = append(channels, channel)
}
@@ -80,18 +69,16 @@ func TestChannelChain(t *testing.T) {
// create subscriptions that subscribe the first channel, and reply events directly to the second channel
for _, subscriptionName := range subscriptionNames1 {
sub := test.Subscription(subscriptionName, ns, test.ChannelRef(channelNames[0]), nil, test.ReplyStrategyForChannel(channelNames[1]))
- t.Logf("sub: %#v", sub)
subs = append(subs, sub)
}
// create subscriptions that subscribe the second channel, and call the logging service
for _, subscriptionName := range subscriptionNames2 {
sub := test.Subscription(subscriptionName, ns, test.ChannelRef(channelNames[1]), test.SubscriberSpecForService(loggerPodName), nil)
- t.Logf("sub: %#v", sub)
subs = append(subs, sub)
}
// wait for all channels and subscriptions to become ready
- if err := WithChannelsAndSubscriptionsReady(clients, &channels, &subs, t.Logf, cleaner); err != nil {
+ if err := WithChannelsAndSubscriptionsReady(clients, ns, &channels, &subs, t.Logf, cleaner); err != nil {
t.Fatalf("The Channel or Subscription were not marked as Ready: %v", err)
}
@@ -103,7 +90,7 @@ func TestChannelChain(t *testing.T) {
Data: fmt.Sprintf(`{"msg":%q}`, body),
Encoding: test.CloudEventDefaultEncoding,
}
- if err := SendFakeEventToChannel(clients, event, channels[0], ns, t.Logf, cleaner); err != nil {
+ if err := SendFakeEventToChannel(clients, event, channels[0], t.Logf, cleaner); err != nil {
t.Fatalf("Failed to send fake CloudEvent to the channel %q", channels[0].Name)
}
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 400e20a766e..665d13d5d62 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -18,17 +18,17 @@ package e2e
import (
"fmt"
"strings"
+ "sync"
"testing"
"time"
- "k8s.io/apimachinery/pkg/api/errors"
-
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/test"
pkgTest "github.com/knative/pkg/test"
"github.com/knative/pkg/test/logging"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -38,68 +38,112 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
-const (
- DefaultTestNamespace = "e2etest-knative-eventing"
+type clusterChannelProvisioner struct {
+ sync.Mutex
+ name string
+}
+func (ccp *clusterChannelProvisioner) Get() string {
+ ccp.Lock()
+ name := ccp.name
+ ccp.Unlock()
+ return name
+}
+
+func (ccp *clusterChannelProvisioner) Set(name string) {
+ ccp.Lock()
+ ccp.name = name
+ ccp.Unlock()
+}
+
+// ClusterChannelProvisionerToTest holds the CCP that is used to run the test case.
+// It defaults to the first one passed through the clusterChannelProvisioners flag.
+// And it can be changed in main_test.go for test case setup.
+var ClusterChannelProvisionerToTest = clusterChannelProvisioner{name: test.EventingFlags.Provisioners[0]}
+
+const (
interval = 1 * time.Second
- timeout = 1 * time.Minute
+ timeout = 2 * time.Minute
)
-// Setup creates the client objects needed in the e2e tests.
-func Setup(t *testing.T, logf logging.FormatLogger) (*test.Clients, *test.Cleaner) {
- if pkgTest.Flags.Namespace == "" {
- pkgTest.Flags.Namespace = DefaultTestNamespace
- }
-
+// Setup validates namespace and provisioner, creates the client objects needed in the e2e tests.
+func Setup(t *testing.T, runInParallel bool, logf logging.FormatLogger) (*test.Clients, string, string, *test.Cleaner) {
clients, err := test.NewClients(
pkgTest.Flags.Kubeconfig,
- pkgTest.Flags.Cluster,
- pkgTest.Flags.Namespace)
+ pkgTest.Flags.Cluster)
if err != nil {
t.Fatalf("Couldn't initialize clients: %v", err)
}
cleaner := test.NewCleaner(logf, clients.Dynamic)
- return clients, cleaner
+ // Get the CCP to run this test case.
+ ccpToTest := ClusterChannelProvisionerToTest.Get()
+
+ // Create a new namespace to run this test case.
+ // Combine the test name and CCP to avoid duplication.
+ baseFuncName := GetBaseFuncName(t.Name())
+ ns := strings.ToLower(baseFuncName) + "-" + ccpToTest
+ CreateNamespaceIfNeeded(t, clients, ns, t.Logf)
+
+ // Run the test case in parallel if needed.
+ if runInParallel {
+ t.Parallel()
+ }
+
+ return clients, ns, ccpToTest, cleaner
+}
+
+// GetBaseFuncName returns the baseFuncName parsed from the fullFuncName.
+// eg. test/e2e.TestMain will return TestMain.
+// TODO(Fredy-Z): many functions in this file can be moved to knative/pkg/test to make it cleaner.
+func GetBaseFuncName(fullFuncName string) string {
+ baseFuncName := fullFuncName[strings.LastIndex(fullFuncName, "/")+1:]
+ baseFuncName = baseFuncName[strings.LastIndex(baseFuncName, ".")+1:]
+ return baseFuncName
}
// TearDown will delete created names using clients.
-func TearDown(clients *test.Clients, cleaner *test.Cleaner, _ logging.FormatLogger) {
+func TearDown(clients *test.Clients, namespace string, cleaner *test.Cleaner, logf logging.FormatLogger) {
cleaner.Clean(true)
+ if err := DeleteNameSpace(clients, namespace); err != nil {
+ logf("Could not delete the namespace %q: %v", namespace, err)
+ }
}
-// CreateChannel will create a Channel
+// CreateChannel will create a Channel.
func CreateChannel(clients *test.Clients, channel *v1alpha1.Channel, _ logging.FormatLogger, cleaner *test.Cleaner) error {
- channels := clients.Eventing.EventingV1alpha1().Channels(pkgTest.Flags.Namespace)
+ namespace := channel.Namespace
+ channels := clients.Eventing.EventingV1alpha1().Channels(namespace)
res, err := channels.Create(channel)
if err != nil {
return err
}
- cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "channels", pkgTest.Flags.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "channels", namespace, res.ObjectMeta.Name)
return nil
}
-// CreateSubscription will create a Subscription
+// CreateSubscription will create a Subscription.
func CreateSubscription(clients *test.Clients, sub *v1alpha1.Subscription, _ logging.FormatLogger, cleaner *test.Cleaner) error {
- subscriptions := clients.Eventing.EventingV1alpha1().Subscriptions(pkgTest.Flags.Namespace)
+ namespace := sub.Namespace
+ subscriptions := clients.Eventing.EventingV1alpha1().Subscriptions(namespace)
res, err := subscriptions.Create(sub)
if err != nil {
return err
}
- cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "subscriptions", pkgTest.Flags.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "subscriptions", namespace, res.ObjectMeta.Name)
return nil
}
// WithChannelsAndSubscriptionsReady creates Channels and Subscriptions and waits until all are Ready.
// When they are ready, chans and subs are altered to get the real Channels and Subscriptions.
-func WithChannelsAndSubscriptionsReady(clients *test.Clients, chans *[]*v1alpha1.Channel, subs *[]*v1alpha1.Subscription, logf logging.FormatLogger, cleaner *test.Cleaner) error {
+func WithChannelsAndSubscriptionsReady(clients *test.Clients, namespace string, chans *[]*v1alpha1.Channel, subs *[]*v1alpha1.Subscription, logf logging.FormatLogger, cleaner *test.Cleaner) error {
for _, channel := range *chans {
if err := CreateChannel(clients, channel, logf, cleaner); err != nil {
return err
}
}
- channels := clients.Eventing.EventingV1alpha1().Channels(pkgTest.Flags.Namespace)
+ channels := clients.Eventing.EventingV1alpha1().Channels(namespace)
for i, channel := range *chans {
if err := test.WaitForChannelState(channels, channel.Name, test.IsChannelReady, "ChannelIsReady"); err != nil {
return err
@@ -119,7 +163,7 @@ func WithChannelsAndSubscriptionsReady(clients *test.Clients, chans *[]*v1alpha1
}
}
- subscriptions := clients.Eventing.EventingV1alpha1().Subscriptions(pkgTest.Flags.Namespace)
+ subscriptions := clients.Eventing.EventingV1alpha1().Subscriptions(namespace)
for i, sub := range *subs {
if err := test.WaitForSubscriptionState(subscriptions, sub.Name, test.IsSubscriptionReady, "SubscriptionIsReady"); err != nil {
return err
@@ -138,12 +182,13 @@ func WithChannelsAndSubscriptionsReady(clients *test.Clients, chans *[]*v1alpha1
// CreateBroker will create a Broker.
func CreateBroker(clients *test.Clients, broker *v1alpha1.Broker, logf logging.FormatLogger, cleaner *test.Cleaner) error {
- brokers := clients.Eventing.EventingV1alpha1().Brokers(broker.Namespace)
+ namespace := broker.Namespace
+ brokers := clients.Eventing.EventingV1alpha1().Brokers(namespace)
res, err := brokers.Create(broker)
if err != nil {
return err
}
- cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "brokers", broker.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "brokers", namespace, res.ObjectMeta.Name)
return nil
}
@@ -172,12 +217,13 @@ func WaitForBrokerReady(clients *test.Clients, broker *v1alpha1.Broker) error {
// CreateTrigger will create a Trigger.
func CreateTrigger(clients *test.Clients, trigger *v1alpha1.Trigger, logf logging.FormatLogger, cleaner *test.Cleaner) error {
- triggers := clients.Eventing.EventingV1alpha1().Triggers(trigger.Namespace)
+ namespace := trigger.Namespace
+ triggers := clients.Eventing.EventingV1alpha1().Triggers(namespace)
res, err := triggers.Create(trigger)
if err != nil {
return err
}
- cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "triggers", trigger.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(v1alpha1.SchemeGroupVersion.Group, v1alpha1.SchemeGroupVersion.Version, "triggers", namespace, res.ObjectMeta.Name)
return nil
}
@@ -201,18 +247,19 @@ func WithTriggerReady(clients *test.Clients, trigger *v1alpha1.Trigger, logf log
return nil
}
-// CreateServiceAccount will create a service account
+// CreateServiceAccount will create a service account.
func CreateServiceAccount(clients *test.Clients, sa *corev1.ServiceAccount, _ logging.FormatLogger, cleaner *test.Cleaner) error {
- sas := clients.Kube.Kube.CoreV1().ServiceAccounts(pkgTest.Flags.Namespace)
+ namespace := sa.Namespace
+ sas := clients.Kube.Kube.CoreV1().ServiceAccounts(namespace)
res, err := sas.Create(sa)
if err != nil {
return err
}
- cleaner.Add(corev1.SchemeGroupVersion.Group, corev1.SchemeGroupVersion.Version, "serviceaccounts", pkgTest.Flags.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(corev1.SchemeGroupVersion.Group, corev1.SchemeGroupVersion.Version, "serviceaccounts", namespace, res.ObjectMeta.Name)
return nil
}
-// CreateClusterRoleBinding will create a service account binding
+// CreateClusterRoleBinding will create a service account binding.
func CreateClusterRoleBinding(clients *test.Clients, crb *rbacv1.ClusterRoleBinding, _ logging.FormatLogger, cleaner *test.Cleaner) error {
clusterRoleBindings := clients.Kube.Kube.RbacV1().ClusterRoleBindings()
res, err := clusterRoleBindings.Create(crb)
@@ -224,12 +271,12 @@ func CreateClusterRoleBinding(clients *test.Clients, crb *rbacv1.ClusterRoleBind
}
// CreateServiceAccountAndBinding creates both ServiceAccount and ClusterRoleBinding with default
-// cluster-admin role
-func CreateServiceAccountAndBinding(clients *test.Clients, name string, logf logging.FormatLogger, cleaner *test.Cleaner) error {
+// cluster-admin role.
+func CreateServiceAccountAndBinding(clients *test.Clients, name string, namespace string, logf logging.FormatLogger, cleaner *test.Cleaner) error {
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: name,
- Namespace: pkgTest.Flags.Namespace,
+ Namespace: namespace,
},
}
err := CreateServiceAccount(clients, sa, logf, cleaner)
@@ -260,12 +307,13 @@ func CreateServiceAccountAndBinding(clients *test.Clients, name string, logf log
return nil
}
-// CreatePodAndServiceReady will create a Pod and Service, and wait for them to become ready
-func CreatePodAndServiceReady(clients *test.Clients, pod *corev1.Pod, svc *corev1.Service, ns string, logf logging.FormatLogger, cleaner *test.Cleaner) (*corev1.Pod, error) {
+// CreatePodAndServiceReady will create a Pod and Service, and wait for them to become ready.
+func CreatePodAndServiceReady(clients *test.Clients, pod *corev1.Pod, svc *corev1.Service, logf logging.FormatLogger, cleaner *test.Cleaner) (*corev1.Pod, error) {
+ namespace := pod.Namespace
if err := CreatePod(clients, pod, logf, cleaner); err != nil {
return nil, fmt.Errorf("Failed to create pod: %v", err)
}
- if err := pkgTest.WaitForAllPodsRunning(clients.Kube, ns); err != nil {
+ if err := pkgTest.WaitForPodRunning(clients.Kube, pod.Name, namespace); err != nil {
return nil, fmt.Errorf("Error waiting for pod to become running: %v", err)
}
logf("Pod %q starts running", pod.Name)
@@ -274,8 +322,8 @@ func CreatePodAndServiceReady(clients *test.Clients, pod *corev1.Pod, svc *corev
return nil, fmt.Errorf("Failed to create service: %v", err)
}
- // Reload pod to get IP
- pod, err := clients.Kube.Kube.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+ // Reload pod to get IP.
+ pod, err := clients.Kube.Kube.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get pod: %v", err)
}
@@ -283,18 +331,19 @@ func CreatePodAndServiceReady(clients *test.Clients, pod *corev1.Pod, svc *corev
return pod, nil
}
-// CreateService will create a Service
+// CreateService will create a Service.
func CreateService(clients *test.Clients, svc *corev1.Service, _ logging.FormatLogger, cleaner *test.Cleaner) error {
- svcs := clients.Kube.Kube.CoreV1().Services(svc.GetNamespace())
+ namespace := svc.Namespace
+ svcs := clients.Kube.Kube.CoreV1().Services(namespace)
res, err := svcs.Create(svc)
if err != nil {
return err
}
- cleaner.Add(corev1.SchemeGroupVersion.Group, corev1.SchemeGroupVersion.Version, "services", res.ObjectMeta.Namespace, res.ObjectMeta.Name)
+ cleaner.Add(corev1.SchemeGroupVersion.Group, corev1.SchemeGroupVersion.Version, "services", namespace, res.ObjectMeta.Name)
return nil
}
-// CreatePod will create a Pod
+// CreatePod will create a Pod.
func CreatePod(clients *test.Clients, pod *corev1.Pod, _ logging.FormatLogger, cleaner *test.Cleaner) error {
res, err := clients.Kube.CreatePod(pod)
if err != nil {
@@ -305,16 +354,16 @@ func CreatePod(clients *test.Clients, pod *corev1.Pod, _ logging.FormatLogger, c
}
// SendFakeEventToChannel will create fake CloudEvent and send it to the given channel.
-func SendFakeEventToChannel(clients *test.Clients, event *test.CloudEvent, channel *v1alpha1.Channel, ns string, logf logging.FormatLogger, cleaner *test.Cleaner) error {
+func SendFakeEventToChannel(clients *test.Clients, event *test.CloudEvent, channel *v1alpha1.Channel, logf logging.FormatLogger, cleaner *test.Cleaner) error {
logf("Sending fake CloudEvent")
logf("Creating event sender pod")
+ namespace := channel.Namespace
url := fmt.Sprintf("http://%s", channel.Status.Address.Hostname)
- pod := test.EventSenderPod(event.Source, ns, url, event)
- logf("Sender pod: %#v", pod)
+ pod := test.EventSenderPod(event.Source, namespace, url, event)
if err := CreatePod(clients, pod, logf, cleaner); err != nil {
return err
}
- if err := pkgTest.WaitForAllPodsRunning(clients.Kube, ns); err != nil {
+ if err := pkgTest.WaitForPodRunning(clients.Kube, pod.Name, namespace); err != nil {
return err
}
logf("Sender pod starts running")
@@ -334,7 +383,7 @@ func WaitForLogContents(clients *test.Clients, logf logging.FormatLogger, podNam
logf("Could not find content %q for %s/%s. Found %q instead", content, podName, containerName, string(logs))
return false, nil
}
- // do not return as we will keep on looking for the other contents in the slice
+ // Do not return as we will keep on looking for the other contents in the slice.
logf("Found content %q for %s/%s in logs %q", content, podName, containerName, string(logs))
}
return true, nil
@@ -371,7 +420,7 @@ func FindAnyLogContents(clients *test.Clients, logf logging.FormatLogger, podNam
}
// WaitForAllTriggersReady will wait until all triggers in the given namespace are ready.
-func WaitForAllTriggersReady(clients *test.Clients, logf logging.FormatLogger, namespace string) error {
+func WaitForAllTriggersReady(clients *test.Clients, namespace string, logf logging.FormatLogger) error {
triggers := clients.Eventing.EventingV1alpha1().Triggers(namespace)
if err := test.WaitForTriggersListState(triggers, test.TriggersReady, "TriggerIsReady"); err != nil {
return err
@@ -379,10 +428,24 @@ func WaitForAllTriggersReady(clients *test.Clients, logf logging.FormatLogger, n
return nil
}
-// LabelNamespace labels the test namespace with the labels map.
-func LabelNamespace(clients *test.Clients, logf logging.FormatLogger, labels map[string]string) error {
- ns := pkgTest.Flags.Namespace
- nsSpec, err := clients.Kube.Kube.CoreV1().Namespaces().Get(ns, metav1.GetOptions{})
+// CreateNamespaceIfNeeded creates a new namespace if it does not exist.
+func CreateNamespaceIfNeeded(t *testing.T, clients *test.Clients, namespace string, logf logging.FormatLogger) {
+ nsSpec, err := clients.Kube.Kube.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
+
+ if err != nil && errors.IsNotFound(err) {
+ nsSpec = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
+ logf("Creating Namespace: %s", namespace)
+ nsSpec, err = clients.Kube.Kube.CoreV1().Namespaces().Create(nsSpec)
+
+ if err != nil {
+ t.Fatalf("Failed to create Namespace: %s; %v", namespace, err)
+ }
+ }
+}
+
+// LabelNamespace labels the given namespace with the labels map.
+func LabelNamespace(clients *test.Clients, namespace string, labels map[string]string, logf logging.FormatLogger) error {
+ nsSpec, err := clients.Kube.Kube.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return err
}
@@ -396,42 +459,11 @@ func LabelNamespace(clients *test.Clients, logf logging.FormatLogger, labels map
return err
}
-// CreateNamespaceIfNeeded creates a new namespace if it does not exist
-func CreateNamespaceIfNeeded(t *testing.T, clients *test.Clients, logf logging.FormatLogger) (string, func()) {
- shutdown := func() {}
- ns := pkgTest.Flags.Namespace
- logf("Namespace: %s", ns)
-
- nsSpec, err := clients.Kube.Kube.CoreV1().Namespaces().Get(ns, metav1.GetOptions{})
-
- if err != nil && errors.IsNotFound(err) {
- nsSpec = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}
- logf("Creating Namespace: %s", ns)
- nsSpec, err = clients.Kube.Kube.CoreV1().Namespaces().Create(nsSpec)
-
- if err != nil {
- t.Fatalf("Failed to create Namespace: %s; %v", ns, err)
- } else {
- shutdown = func() {
- clients.Kube.Kube.CoreV1().Namespaces().Delete(nsSpec.Name, nil)
- // TODO: this is a bit hacky but in order for the tests to work
- // correctly for a clean namespace to be created we need to also
- // wait for it to be removed.
- // To fix this we could generate namespace names.
- // This only happens when the namespace provided does not exist.
- //
- // wait up to 120 seconds for the namespace to be removed.
- logf("Deleting Namespace: %s", ns)
- for i := 0; i < 120; i++ {
- time.Sleep(1 * time.Second)
- if _, err := clients.Kube.Kube.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil && errors.IsNotFound(err) {
- logf("Namespace has been deleted")
- // the namespace is gone.
- break
- }
- }
- }
- }
+// DeleteNameSpace deletes the namespace that has the given name.
+func DeleteNameSpace(clients *test.Clients, namespace string) error {
+ _, err := clients.Kube.Kube.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
+ if err == nil || !errors.IsNotFound(err) {
+ return clients.Kube.Kube.CoreV1().Namespaces().Delete(namespace, nil)
}
- return ns, shutdown
+ return err
}
diff --git a/test/e2e/event_transformation_test.go b/test/e2e/event_transformation_test.go
index 095dffd0779..6c7b0e7599d 100644
--- a/test/e2e/event_transformation_test.go
+++ b/test/e2e/event_transformation_test.go
@@ -39,28 +39,18 @@ EventSource ---> Channel ---> Subscription ---> Channel ---> Subscription ---->
-----------> Service(Transformation)
*/
func TestEventTransformation(t *testing.T) {
- if test.EventingFlags.Provisioner == "" {
- t.Fatal("ClusterChannelProvisioner must be set to a non-empty string. Either do not specify --clusterChannelProvisioner or set to something other than the empty string")
- }
-
senderName := "e2e-eventtransformation-sender"
msgPostfix := string(uuid.NewUUID())
channelNames := [2]string{"e2e-eventtransformation1", "e2e-eventtransformation2"}
// subscriptionNames1 corresponds to Subscriptions on channelNames[0]
- subscriptionNames1 := []string{"e2e-eventtransformation-subs11"}
+ subscriptionNames1 := []string{"e2e-eventtransformation-subs11", "e2e-eventtransformation-subs12"}
// subscriptionNames2 corresponds to Subscriptions on channelNames[1]
subscriptionNames2 := []string{"e2e-eventtransformation-subs21", "e2e-eventtransformation-subs22"}
transformationPodName := "e2e-eventtransformation-transformation-pod"
loggerPodName := "e2e-eventtransformation-logger-pod"
- clients, cleaner := Setup(t, t.Logf)
- // verify namespace
- ns, cleanupNS := CreateNamespaceIfNeeded(t, clients, t.Logf)
- defer cleanupNS()
-
- // TearDown() needs to be deferred after cleanupNS(). Otherwise the namespace is deleted and all
- // resources in it. So when TearDown() runs, it spews a lot of not found errors.
- defer TearDown(clients, cleaner, t.Logf)
+ clients, ns, provisioner, cleaner := Setup(t, true, t.Logf)
+ defer TearDown(clients, ns, cleaner, t.Logf)
// create subscriberPods and expose them as services
t.Logf("creating subscriber pods")
@@ -70,7 +60,7 @@ func TestEventTransformation(t *testing.T) {
transformationPodSelector := map[string]string{"e2etest": string(uuid.NewUUID())}
transformationPod := test.EventTransformationPod(transformationPodName, ns, transformationPodSelector, msgPostfix)
transformationSvc := test.Service(transformationPodName, ns, transformationPodSelector)
- transformationPod, err := CreatePodAndServiceReady(clients, transformationPod, transformationSvc, ns, t.Logf, cleaner)
+ transformationPod, err := CreatePodAndServiceReady(clients, transformationPod, transformationSvc, t.Logf, cleaner)
if err != nil {
t.Fatalf("Failed to create transformation pod and service, and get them ready: %v", err)
}
@@ -79,7 +69,7 @@ func TestEventTransformation(t *testing.T) {
loggerPodSelector := map[string]string{"e2etest": string(uuid.NewUUID())}
loggerPod := test.EventLoggerPod(loggerPodName, ns, loggerPodSelector)
loggerSvc := test.Service(loggerPodName, ns, loggerPodSelector)
- loggerPod, err = CreatePodAndServiceReady(clients, loggerPod, loggerSvc, ns, t.Logf, cleaner)
+ loggerPod, err = CreatePodAndServiceReady(clients, loggerPod, loggerSvc, t.Logf, cleaner)
if err != nil {
t.Fatalf("Failed to create logger pod and service, and get them ready: %v", err)
}
@@ -89,9 +79,7 @@ func TestEventTransformation(t *testing.T) {
t.Logf("Creating Channel and Subscription")
channels := make([]*v1alpha1.Channel, 0)
for _, channelName := range channelNames {
- channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(test.EventingFlags.Provisioner))
- t.Logf("channel: %#v", channel)
-
+ channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(provisioner))
channels = append(channels, channel)
}
@@ -100,18 +88,16 @@ func TestEventTransformation(t *testing.T) {
// create subscriptions that subscribe the first channel, use the transformation service to transform the events and then forward the transformed events to the second channel
for _, subscriptionName := range subscriptionNames1 {
sub := test.Subscription(subscriptionName, ns, test.ChannelRef(channelNames[0]), test.SubscriberSpecForService(transformationPodName), test.ReplyStrategyForChannel(channelNames[1]))
- t.Logf("sub: %#v", sub)
subs = append(subs, sub)
}
// create subscriptions that subscribe the second channel, and call the logging service
for _, subscriptionName := range subscriptionNames2 {
sub := test.Subscription(subscriptionName, ns, test.ChannelRef(channelNames[1]), test.SubscriberSpecForService(loggerPodName), nil)
- t.Logf("sub: %#v", sub)
subs = append(subs, sub)
}
// wait for all channels and subscriptions to become ready
- if err := WithChannelsAndSubscriptionsReady(clients, &channels, &subs, t.Logf, cleaner); err != nil {
+ if err := WithChannelsAndSubscriptionsReady(clients, ns, &channels, &subs, t.Logf, cleaner); err != nil {
t.Fatalf("The Channels or Subscription were not marked as Ready: %v", err)
}
@@ -123,7 +109,7 @@ func TestEventTransformation(t *testing.T) {
Data: fmt.Sprintf(`{"msg":%q}`, body),
Encoding: test.CloudEventDefaultEncoding,
}
- if err := SendFakeEventToChannel(clients, event, channels[0], ns, t.Logf, cleaner); err != nil {
+ if err := SendFakeEventToChannel(clients, event, channels[0], t.Logf, cleaner); err != nil {
t.Fatalf("Failed to send fake CloudEvent to the channel %q", channels[0].Name)
}
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
new file mode 100644
index 00000000000..3a0f513616f
--- /dev/null
+++ b/test/e2e/main_test.go
@@ -0,0 +1,56 @@
+// +build e2e
+
+/*
+Copyright 2019 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/knative/eventing/test"
+)
+
+// channelTestMap indicates which test cases we want to run for a given CCP.
+var channelTestMap = map[string][]func(t *testing.T){
+ "in-memory-channel": []func(t *testing.T){
+ TestSingleBinaryEvent,
+ TestSingleStructuredEvent,
+ TestEventTransformation,
+ TestChannelChain,
+ TestDefaultBrokerWithManyTriggers,
+ },
+}
+
+func TestMain(t *testing.T) {
+ // if the main test is not indicated to be run, skip it directly.
+ if !test.EventingFlags.RunFromMain {
+ t.Skip()
+ }
+
+ var provisioners = test.EventingFlags.Provisioners
+ for _, provisioner := range provisioners {
+ // set the current provisioner that is used to run the test cases
+ ClusterChannelProvisionerToTest.Set(provisioner)
+ for _, testFunc := range channelTestMap[provisioner] {
+ funcName := runtime.FuncForPC(reflect.ValueOf(testFunc).Pointer()).Name()
+ baseFuncName := GetBaseFuncName(funcName)
+ t.Logf("Running %q with %q ClusterChannelProvisioner", baseFuncName, provisioner)
+ t.Run(baseFuncName, testFunc)
+ }
+ }
+}
diff --git a/test/e2e/single_event_test.go b/test/e2e/single_event_test.go
index 8d1cc907b36..5303baf37a9 100644
--- a/test/e2e/single_event_test.go
+++ b/test/e2e/single_event_test.go
@@ -28,63 +28,48 @@ import (
)
func TestSingleBinaryEvent(t *testing.T) {
- SingleEvent(t, test.CloudEventEncodingBinary)
+ singleEvent(t, test.CloudEventEncodingBinary)
}
func TestSingleStructuredEvent(t *testing.T) {
- SingleEvent(t, test.CloudEventEncodingStructured)
+ singleEvent(t, test.CloudEventEncodingStructured)
}
/*
-SingleEvent tests the following scenario:
+singleEvent tests the following scenario:
EventSource ---> Channel ---> Subscription ---> Service(Logger)
*/
-func SingleEvent(t *testing.T, encoding string) {
- if test.EventingFlags.Provisioner == "" {
- t.Fatal("ClusterChannelProvisioner must be set to a non-empty string. Either do not specify --clusterChannelProvisioner or set to something other than the empty string")
- }
-
- const (
- channelName = "e2e-singleevent"
- senderName = "e2e-singleevent-sender"
- subscriptionName = "e2e-singleevent-subscription"
- loggerPodName = "e2e-singleevent-logger-pod"
- )
+func singleEvent(t *testing.T, encoding string) {
+ channelName := "e2e-singleevent-" + encoding
+ senderName := "e2e-singleevent-sender-" + encoding
+ subscriptionName := "e2e-singleevent-subscription-" + encoding
+ loggerPodName := "e2e-singleevent-logger-pod-" + encoding
- clients, cleaner := Setup(t, t.Logf)
- // verify namespace
- ns, cleanupNS := CreateNamespaceIfNeeded(t, clients, t.Logf)
- defer cleanupNS()
-
- // TearDown() needs to be deferred after cleanupNS(). Otherwise the namespace is deleted and all
- // resources in it. So when TearDown() runs, it spews a lot of not found errors.
- defer TearDown(clients, cleaner, t.Logf)
+ clients, ns, provisioner, cleaner := Setup(t, true, t.Logf)
+ defer TearDown(clients, ns, cleaner, t.Logf)
// create logger pod
t.Logf("creating logger pod")
selector := map[string]string{"e2etest": string(uuid.NewUUID())}
loggerPod := test.EventLoggerPod(loggerPodName, ns, selector)
loggerSvc := test.Service(loggerPodName, ns, selector)
- loggerPod, err := CreatePodAndServiceReady(clients, loggerPod, loggerSvc, ns, t.Logf, cleaner)
+ loggerPod, err := CreatePodAndServiceReady(clients, loggerPod, loggerSvc, t.Logf, cleaner)
if err != nil {
t.Fatalf("Failed to create logger pod and service, and get them ready: %v", err)
}
- // create channel
-
+ // create channel and subscription
t.Logf("Creating Channel and Subscription")
- channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(test.EventingFlags.Provisioner))
- t.Logf("channel: %#v", channel)
+ channel := test.Channel(channelName, ns, test.ClusterChannelProvisioner(provisioner))
sub := test.Subscription(subscriptionName, ns, test.ChannelRef(channelName), test.SubscriberSpecForService(loggerPodName), nil)
- t.Logf("sub: %#v", sub)
- if err := WithChannelsAndSubscriptionsReady(clients, &[]*v1alpha1.Channel{channel}, &[]*v1alpha1.Subscription{sub}, t.Logf, cleaner); err != nil {
+ if err := WithChannelsAndSubscriptionsReady(clients, ns, &[]*v1alpha1.Channel{channel}, &[]*v1alpha1.Subscription{sub}, t.Logf, cleaner); err != nil {
t.Fatalf("The Channel or Subscription were not marked as Ready: %v", err)
}
- // send fake CloudEvent to the first channel
+ // send fake CloudEvent to the channel
body := fmt.Sprintf("TestSingleEvent %s", uuid.NewUUID())
event := &test.CloudEvent{
Source: senderName,
@@ -92,13 +77,17 @@ func SingleEvent(t *testing.T, encoding string) {
Data: fmt.Sprintf(`{"msg":%q}`, body),
Encoding: encoding,
}
- if err := SendFakeEventToChannel(clients, event, channel, ns, t.Logf, cleaner); err != nil {
+ if err := SendFakeEventToChannel(clients, event, channel, t.Logf, cleaner); err != nil {
t.Fatalf("Failed to send fake CloudEvent to the channel %q", channel.Name)
}
if err := pkgTest.WaitForLogContent(clients.Kube, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body); err != nil {
- clients.Kube.PodLogs(senderName, "sendevent", ns)
- clients.Kube.PodLogs(senderName, "istio-proxy", ns)
+ if logs, err := clients.Kube.PodLogs(senderName, "sendevent", ns); err == nil {
+ t.Logf("Logs for sendevent container of the sender pod:\n %s", string(logs))
+ }
+ if logs, err := clients.Kube.PodLogs(senderName, "istio-proxy", ns); err == nil {
+ t.Logf("Logs for istio-proxy container of the sender pod:\n %s", string(logs))
+ }
t.Fatalf("String %q not found in logs of logger pod %q: %v", body, loggerPodName, err)
}
}
diff --git a/test/e2e_flags.go b/test/e2e_flags.go
index d61247ccfe5..78597a42d3b 100644
--- a/test/e2e_flags.go
+++ b/test/e2e_flags.go
@@ -19,30 +19,90 @@ limitations under the License.
package test
import (
+ "context"
"flag"
+ "fmt"
+ "strings"
+ "github.com/knative/pkg/logging"
pkgTest "github.com/knative/pkg/test"
- "github.com/knative/pkg/test/logging"
+ testLogging "github.com/knative/pkg/test/logging"
)
-// EventingFlags holds the command line flags specific to knative/eventing
+const (
+ // E2ETestNamespacePrefix is the namespace prefix used for running all e2e tests.
+ E2ETestNamespacePrefix = "e2e-ns"
+ // DefaultClusterChannelProvisioner is the default ClusterChannelProvisioner we will run tests against.
+ DefaultClusterChannelProvisioner = "in-memory-channel"
+ // DefaultBrokerName is the name of the Broker that is automatically created after the current namespace is labeled.
+ DefaultBrokerName = "default"
+)
+
+var logger = logging.FromContext(context.Background()).Named("eventing-e2e-testing")
+
+// validProvisioners is a list of provisioners that Eventing currently support.
+var validProvisioners = []string{DefaultClusterChannelProvisioner}
+
+func isValid(provisioner string) bool {
+ for i := range validProvisioners {
+ if provisioner == validProvisioners[i] {
+ return true
+ }
+ }
+ return false
+}
+
+// EventingFlags holds the command line flags specific to knative/eventing.
var EventingFlags = initializeEventingFlags()
-// EventingEnvironmentFlags holds the e2e flags needed only by the eventing repo
+// Provisioners holds the ClusterChannelProvisioners we want to run test against.
+type Provisioners []string
+
+func (ps *Provisioners) String() string {
+ return fmt.Sprint(*ps)
+}
+
+// Set appends each comma-separated provisioner in the input string to Provisioners.
+// An unsupported provisioner name causes a fatal error.
+func (ps *Provisioners) Set(value string) error {
+ for _, provisioner := range strings.Split(value, ",") {
+ provisioner := strings.TrimSpace(provisioner)
+ if !isValid(provisioner) {
+ logger.Fatalf("The given provisioner %q is not supported, tests cannot be run.\n", provisioner)
+ }
+
+ *ps = append(*ps, provisioner)
+ }
+ return nil
+}
+
+// EventingEnvironmentFlags holds the e2e flags needed only by the eventing repo.
type EventingEnvironmentFlags struct {
- Provisioner string // The name of the Channel's ClusterChannelProvisioner
+ Provisioners
+ RunFromMain bool
}
func initializeEventingFlags() *EventingEnvironmentFlags {
- var f EventingEnvironmentFlags
+ f := EventingEnvironmentFlags{}
- flag.StringVar(&f.Provisioner, "clusterChannelProvisioner", "in-memory-channel", "The name of the Channel's clusterChannelProvisioner. Only the in-memory-channel is installed by the tests, anything else must be installed before the tests are run.")
+ flag.Var(&f.Provisioners, "clusterChannelProvisioners", "The names of the Channel's clusterChannelProvisioners, which are separated by comma.")
+ flag.BoolVar(&f.RunFromMain, "runFromMain", false, "If runFromMain is set to false, the TestMain will be skipped when we run tests.")
flag.Parse()
- logging.InitializeLogger(pkgTest.Flags.LogVerbose)
+ // If no provisioner is passed through the flag, initialize it as the DefaultClusterChannelProvisioner.
+ if f.Provisioners == nil || len(f.Provisioners) == 0 {
+ f.Provisioners = []string{DefaultClusterChannelProvisioner}
+ }
+
+ // If we are not running from TestMain, only a single provisioner can be specified.
+ if !f.RunFromMain && len(f.Provisioners) != 1 {
+ logger.Fatal("Only one single provisioner can be specified if you are not running from TestMain.")
+ }
+
+ testLogging.InitializeLogger(pkgTest.Flags.LogVerbose)
if pkgTest.Flags.EmitMetrics {
- logging.InitializeMetricExporter("eventing")
+ testLogging.InitializeMetricExporter("eventing")
}
return &f
From c1c40700d4c809d581ef0c16d5a10a97c5f1e7b6 Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Wed, 17 Apr 2019 14:09:26 -0700
Subject: [PATCH 29/76] Add a Deprecated condition on 'in-memory-channel'
Channels. (#1062)
---
.../eventing/v1alpha1/channel_lifecycle.go | 29 ++++++++++-
.../v1alpha1/channel_lifecycle_test.go | 49 +++++++++++++++++++
.../inmemory/channel/reconcile.go | 47 ++++++------------
.../inmemory/channel/reconcile_test.go | 17 +++----
4 files changed, 99 insertions(+), 43 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/channel_lifecycle.go b/pkg/apis/eventing/v1alpha1/channel_lifecycle.go
index 7ed67627562..117f7268277 100644
--- a/pkg/apis/eventing/v1alpha1/channel_lifecycle.go
+++ b/pkg/apis/eventing/v1alpha1/channel_lifecycle.go
@@ -16,7 +16,14 @@ limitations under the License.
package v1alpha1
-import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+import (
+ "time"
+
+ "github.com/knative/pkg/apis"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
var chanCondSet = duckv1alpha1.NewLivingConditionSet(ChannelConditionProvisioned, ChannelConditionAddressable, ChannelConditionProvisionerInstalled)
@@ -77,6 +84,26 @@ func (cs *ChannelStatus) MarkProvisionerNotInstalled(reason, messageFormat strin
chanCondSet.Manage(cs).MarkFalse(ChannelConditionProvisionerInstalled, reason, messageFormat, messageA...)
}
+// MarkDeprecated adds a warning condition that this Channel is deprecated and will stop working in
+// the future. Note that this does not affect the Ready condition.
+func (cs *ChannelStatus) MarkDeprecated(reason, msg string) {
+ dc := duckv1alpha1.Condition{
+ Type: "Deprecated",
+ Reason: reason,
+ Status: v1.ConditionTrue,
+ Severity: duckv1alpha1.ConditionSeverityWarning,
+ Message: msg,
+ LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Now())},
+ }
+ for i, c := range cs.Conditions {
+ if c.Type == dc.Type {
+ cs.Conditions[i] = dc
+ return
+ }
+ }
+ cs.Conditions = append(cs.Conditions, dc)
+}
+
// SetAddress makes this Channel addressable by setting the hostname. It also
// sets the ChannelConditionAddressable to true.
func (cs *ChannelStatus) SetAddress(hostname string) {
diff --git a/pkg/apis/eventing/v1alpha1/channel_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/channel_lifecycle_test.go
index 6c89da501d7..49fce0361c9 100644
--- a/pkg/apis/eventing/v1alpha1/channel_lifecycle_test.go
+++ b/pkg/apis/eventing/v1alpha1/channel_lifecycle_test.go
@@ -23,6 +23,7 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
)
var condReady = duckv1alpha1.Condition{
@@ -39,6 +40,8 @@ var ignoreAllButTypeAndStatus = cmpopts.IgnoreFields(
duckv1alpha1.Condition{},
"LastTransitionTime", "Message", "Reason", "Severity")
+var ignoreLastTransitionTime = cmpopts.IgnoreFields(duckv1alpha1.Condition{}, "LastTransitionTime")
+
func TestChannelGetCondition(t *testing.T) {
tests := []struct {
name string
@@ -188,12 +191,19 @@ func TestChannelIsReady(t *testing.T) {
name string
markProvisioned bool
setAddress bool
+ markDeprecated bool
wantReady bool
}{{
name: "all happy",
markProvisioned: true,
setAddress: true,
wantReady: true,
+ }, {
+ name: "deprecated does not affect happy",
+ markProvisioned: true,
+ setAddress: true,
+ markDeprecated: true,
+ wantReady: true,
}, {
name: "one sad",
markProvisioned: false,
@@ -212,6 +222,9 @@ func TestChannelIsReady(t *testing.T) {
if test.setAddress {
cs.SetAddress("foo.bar")
}
+ if test.markDeprecated {
+ cs.MarkDeprecated("TestReason", "Test Message")
+ }
got := cs.IsReady()
if test.wantReady != got {
t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got)
@@ -270,3 +283,39 @@ func TestChannelStatus_SetAddressable(t *testing.T) {
})
}
}
+
+func TestChannelStatus_MarkDeprecated(t *testing.T) {
+ testCases := map[string]struct {
+ alreadyPresent bool
+ }{
+ "not present": {
+ alreadyPresent: false,
+ },
+ "already present": {
+ alreadyPresent: true,
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ cs := &ChannelStatus{}
+ if tc.alreadyPresent {
+ cs.MarkDeprecated("AlreadyPresent", "Already present.")
+ }
+ cs.MarkDeprecated("Test", "Test Message")
+ if len(cs.Conditions) != 1 {
+ t.Fatalf("Incorrect number of conditions. Expected 1, actually %v", cs)
+ }
+
+ expected := duckv1alpha1.Condition{
+ Type: "Deprecated",
+ Reason: "Test",
+ Status: v1.ConditionTrue,
+ Severity: duckv1alpha1.ConditionSeverityWarning,
+ Message: "Test Message",
+ }
+ if diff := cmp.Diff(expected, cs.Conditions[0], ignoreLastTransitionTime); diff != "" {
+ t.Errorf("Condition incorrect (-want +got): %s", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/provisioners/inmemory/channel/reconcile.go b/pkg/provisioners/inmemory/channel/reconcile.go
index 5d5f8392a82..e35dc1adea9 100644
--- a/pkg/provisioners/inmemory/channel/reconcile.go
+++ b/pkg/provisioners/inmemory/channel/reconcile.go
@@ -22,7 +22,6 @@ import (
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -39,8 +38,6 @@ const (
channelReconciled = "ChannelReconciled"
channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
k8sServiceCreateFailed = "K8sServiceCreateFailed"
- // TODO after in-memory-channel is retired, asyncProvisionerName should be removed
- defaultProvisionerName = "in-memory-channel"
)
type reconciler struct {
@@ -85,10 +82,15 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}
logger.Info("Reconciling Channel")
- // Finalizer needs to be removed (even though no finalizers are added) to maintain backwards compatibility
- // with v0.5 in which a finalzier was added. Or else channels will not get deleted after upgrading to 0.6+
+ // Finalizer needs to be removed (even though no finalizers are added) to maintain backwards
+ // compatibility with v0.5 in which a finalizer was added. Or else channels will not get deleted
+ // after upgrading to 0.6+.
if result := util.RemoveFinalizer(c, finalizerName); result == util.FinalizerRemoved {
- r.client.Update(ctx, c)
+ err = r.client.Update(ctx, c)
+ if err != nil {
+ logger.Info("Failed to remove finalizer", zap.Error(err))
+ return reconcile.Result{}, err
+ }
logger.Info("Channel reconciled. Finalizer Removed")
r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q. Finalizer removed.", c.Name)
return reconcile.Result{Requeue: true}, nil
@@ -126,6 +128,10 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
c.Status.InitializeConditions()
+ if usesDeprecatedProvisioner(c) {
+ c.Status.MarkDeprecated("ClusterChannelProvisionerDeprecated", "The `in-memory-channel` ClusterChannelProvisioner is deprecated and will be removed in 0.7. Recommended replacement is `in-memory`.")
+ }
+
// We are syncing K8s Service to talk to this Channel.
svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c))
if err != nil {
@@ -140,29 +146,8 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
return nil
}
-func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) {
- channels := make([]eventingv1alpha1.Channel, 0)
-
- opts := &client.ListOptions{
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
- for {
- cl := &eventingv1alpha1.ChannelList{}
- if err := r.client.List(ctx, opts, cl); err != nil {
- return nil, err
- }
-
- for _, c := range cl.Items {
- if r.shouldReconcile(&c) {
- channels = append(channels, c)
- }
- }
- if cl.Continue != "" {
- opts.Raw.Continue = cl.Continue
- } else {
- return channels, nil
- }
- }
+func usesDeprecatedProvisioner(c *eventingv1alpha1.Channel) bool {
+ return c.Spec.Provisioner != nil &&
+ c.Spec.Provisioner.Namespace == "" &&
+ c.Spec.Provisioner.Name == "in-memory-channel"
}
diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go
index 0b221854a41..76d75f3c7aa 100644
--- a/pkg/provisioners/inmemory/channel/reconcile_test.go
+++ b/pkg/provisioners/inmemory/channel/reconcile_test.go
@@ -189,9 +189,9 @@ var (
func init() {
// Add types to scheme.
- eventingv1alpha1.AddToScheme(scheme.Scheme)
- corev1.AddToScheme(scheme.Scheme)
- istiov1alpha3.AddToScheme(scheme.Scheme)
+ _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
+ _ = corev1.AddToScheme(scheme.Scheme)
+ _ = istiov1alpha3.AddToScheme(scheme.Scheme)
}
func TestInjectClient(t *testing.T) {
@@ -267,9 +267,6 @@ func TestReconcile(t *testing.T) {
Mocks: controllertesting.Mocks{
MockLists: errorListingK8sService(),
},
- WantPresent: []runtime.Object{
- makeChannel(),
- },
WantErrMsg: testErrorMessage,
WantEvent: []corev1.Event{
events[k8sServiceCreateFailed],
@@ -283,11 +280,9 @@ func TestReconcile(t *testing.T) {
Mocks: controllertesting.Mocks{
MockCreates: errorCreatingK8sService(),
},
- WantPresent: []runtime.Object{
- // TODO: This should have a useful error message saying that the K8s Service failed.
- makeChannel(),
- },
- WantErrMsg: testErrorMessage,
+ // TODO: This should have a useful error message saying that the K8s Service failed.
+ WantPresent: []runtime.Object{},
+ WantErrMsg: testErrorMessage,
WantEvent: []corev1.Event{
events[k8sServiceCreateFailed],
},
From f53dc83e8363d8b2d10f3ff51d46bd292ce3f60c Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Wed, 17 Apr 2019 16:01:26 -0700
Subject: [PATCH 30/76] Take advantage of the package alias from cloudevents.
(#1054)
* Take advantage of the package alias from cloudevents.
* use error.
* Fix e2e test images.
---
Gopkg.lock | 8 +-
cmd/broker/ingress/main.go | 18 +--
cmd/pong/main.go | 22 ++--
cmd/sendevent/main.go | 35 +++---
pkg/broker/context.go | 11 +-
pkg/broker/receiver.go | 19 +--
pkg/broker/receiver_test.go | 25 ++--
pkg/broker/ttl.go | 12 +-
test/test_images/logevents/main.go | 17 +--
test/test_images/sendevent/main.go | 42 ++++---
test/test_images/transformevents/main.go | 5 +-
vendor/github.com/cloudevents/sdk-go/alias.go | 111 ++++++++++++++++++
12 files changed, 213 insertions(+), 112 deletions(-)
create mode 100644 vendor/github.com/cloudevents/sdk-go/alias.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 60e009ffec6..4559393000b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -58,9 +58,10 @@
version = "v2.1.15"
[[projects]]
- digest = "1:fa1c3e6de410f74eb102fd927c838a66feb5b825fdf63d0e82cbbfd1a16db8a1"
+ digest = "1:13e0fa926561dc81e7229d65f5c0aa2cc74b4020d2d3da416fff4ee0946545da"
name = "github.com/cloudevents/sdk-go"
packages = [
+ ".",
"pkg/cloudevents",
"pkg/cloudevents/client",
"pkg/cloudevents/codec",
@@ -1239,11 +1240,8 @@
"cloud.google.com/go/pubsub",
"github.com/Shopify/sarama",
"github.com/bsm/sarama-cluster",
- "github.com/cloudevents/sdk-go/pkg/cloudevents",
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client",
- "github.com/cloudevents/sdk-go/pkg/cloudevents/context",
+ "github.com/cloudevents/sdk-go",
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go
index 5317d44320c..672516b69f4 100644
--- a/cmd/broker/ingress/main.go
+++ b/cmd/broker/ingress/main.go
@@ -29,9 +29,7 @@ import (
"sync"
"time"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- ceclient "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
- cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+ "github.com/cloudevents/sdk-go"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/broker"
"github.com/knative/eventing/pkg/provisioners"
@@ -86,7 +84,7 @@ func main() {
Path: "/",
}
- ceClient, err := ceclient.NewDefault()
+ ceClient, err := cloudevents.NewDefaultClient()
if err != nil {
logger.Fatal("Unable to create CE client", zap.Error(err))
}
@@ -158,7 +156,7 @@ func getRequiredEnv(envKey string) string {
type handler struct {
logger *zap.Logger
- ceClient ceclient.Client
+ ceClient cloudevents.Client
channelURI *url.URL
brokerName string
}
@@ -192,7 +190,7 @@ func (h *handler) Start(stopCh <-chan struct{}) error {
}
func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error {
- tctx := cehttp.TransportContextFrom(ctx)
+ tctx := cloudevents.HTTPTransportContextFrom(ctx)
if tctx.Method != http.MethodPost {
resp.Status = http.StatusMethodNotAllowed
return nil
@@ -221,7 +219,7 @@ func (h *handler) serveHTTP(ctx context.Context, event cloudevents.Event, resp *
return h.sendEvent(ctx, tctx, event)
}
-func (h *handler) sendEvent(ctx context.Context, tctx cehttp.TransportContext, event cloudevents.Event) error {
+func (h *handler) sendEvent(ctx context.Context, tctx cloudevents.HTTPTransportContext, event cloudevents.Event) error {
sendingCTX := broker.SendingContext(ctx, tctx, h.channelURI)
startTS := time.Now()
@@ -247,7 +245,11 @@ func (h *handler) decrementTTL(event *cloudevents.Event) bool {
return false
}
- event.Context = broker.SetTTL(event.Context, ttl)
+ var err error
+ event.Context, err = broker.SetTTL(event.Context, ttl)
+ if err != nil {
+ h.logger.Error("failed to set TTL", zap.Error(err))
+ }
return true
}
diff --git a/cmd/pong/main.go b/cmd/pong/main.go
index 9288e551bfb..f691d065bb7 100644
--- a/cmd/pong/main.go
+++ b/cmd/pong/main.go
@@ -22,9 +22,7 @@ import (
"flag"
"log"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "github.com/cloudevents/sdk-go"
"github.com/google/uuid"
)
@@ -43,20 +41,22 @@ func init() {
func receive(event cloudevents.Event, resp *cloudevents.EventResponse) {
log.Printf("Received CloudEvent,\n%s", event)
if event.Type() == pingType {
- resp.RespondWith(200, &cloudevents.Event{
- Context: cloudevents.EventContextV02{
- Type: pongType,
- Source: *types.ParseURLRef("github.com/knative/eventing/cmd/pong/" + id),
- }.AsV02(),
- Data: event.Data,
- })
+ pong := cloudevents.NewEvent()
+ pong.SetType(pongType)
+ pong.SetSource("github.com/knative/eventing/cmd/pong/" + id)
+ if err := pong.SetData(event.Data); err != nil {
+ log.Printf("failed to set data on pong: %s", err)
+ resp.Error(400, "bad data")
+ return
+ }
+ resp.RespondWith(200, &pong)
}
}
func main() {
flag.Parse()
- ce, err := client.NewDefault()
+ ce, err := cloudevents.NewDefaultClient()
if err != nil {
log.Fatalf("failed to create CloudEvent client, %s", err)
}
diff --git a/cmd/sendevent/main.go b/cmd/sendevent/main.go
index f419734f054..53b5a575175 100644
--- a/cmd/sendevent/main.go
+++ b/cmd/sendevent/main.go
@@ -25,12 +25,8 @@ import (
"log"
"os"
+ "github.com/cloudevents/sdk-go"
"github.com/knative/eventing/pkg/utils"
-
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
)
var (
@@ -67,31 +63,34 @@ func main() {
source = fmt.Sprintf("http://%s", utils.GetClusterDomainName())
}
- t, err := http.New(
- http.WithTarget(target),
- http.WithBinaryEncoding(),
+ t, err := cloudevents.NewHTTPTransport(
+ cloudevents.WithTarget(target),
+ cloudevents.WithBinaryEncoding(),
)
if err != nil {
log.Printf("failed to create transport, %v", err)
os.Exit(1)
}
- c, err := client.New(t,
- client.WithTimeNow(),
- client.WithUUIDs(),
+ c, err := cloudevents.NewClient(t,
+ cloudevents.WithTimeNow(),
+ cloudevents.WithUUIDs(),
)
if err != nil {
log.Printf("failed to create client, %v", err)
os.Exit(1)
}
- event := cloudevents.Event{
- Context: cloudevents.EventContextV02{
- ID: eventID,
- Type: eventType,
- Source: *types.ParseURLRef(source),
- }.AsV02(),
- Data: untyped,
+ event := cloudevents.NewEvent()
+ if eventID != "" {
+ event.SetID(eventID)
+ }
+ event.SetType(eventType)
+ event.SetSource(source)
+ if err := event.SetData(untyped); err != nil {
+ log.Printf("failed to set data, %v", err)
+ os.Exit(1)
}
+
if resp, err := c.Send(context.Background(), event); err != nil {
fmt.Printf("Failed to send event to %s: %s\n", target, err)
os.Exit(1)
diff --git a/pkg/broker/context.go b/pkg/broker/context.go
index 7900eac6221..a06be9244d4 100644
--- a/pkg/broker/context.go
+++ b/pkg/broker/context.go
@@ -22,8 +22,7 @@ import (
"net/url"
"strings"
- cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context"
- cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+ "github.com/cloudevents/sdk-go"
"k8s.io/apimachinery/pkg/util/sets"
)
@@ -48,20 +47,20 @@ var (
// SendingContext creates the context to use when sending a Cloud Event with ceclient.Client. It
// sets the target and attaches a filtered set of headers from the initial request.
-func SendingContext(ctx context.Context, tctx cehttp.TransportContext, targetURI *url.URL) context.Context {
- sendingCTX := cecontext.WithTarget(ctx, targetURI.String())
+func SendingContext(ctx context.Context, tctx cloudevents.HTTPTransportContext, targetURI *url.URL) context.Context {
+ sendingCTX := cloudevents.ContextWithTarget(ctx, targetURI.String())
h := extractPassThroughHeaders(tctx)
for n, v := range h {
for _, iv := range v {
- sendingCTX = cehttp.ContextWithHeader(sendingCTX, n, iv)
+ sendingCTX = cloudevents.ContextWithHeader(sendingCTX, n, iv)
}
}
return sendingCTX
}
-func extractPassThroughHeaders(tctx cehttp.TransportContext) http.Header {
+func extractPassThroughHeaders(tctx cloudevents.HTTPTransportContext) http.Header {
h := http.Header{}
for n, v := range tctx.Header {
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index b9797a78277..d0fb081ae98 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -23,9 +23,7 @@ import (
"net/url"
"time"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- ceclient "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
- cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+ "github.com/cloudevents/sdk-go"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path"
"go.uber.org/zap"
@@ -41,13 +39,13 @@ const (
type Receiver struct {
logger *zap.Logger
client client.Client
- ceClient ceclient.Client
+ ceClient cloudevents.Client
}
// New creates a new Receiver and its associated MessageReceiver. The caller is responsible for
// Start()ing the returned MessageReceiver.
func New(logger *zap.Logger, client client.Client) (*Receiver, error) {
- ceClient, err := ceclient.NewDefault()
+ ceClient, err := cloudevents.NewDefaultClient()
if err != nil {
return nil, err
}
@@ -113,7 +111,7 @@ func (r *Receiver) Start(stopCh <-chan struct{}) error {
}
func (r *Receiver) serveHTTP(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error {
- tctx := cehttp.TransportContextFrom(ctx)
+ tctx := cloudevents.HTTPTransportContextFrom(ctx)
if tctx.Method != http.MethodPost {
resp.Status = http.StatusMethodNotAllowed
return nil
@@ -155,9 +153,12 @@ func (r *Receiver) serveHTTP(ctx context.Context, event cloudevents.Event, resp
}
// Reattach the TTL (with the same value) to the response event before sending it to the Broker.
- responseEvent.Context = SetTTL(responseEvent.Context, ttl)
+ responseEvent.Context, err = SetTTL(responseEvent.Context, ttl)
+ if err != nil {
+ return err
+ }
resp.Event = responseEvent
- resp.Context = &cehttp.TransportResponseContext{
+ resp.Context = &cloudevents.HTTPTransportResponseContext{
Header: extractPassThroughHeaders(tctx),
}
@@ -165,7 +166,7 @@ func (r *Receiver) serveHTTP(ctx context.Context, event cloudevents.Event, resp
}
// sendEvent sends an event to a subscriber if the trigger filter passes.
-func (r *Receiver) sendEvent(ctx context.Context, tctx cehttp.TransportContext, trigger types.NamespacedName, event *cloudevents.Event) (*cloudevents.Event, error) {
+func (r *Receiver) sendEvent(ctx context.Context, tctx cloudevents.HTTPTransportContext, trigger types.NamespacedName, event *cloudevents.Event) (*cloudevents.Event, error) {
t, err := r.getTrigger(ctx, trigger)
if err != nil {
r.logger.Info("Unable to get the Trigger", zap.Error(err), zap.Any("triggerRef", trigger))
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index 5a46740c376..ed14a6c0041 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -26,9 +26,8 @@ import (
"strings"
"testing"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go"
cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
"github.com/google/go-cmp/cmp"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
@@ -64,7 +63,7 @@ func TestReceiver(t *testing.T) {
testCases := map[string]struct {
triggers []*eventingv1alpha1.Trigger
mocks controllertesting.Mocks
- tctx *cehttp.TransportContext
+ tctx *cloudevents.HTTPTransportContext
event *cloudevents.Event
requestFails bool
returnedEvent *cloudevents.Event
@@ -85,7 +84,7 @@ func TestReceiver(t *testing.T) {
expectNewToFail: true,
},
"Not POST": {
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "GET",
Host: host,
URI: validPath,
@@ -93,7 +92,7 @@ func TestReceiver(t *testing.T) {
expectedStatus: http.StatusMethodNotAllowed,
},
"Path too short": {
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "POST",
Host: host,
URI: "/test-namespace/test-trigger",
@@ -101,7 +100,7 @@ func TestReceiver(t *testing.T) {
expectedErr: true,
},
"Path too long": {
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "POST",
Host: host,
URI: "/triggers/test-namespace/test-trigger/extra",
@@ -109,7 +108,7 @@ func TestReceiver(t *testing.T) {
expectedErr: true,
},
"Path without prefix": {
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "POST",
Host: host,
URI: "/something/test-namespace/test-trigger",
@@ -117,7 +116,7 @@ func TestReceiver(t *testing.T) {
expectedErr: true,
},
"Bad host": {
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "POST",
Host: "badhost-cant-be-parsed-as-a-trigger-name-plus-namespace",
URI: validPath,
@@ -192,7 +191,7 @@ func TestReceiver(t *testing.T) {
triggers: []*eventingv1alpha1.Trigger{
makeTrigger("", ""),
},
- tctx: &cehttp.TransportContext{
+ tctx: &cloudevents.HTTPTransportContext{
Method: "POST",
Host: host,
URI: validPath,
@@ -262,7 +261,7 @@ func TestReceiver(t *testing.T) {
tctx := tc.tctx
if tctx == nil {
- tctx = &cehttp.TransportContext{
+ tctx = &cloudevents.HTTPTransportContext{
Method: http.MethodPost,
Host: host,
URI: validPath,
@@ -411,7 +410,7 @@ func makeEventWithoutTTL() *cloudevents.Event {
return &cloudevents.Event{
Context: cloudevents.EventContextV02{
Type: eventType,
- Source: types.URLRef{
+ Source: cloudevents.URLRef{
URL: url.URL{
Path: eventSource,
},
@@ -428,7 +427,7 @@ func makeEvent() *cloudevents.Event {
}
func addTTLToEvent(e cloudevents.Event) cloudevents.Event {
- e.Context = SetTTL(e.Context, 1)
+ e.Context, _ = SetTTL(e.Context, 1)
return e
}
@@ -436,7 +435,7 @@ func makeDifferentEvent() *cloudevents.Event {
return &cloudevents.Event{
Context: cloudevents.EventContextV02{
Type: "some-other-type",
- Source: types.URLRef{
+ Source: cloudevents.URLRef{
URL: url.URL{
Path: eventSource,
},
diff --git a/pkg/broker/ttl.go b/pkg/broker/ttl.go
index 2cb03da350e..8d7ccdef39f 100644
--- a/pkg/broker/ttl.go
+++ b/pkg/broker/ttl.go
@@ -17,7 +17,7 @@
package broker
import (
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go"
)
const (
@@ -27,11 +27,7 @@ const (
)
// SetTTL sets the TTL into the EventContext. ttl should be a positive integer.
-func SetTTL(ctx cloudevents.EventContext, ttl interface{}) cloudevents.EventContext {
- v2 := ctx.AsV02()
- if v2.Extensions == nil {
- v2.Extensions = make(map[string]interface{})
- }
- v2.Extensions[V02TTLAttribute] = ttl
- return v2
+func SetTTL(ctx cloudevents.EventContext, ttl interface{}) (cloudevents.EventContext, error) {
+ err := ctx.SetExtension(V02TTLAttribute, ttl)
+ return ctx, err
}
diff --git a/test/test_images/logevents/main.go b/test/test_images/logevents/main.go
index 9e891509b50..476edb2d9d8 100644
--- a/test/test_images/logevents/main.go
+++ b/test/test_images/logevents/main.go
@@ -17,28 +17,17 @@ package main
import (
"context"
- "fmt"
"log"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go"
)
func handler(event cloudevents.Event) {
- // TODO: in version 0.5.0 of cloudevents, below can be deleted.
-
- ctx := event.Context.AsV02()
- var data []byte
- var ok bool
- if data, ok = event.Data.([]byte); !ok {
- fmt.Printf("Got Data Error")
- return
- }
- log.Printf("[%s] %s %s: %+v", ctx.Time.String(), *ctx.ContentType, ctx.Source.String(), string(data))
+ log.Printf("%s", event.String())
}
func main() {
- c, err := client.NewDefault()
+ c, err := cloudevents.NewDefaultClient()
if err != nil {
log.Fatalf("failed to create client, %v", err)
}
diff --git a/test/test_images/sendevent/main.go b/test/test_images/sendevent/main.go
index 82d769d485b..0b384392b38 100644
--- a/test/test_images/sendevent/main.go
+++ b/test/test_images/sendevent/main.go
@@ -26,10 +26,8 @@ import (
"strconv"
"time"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
)
type Heartbeat struct {
@@ -81,6 +79,15 @@ func main() {
maxMsg = m
}
+ defer func() {
+ var err error
+ r := recover()
+ if r != nil {
+ err = r.(error)
+ fmt.Printf("recovered from panic: %v", err)
+ }
+ }()
+
if delay > 0 {
log.Printf("will sleep for %s", delay)
time.Sleep(delay)
@@ -94,24 +101,24 @@ func main() {
var encodingOption http.Option
switch encoding {
case "binary":
- encodingOption = http.WithBinaryEncoding()
+ encodingOption = cloudevents.WithBinaryEncoding()
case "structured":
- encodingOption = http.WithStructuredEncoding()
+ encodingOption = cloudevents.WithStructuredEncoding()
default:
fmt.Printf("unsupported encoding option: %q\n", encoding)
os.Exit(1)
}
- t, err := http.New(
- http.WithTarget(sink),
+ t, err := cloudevents.NewHTTPTransport(
+ cloudevents.WithTarget(sink),
encodingOption,
)
if err != nil {
log.Fatalf("failed to create transport, %v", err)
}
- c, err := client.New(t,
- client.WithTimeNow(),
- client.WithUUIDs(),
+ c, err := cloudevents.NewClient(t,
+ cloudevents.WithTimeNow(),
+ cloudevents.WithUUIDs(),
)
if err != nil {
log.Fatalf("failed to create client, %v", err)
@@ -130,13 +137,14 @@ func main() {
sequence++
untyped["sequence"] = fmt.Sprintf("%d", sequence)
- event := cloudevents.Event{
- Context: cloudevents.EventContextV02{
- ID: eventID,
- Type: eventType,
- Source: *types.ParseURLRef(source),
- }.AsV02(),
- Data: untyped,
+ event := cloudevents.NewEvent()
+ if eventID != "" {
+ event.SetID(eventID)
+ }
+ event.SetType(eventType)
+ event.SetSource(source)
+ if err := event.SetData(untyped); err != nil {
+ log.Fatalf("failed to set data, %v", err)
}
if resp, err := c.Send(context.Background(), event); err != nil {
diff --git a/test/test_images/transformevents/main.go b/test/test_images/transformevents/main.go
index 76e79e99612..1b14a185a69 100644
--- a/test/test_images/transformevents/main.go
+++ b/test/test_images/transformevents/main.go
@@ -21,8 +21,7 @@ import (
"fmt"
"log"
- "github.com/cloudevents/sdk-go/pkg/cloudevents"
- "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go"
)
type example struct {
@@ -61,7 +60,7 @@ func gotEvent(event cloudevents.Event, resp *cloudevents.EventResponse) error {
func main() {
// parse the command line flags
flag.Parse()
- c, err := client.NewDefault()
+ c, err := cloudevents.NewDefaultClient()
if err != nil {
log.Fatalf("failed to create client, %v", err)
}
diff --git a/vendor/github.com/cloudevents/sdk-go/alias.go b/vendor/github.com/cloudevents/sdk-go/alias.go
new file mode 100644
index 00000000000..2450f309f68
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/alias.go
@@ -0,0 +1,111 @@
+package cloudevents
+
+// Package cloudevents alias' common functions and types to improve discoverability and reduce
+// the number of imports for simple HTTP clients.
+
+import (
+ "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/context"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+)
+
+// Client
+
+type Client = client.Client
+
+// Event
+
+type Event = cloudevents.Event
+type EventResponse = cloudevents.EventResponse
+
+// Context
+
+type EventContext = cloudevents.EventContext
+type EventContextV01 = cloudevents.EventContextV01
+type EventContextV02 = cloudevents.EventContextV02
+type EventContextV03 = cloudevents.EventContextV03
+
+// Custom Types
+
+type Timestamp = types.Timestamp
+type URLRef = types.URLRef
+
+// HTTP Transport
+
+type HTTPTransport = http.Transport
+type HTTPTransportContext = http.TransportContext
+type HTTPTransportResponseContext = http.TransportResponseContext
+type HTTPEncoding = http.Encoding
+
+var (
+ // ContentType Helpers
+
+ StringOfApplicationJSON = cloudevents.StringOfApplicationJSON
+ StringOfApplicationXML = cloudevents.StringOfApplicationXML
+ StringOfApplicationCloudEventsJSON = cloudevents.StringOfApplicationCloudEventsJSON
+ StringOfApplicationCloudEventsBatchJSON = cloudevents.StringOfApplicationCloudEventsBatchJSON
+ StringOfBase64 = cloudevents.StringOfBase64
+
+ Base64 = cloudevents.Base64
+
+ // Client Creation
+
+ NewClient = client.New
+ NewDefaultClient = client.NewDefault
+
+ // Client Options
+
+ WithEventDefaulter = client.WithEventDefaulter
+ WithUUIDs = client.WithUUIDs
+ WithTimeNow = client.WithTimeNow
+
+ // Event Creation
+
+ NewEvent = cloudevents.New
+ VersionV01 = cloudevents.CloudEventsVersionV01
+ VersionV02 = cloudevents.CloudEventsVersionV02
+ VersionV03 = cloudevents.CloudEventsVersionV03
+
+ // Context
+
+ ContextWithTarget = context.WithTarget
+ TargetFromContext = context.TargetFrom
+
+ // Custom Types
+
+ ParseTimestamp = types.ParseTimestamp
+ ParseURLRef = types.ParseURLRef
+
+ // HTTP Transport
+
+ NewHTTPTransport = http.New
+
+ // HTTP Transport Options
+
+ WithTarget = http.WithTarget
+ WithMethod = http.WithMethod
+ WitHHeader = http.WithHeader
+ WithShutdownTimeout = http.WithShutdownTimeout
+ WithEncoding = http.WithEncoding
+ WithBinaryEncoding = http.WithBinaryEncoding
+ WithStructuredEncoding = http.WithStructuredEncoding
+ WithPort = http.WithPort
+ WithPath = http.WithPath
+
+ // HTTP Context
+
+ HTTPTransportContextFrom = http.TransportContextFrom
+ ContextWithHeader = http.ContextWithHeader
+
+ // HTTP Transport Encodings
+
+ HTTPBinaryV01 = http.BinaryV01
+ HTTPStructuredV01 = http.StructuredV01
+ HTTPBinaryV02 = http.BinaryV02
+ HTTPStructuredV02 = http.StructuredV02
+ HTTPBinaryV03 = http.BinaryV03
+ HTTPStructuredV03 = http.StructuredV03
+ HTTPBatchedV03 = http.BatchedV03
+)
From 11710b5829813bb9707426bebfe216c2c5134cad Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Thu, 18 Apr 2019 13:00:26 -0700
Subject: [PATCH 31/76] Combine all context attributes into the ce var
Combining official and extension context attributes into the same CEL
variable makes standardizing extensions easier since filter expressions
don't have to change.
Also switched to using native maps for CEL typing instead of protobuf
structs. Thanks to @TristonianJones for showing the way.
---
pkg/broker/cel.go | 93 ++++-------
.../dev_knative/cloud_events_context.pb.go | 153 ------------------
.../dev_knative/cloud_events_context.proto | 21 ---
pkg/broker/receiver_test.go | 2 +-
4 files changed, 30 insertions(+), 239 deletions(-)
delete mode 100644 pkg/broker/dev_knative/cloud_events_context.pb.go
delete mode 100644 pkg/broker/dev_knative/cloud_events_context.proto
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 805da83e817..04bff962b8d 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -1,40 +1,28 @@
package broker
import (
- "bytes"
- "encoding/json"
-
"github.com/cloudevents/sdk-go"
- "github.com/golang/protobuf/jsonpb"
- structpb "github.com/golang/protobuf/ptypes/struct"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- celprotos "github.com/knative/eventing/pkg/broker/dev_knative"
"go.uber.org/zap"
)
const (
- // CELVarKeyContext is the CEL variable key used for the CloudEvents event
- // context fields defined in the spec.
+ // CELVarKeyContext is the CEL variable key used for CloudEvents event
+ // context attributes, both official and extension.
CELVarKeyContext = "ce"
- // CELVarKeyExtensions is the CEL variable key used for the CloudEvents event
- // context extensions.
- CELVarKeyExtensions = "ext"
- // CELVarKeyData is the CEL variable key used for the CloudEvents event data.
+ // CELVarKeyData is the CEL variable key used for parsed, structured event
+ // data.
CELVarKeyData = "data"
- // TODO add a key that contains both the extensions and the baseline context
- // vars so extensions can be future proofed.
)
func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
e, err := cel.NewEnv(
- cel.Types(&celprotos.CloudEventsContext{}),
cel.Declarations(
- decls.NewIdent(CELVarKeyContext, decls.NewObjectType("dev.knative.CloudEventsContext"), nil),
- decls.NewIdent(CELVarKeyExtensions, decls.NewObjectType("google.protobuf.Struct"), nil),
- decls.NewIdent(CELVarKeyData, decls.NewObjectType("google.protobuf.Struct"), nil),
+ decls.NewIdent(CELVarKeyContext, decls.Dyn, nil),
+ decls.NewIdent(CELVarKeyData, decls.Dyn, nil),
),
)
if err != nil {
@@ -57,48 +45,48 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
return false, err
}
- vars := map[string]interface{}{}
// Set baseline context attributes. The attributes available may not be
// exactly the same as the attributes defined in the current version of the
// CloudEvents spec.
- ceCtx := &celprotos.CloudEventsContext{
- Specversion: event.SpecVersion(),
- Type: event.Type(),
- Source: event.Source(),
- Subject: event.Subject(),
- Id: event.ID(),
+ ce := map[string]interface{}{
+ "specversion": event.SpecVersion(),
+ "type": event.Type(),
+ "source": event.Source(),
+ "subject": event.Subject(),
+ "id": event.ID(),
// TODO Time. Should this be a string or a (cel-native) protobuf timestamp?
- Schemaurl: event.SchemaURL(),
- Datacontenttype: event.DataContentType(),
- Datamediatype: event.DataMediaType(),
- Datacontentencoding: event.DataContentEncoding(),
+ "schemaurl": event.SchemaURL(),
+ "datacontenttype": event.DataContentType(),
+ "datamediatype": event.DataMediaType(),
+ "datacontentencoding": event.DataContentEncoding(),
}
- vars[CELVarKeyContext] = ceCtx
// If the Trigger has requested parsing of extensions, attempt to turn them
// into a dynamic struct.
if ts.Filter.CEL.ParseExtensions {
// TODO should this coerce to V02?
- extStruct, err := ceParsedExtensionsStruct(event.Context.AsV02().Extensions)
- if err != nil {
- r.logger.Error("Failed to parse event context for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
- } else {
- vars[CELVarKeyExtensions] = extStruct
+ ext := event.Context.AsV02().Extensions
+ if ext != nil {
+ for k, v := range ext {
+ ce[k] = v
+ }
}
}
// If the Trigger has requested parsing of data, attempt to turn them into
// a dynamic struct.
+ data := make(map[string]interface{})
if ts.Filter.CEL.ParseData {
- dataStruct, err := ceParsedDataStruct(event)
+ data, err = ceParsedData(event)
if err != nil {
r.logger.Error("Failed to parse event data for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
- } else {
- vars[CELVarKeyData] = dataStruct
}
}
- out, _, err := prg.Eval(vars)
+ out, _, err := prg.Eval(map[string]interface{}{
+ CELVarKeyContext: ce,
+ CELVarKeyData: data,
+ })
if err != nil {
return false, err
}
@@ -106,20 +94,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
return out == types.True, nil
}
-func ceParsedExtensionsStruct(ext map[string]interface{}) (*structpb.Struct, error) {
- extJSON, err := json.Marshal(ext)
- if err != nil {
- return nil, err
- }
-
- extStruct := &structpb.Struct{}
- if err := jsonpb.Unmarshal(bytes.NewBuffer(extJSON), extStruct); err != nil {
- return nil, err
- }
- return extStruct, nil
-}
-
-func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
+func ceParsedData(event *cloudevents.Event) (map[string]interface{}, error) {
// TODO CloudEvents SDK might have a better way to do this with data codecs
if event.DataMediaType() == "application/json" {
var decodedData map[string]interface{}
@@ -127,17 +102,7 @@ func ceParsedDataStruct(event *cloudevents.Event) (*structpb.Struct, error) {
if err != nil {
return nil, err
}
- dataJSON, err := json.Marshal(decodedData)
- if err != nil {
- return nil, err
- }
-
- dataStruct := &structpb.Struct{}
- // TODO is there a way to convert a map into a structpb.Struct?
- if err := jsonpb.Unmarshal(bytes.NewBuffer(dataJSON), dataStruct); err != nil {
- return nil, err
- }
- return dataStruct, nil
+ return decodedData, nil
}
return nil, nil
}
diff --git a/pkg/broker/dev_knative/cloud_events_context.pb.go b/pkg/broker/dev_knative/cloud_events_context.pb.go
deleted file mode 100644
index 27d0e119ce9..00000000000
--- a/pkg/broker/dev_knative/cloud_events_context.pb.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: cloud_events_context.proto
-
-package dev_knative
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// CloudEventsContext defines the standard attributes that are always available
-// in the CEL evaluation environment. For compatibility reasons, these
-// attributes may not be exactly the same as the attributes in the current
-// version of the CloudEvents spec.
-type CloudEventsContext struct {
- Specversion string `protobuf:"bytes,1,opt,name=specversion,proto3" json:"specversion,omitempty"`
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
- Subject string `protobuf:"bytes,4,opt,name=subject,proto3" json:"subject,omitempty"`
- Id string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"`
- Schemaurl string `protobuf:"bytes,7,opt,name=schemaurl,proto3" json:"schemaurl,omitempty"`
- Datacontenttype string `protobuf:"bytes,8,opt,name=datacontenttype,proto3" json:"datacontenttype,omitempty"`
- Datamediatype string `protobuf:"bytes,9,opt,name=datamediatype,proto3" json:"datamediatype,omitempty"`
- Datacontentencoding string `protobuf:"bytes,10,opt,name=datacontentencoding,proto3" json:"datacontentencoding,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CloudEventsContext) Reset() { *m = CloudEventsContext{} }
-func (m *CloudEventsContext) String() string { return proto.CompactTextString(m) }
-func (*CloudEventsContext) ProtoMessage() {}
-func (*CloudEventsContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_a3df9ee1ac825df6, []int{0}
-}
-
-func (m *CloudEventsContext) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CloudEventsContext.Unmarshal(m, b)
-}
-func (m *CloudEventsContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CloudEventsContext.Marshal(b, m, deterministic)
-}
-func (m *CloudEventsContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CloudEventsContext.Merge(m, src)
-}
-func (m *CloudEventsContext) XXX_Size() int {
- return xxx_messageInfo_CloudEventsContext.Size(m)
-}
-func (m *CloudEventsContext) XXX_DiscardUnknown() {
- xxx_messageInfo_CloudEventsContext.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CloudEventsContext proto.InternalMessageInfo
-
-func (m *CloudEventsContext) GetSpecversion() string {
- if m != nil {
- return m.Specversion
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetType() string {
- if m != nil {
- return m.Type
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetSource() string {
- if m != nil {
- return m.Source
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetSubject() string {
- if m != nil {
- return m.Subject
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetId() string {
- if m != nil {
- return m.Id
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetSchemaurl() string {
- if m != nil {
- return m.Schemaurl
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetDatacontenttype() string {
- if m != nil {
- return m.Datacontenttype
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetDatamediatype() string {
- if m != nil {
- return m.Datamediatype
- }
- return ""
-}
-
-func (m *CloudEventsContext) GetDatacontentencoding() string {
- if m != nil {
- return m.Datacontentencoding
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*CloudEventsContext)(nil), "dev.knative.CloudEventsContext")
-}
-
-func init() { proto.RegisterFile("cloud_events_context.proto", fileDescriptor_a3df9ee1ac825df6) }
-
-var fileDescriptor_a3df9ee1ac825df6 = []byte{
- // 236 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x03, 0x31,
- 0x10, 0xc6, 0xe9, 0xba, 0xee, 0x76, 0xa7, 0xf8, 0x87, 0x11, 0x64, 0x10, 0x0f, 0x45, 0x3c, 0xf4,
- 0x54, 0x04, 0x1f, 0xa1, 0x78, 0xf1, 0xe8, 0x0b, 0x94, 0x34, 0x19, 0x34, 0xda, 0x26, 0xcb, 0x66,
- 0x12, 0xf4, 0xb5, 0x7c, 0x42, 0xe9, 0xa8, 0xb8, 0x8a, 0xb7, 0x7c, 0xbf, 0xef, 0x17, 0x86, 0x19,
- 0xb8, 0xb0, 0xdb, 0x98, 0xdd, 0x9a, 0x0b, 0x07, 0x49, 0x6b, 0x1b, 0x83, 0xf0, 0xab, 0x2c, 0xfb,
- 0x21, 0x4a, 0xc4, 0x99, 0xe3, 0xb2, 0x7c, 0x09, 0x46, 0x7c, 0xe1, 0xab, 0xf7, 0x0a, 0x70, 0xb5,
- 0x77, 0xef, 0x54, 0x5d, 0x7d, 0x9a, 0x38, 0x87, 0x59, 0xea, 0xd9, 0x16, 0x1e, 0x92, 0x8f, 0x81,
- 0x26, 0xf3, 0xc9, 0xa2, 0x7b, 0x18, 0x23, 0x44, 0xa8, 0xe5, 0xad, 0x67, 0xaa, 0xb4, 0xd2, 0x37,
- 0x9e, 0x43, 0x93, 0x62, 0x1e, 0x2c, 0xd3, 0x81, 0xd2, 0xaf, 0x84, 0x04, 0x6d, 0xca, 0x9b, 0x67,
- 0xb6, 0x42, 0xb5, 0x16, 0xdf, 0x11, 0x8f, 0xa1, 0xf2, 0x8e, 0x0e, 0x15, 0x56, 0xde, 0xe1, 0x25,
- 0x74, 0xc9, 0x3e, 0xf1, 0xce, 0xe4, 0x61, 0x4b, 0xad, 0xe2, 0x1f, 0x80, 0x0b, 0x38, 0x71, 0x46,
- 0x8c, 0xae, 0x13, 0x44, 0xc7, 0x4f, 0xd5, 0xf9, 0x8b, 0xf1, 0x1a, 0x8e, 0xf6, 0x68, 0xc7, 0xce,
- 0x1b, 0xf5, 0x3a, 0xf5, 0x7e, 0x43, 0xbc, 0x81, 0xb3, 0xd1, 0x47, 0x0e, 0x36, 0x3a, 0x1f, 0x1e,
- 0x09, 0xd4, 0xfd, 0xaf, 0xba, 0xaf, 0xa7, 0xcd, 0x69, 0xbb, 0x69, 0xf4, 0x90, 0xb7, 0x1f, 0x01,
- 0x00, 0x00, 0xff, 0xff, 0xe0, 0x00, 0x3a, 0xb0, 0x66, 0x01, 0x00, 0x00,
-}
diff --git a/pkg/broker/dev_knative/cloud_events_context.proto b/pkg/broker/dev_knative/cloud_events_context.proto
deleted file mode 100644
index d1ef0ee7340..00000000000
--- a/pkg/broker/dev_knative/cloud_events_context.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-syntax = "proto3";
-
-package dev.knative;
-
-// CloudEventsContext defines the standard attributes that are always available
-// in the CEL evaluation environment. For compatibility reasons, these
-// attributes may not be exactly the same as the attributes in the current
-// version of the CloudEvents spec.
-message CloudEventsContext {
- string specversion = 1;
- string type = 2;
- string source = 3;
- string subject = 4;
- string id = 5;
- // Reserve field 6 for time. Unclear how to represent this in CEL.
- reserved 6;
- string schemaurl = 7;
- string datacontenttype = 8;
- string datamediatype = 9;
- string datacontentencoding = 10;
-}
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index 5c760e0573b..1f0346f9019 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -200,7 +200,7 @@ func TestReceiver(t *testing.T) {
},
"Dispatch succeeded - CEL parsed extensions": {
triggers: []*TriggerBuilder{
- Trigger().SubscriberURI().FilterCEL(`ext.foo == "bar"`).CELParseExtensions(),
+ Trigger().SubscriberURI().FilterCEL(`ce.foo == "bar"`).CELParseExtensions(),
},
event: Event().Extension("foo", "bar"),
expectedDispatch: true,
From 83b079fbb3fbd962ac61ff19da7c81f01b9c62f7 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Thu, 18 Apr 2019 13:22:26 -0700
Subject: [PATCH 32/76] Reconcile using pkg/controller (#1057)
* Move subscription controller to pkg/controller.
* Fix finalizers.
* merge main funcs.
* need to shuffle some stuff.
* Use pkg rec/testing.
* oh man testing testing testing.
* update deps.
* update deps.
* update deps.
* Found a bug in patchMergeKey for type struct; it has to be a key.
* clean up, adding final test.
* add pkg controller to main.
* fix up contrib.
* Fix main.
* ran codegen.
* cleanup.
* add link to issue to remove patch.
* adding back ref as deprecated ref.
* update codegen.
* remove untrue comment.
* use subscription in comment.
* don't use testing alias, move reconciler out of v1alpha1.
* use logconfig.
* update deps.
---
Gopkg.lock | 181 +-
Gopkg.toml | 29 +-
cmd/controller/main.go | 105 +-
config/300-channel.yaml | 5 +-
.../gcppubsub/pkg/controller/channel/names.go | 2 +-
.../pkg/controller/channel/names_test.go | 5 +-
.../pkg/controller/channel/reconcile.go | 56 +-
.../pkg/controller/channel/reconcile_test.go | 40 +-
.../pkg/dispatcher/dispatcher/reconcile.go | 9 +-
.../dispatcher/dispatcher/reconcile_test.go | 38 +-
contrib/gcppubsub/pkg/util/status.go | 15 +-
.../pkg/controller/channel/reconcile_test.go | 2 +-
contrib/kafka/pkg/dispatcher/dispatcher.go | 8 +-
.../kafka/pkg/dispatcher/dispatcher_test.go | 61 +-
.../pkg/controller/channel/reconcile_test.go | 2 +-
.../pkg/dispatcher/dispatcher/dispatcher.go | 2 +-
.../dispatcher/dispatcher/dispatcher_test.go | 16 +-
.../natss/pkg/dispatcher/dispatcher/types.go | 8 +-
docs/spec/spec.md | 2 +-
hack/k8s-dynamic-fake-simple.patch | 13 +
hack/update-deps.sh | 8 +
pkg/apis/duck/v1alpha1/subscribable_types.go | 27 +-
.../duck/v1alpha1/subscribable_types_test.go | 17 +-
.../duck/v1alpha1/zz_generated.deepcopy.go | 4 +-
.../cluster_channel_provisioner_types.go | 1 -
.../eventing/v1alpha1/subscription_types.go | 1 -
.../v1alpha1/clusterchannelprovisioner.go | 16 +
.../fake/fake_clusterchannelprovisioner.go | 11 +
.../v1alpha1/fake/fake_subscription.go | 12 +
.../typed/eventing/v1alpha1/subscription.go | 17 +
pkg/logging/logging.go | 37 +-
pkg/provisioners/channel_util.go | 2 +-
pkg/provisioners/channel_util_test.go | 2 +-
pkg/reconciler/handler.go | 33 +
pkg/reconciler/reconciler.go | 170 +
pkg/reconciler/stats.go | 31 +
.../subscription/subscription.go | 261 +-
.../subscription/subscription_test.go | 700 +++
pkg/reconciler/testing/channel.go | 76 +
pkg/reconciler/testing/factory.go | 95 +
pkg/reconciler/testing/listers.go | 124 +
.../reconciler/testing/service.go | 34 +-
pkg/reconciler/testing/subscription.go | 128 +
pkg/reconciler/testing/unstructured.go | 65 +
.../v1alpha1/subscription/provider_test.go | 92 -
.../subscription/subscription_test.go | 1417 ------
third_party/VENDOR-LICENSE | 677 ++-
.../monitoring/apiv3/alert_policy_client.go | 278 ++
.../go/monitoring/apiv3/doc.go | 94 +
.../go/monitoring/apiv3/group_client.go | 361 ++
.../go/monitoring/apiv3/metric_client.go | 452 ++
.../apiv3/notification_channel_client.go | 375 ++
.../go/monitoring/apiv3/path_funcs.go | 107 +
.../monitoring/apiv3/uptime_check_client.go | 361 ++
vendor/cloud.google.com/go/trace/apiv2/doc.go | 94 +
.../go/trace/apiv2/path_funcs.go | 43 +
.../go/trace/apiv2/trace_client.go | 152 +
.../exporter/stackdriver/AUTHORS | 1 +
.../exporter/stackdriver}/LICENSE | 0
.../exporter/stackdriver/label.go | 33 +
.../exporter/stackdriver/metrics.go | 547 +++
.../aws_identity_doc_utils.go | 53 +
.../monitoredresource/gcp_metadata_config.go | 90 +
.../monitoredresource/monitored_resources.go | 217 +
.../exporter/stackdriver/sanitize.go | 50 +
.../exporter/stackdriver/stackdriver.go | 346 ++
.../exporter/stackdriver/stats.go | 571 +++
.../exporter/stackdriver/trace.go | 178 +
.../exporter/stackdriver/trace_proto.go | 277 ++
vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 +
vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 +
.../aws/aws-sdk-go/aws/awserr/error.go | 145 +
.../aws/aws-sdk-go/aws/awserr/types.go | 194 +
.../aws/aws-sdk-go/aws/awsutil/copy.go | 108 +
.../aws/aws-sdk-go/aws/awsutil/equal.go | 27 +
.../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 +
.../aws/aws-sdk-go/aws/awsutil/prettify.go | 113 +
.../aws-sdk-go/aws/awsutil/string_value.go | 88 +
.../aws/aws-sdk-go/aws/client/client.go | 96 +
.../aws-sdk-go/aws/client/default_retryer.go | 116 +
.../aws/aws-sdk-go/aws/client/logger.go | 190 +
.../aws/client/metadata/client_info.go | 13 +
.../github.com/aws/aws-sdk-go/aws/config.go | 536 +++
.../aws/aws-sdk-go/aws/context_1_5.go | 37 +
.../aws/aws-sdk-go/aws/context_1_9.go | 11 +
.../aws-sdk-go/aws/context_background_1_5.go | 56 +
.../aws-sdk-go/aws/context_background_1_7.go | 20 +
.../aws/aws-sdk-go/aws/context_sleep.go | 24 +
.../aws/aws-sdk-go/aws/convert_types.go | 387 ++
.../aws-sdk-go/aws/corehandlers/handlers.go | 228 +
.../aws/corehandlers/param_validator.go | 17 +
.../aws-sdk-go/aws/corehandlers/user_agent.go | 37 +
.../aws/credentials/chain_provider.go | 100 +
.../aws-sdk-go/aws/credentials/credentials.go | 292 ++
.../ec2rolecreds/ec2_role_provider.go | 178 +
.../aws/credentials/endpointcreds/provider.go | 198 +
.../aws/credentials/env_provider.go | 74 +
.../aws/credentials/processcreds/provider.go | 425 ++
.../shared_credentials_provider.go | 150 +
.../aws/credentials/static_provider.go | 55 +
.../stscreds/assume_role_provider.go | 299 ++
.../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 +
.../aws/aws-sdk-go/aws/csm/enable.go | 67 +
.../aws/aws-sdk-go/aws/csm/metric.go | 109 +
.../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 +
.../aws-sdk-go/aws/csm/metric_exception.go | 26 +
.../aws/aws-sdk-go/aws/csm/reporter.go | 260 ++
.../aws/aws-sdk-go/aws/defaults/defaults.go | 207 +
.../aws-sdk-go/aws/defaults/shared_config.go | 27 +
vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 +
.../aws/aws-sdk-go/aws/ec2metadata/api.go | 169 +
.../aws/aws-sdk-go/aws/ec2metadata/service.go | 152 +
.../aws/aws-sdk-go/aws/endpoints/decode.go | 188 +
.../aws/aws-sdk-go/aws/endpoints/defaults.go | 4158 +++++++++++++++++
.../aws/endpoints/dep_service_ids.go | 141 +
.../aws/aws-sdk-go/aws/endpoints/doc.go | 66 +
.../aws/aws-sdk-go/aws/endpoints/endpoints.go | 449 ++
.../aws/aws-sdk-go/aws/endpoints/v3model.go | 307 ++
.../aws/endpoints/v3model_codegen.go | 351 ++
.../github.com/aws/aws-sdk-go/aws/errors.go | 13 +
.../aws/aws-sdk-go/aws/jsonvalue.go | 12 +
.../github.com/aws/aws-sdk-go/aws/logger.go | 118 +
.../aws/request/connection_reset_error.go | 19 +
.../request/connection_reset_error_other.go | 11 +
.../aws/aws-sdk-go/aws/request/handlers.go | 277 ++
.../aws-sdk-go/aws/request/http_request.go | 24 +
.../aws-sdk-go/aws/request/offset_reader.go | 60 +
.../aws/aws-sdk-go/aws/request/request.go | 673 +++
.../aws/aws-sdk-go/aws/request/request_1_7.go | 39 +
.../aws/aws-sdk-go/aws/request/request_1_8.go | 33 +
.../aws-sdk-go/aws/request/request_context.go | 14 +
.../aws/request/request_context_1_6.go | 14 +
.../aws/request/request_pagination.go | 264 ++
.../aws/aws-sdk-go/aws/request/retryer.go | 163 +
.../aws/request/timeout_read_closer.go | 94 +
.../aws/aws-sdk-go/aws/request/validation.go | 286 ++
.../aws/aws-sdk-go/aws/request/waiter.go | 295 ++
.../aws/session/cabundle_transport.go | 26 +
.../aws/session/cabundle_transport_1_5.go | 22 +
.../aws/session/cabundle_transport_1_6.go | 23 +
.../aws/aws-sdk-go/aws/session/doc.go | 273 ++
.../aws/aws-sdk-go/aws/session/env_config.go | 236 +
.../aws/aws-sdk-go/aws/session/session.go | 719 +++
.../aws-sdk-go/aws/session/shared_config.go | 329 ++
.../aws-sdk-go/aws/signer/v4/header_rules.go | 82 +
.../aws/aws-sdk-go/aws/signer/v4/options.go | 7 +
.../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 +
.../aws/aws-sdk-go/aws/signer/v4/v4.go | 796 ++++
vendor/github.com/aws/aws-sdk-go/aws/types.go | 201 +
vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 +
.../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 +
.../github.com/aws/aws-sdk-go/aws/version.go | 8 +
.../aws/aws-sdk-go/internal/ini/ast.go | 120 +
.../aws-sdk-go/internal/ini/comma_token.go | 11 +
.../aws-sdk-go/internal/ini/comment_token.go | 35 +
.../aws/aws-sdk-go/internal/ini/doc.go | 29 +
.../aws-sdk-go/internal/ini/empty_token.go | 4 +
.../aws/aws-sdk-go/internal/ini/expression.go | 24 +
.../aws/aws-sdk-go/internal/ini/fuzz.go | 17 +
.../aws/aws-sdk-go/internal/ini/ini.go | 51 +
.../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 +
.../aws/aws-sdk-go/internal/ini/ini_parser.go | 347 ++
.../aws-sdk-go/internal/ini/literal_tokens.go | 324 ++
.../aws-sdk-go/internal/ini/newline_token.go | 30 +
.../aws-sdk-go/internal/ini/number_helper.go | 152 +
.../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 +
.../aws-sdk-go/internal/ini/parse_error.go | 43 +
.../aws-sdk-go/internal/ini/parse_stack.go | 60 +
.../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 +
.../aws/aws-sdk-go/internal/ini/skipper.go | 45 +
.../aws/aws-sdk-go/internal/ini/statement.go | 35 +
.../aws/aws-sdk-go/internal/ini/value_util.go | 284 ++
.../aws/aws-sdk-go/internal/ini/visitor.go | 166 +
.../aws/aws-sdk-go/internal/ini/walker.go | 25 +
.../aws/aws-sdk-go/internal/ini/ws_token.go | 24 +
.../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 +
.../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 +
.../internal/sdkrand/locked_source.go | 29 +
.../aws/aws-sdk-go/internal/sdkuri/path.go | 23 +
.../internal/shareddefaults/ecs_container.go | 12 +
.../internal/shareddefaults/shared_config.go | 40 +
.../aws/aws-sdk-go/private/protocol/host.go | 68 +
.../private/protocol/host_prefix.go | 54 +
.../private/protocol/idempotency.go | 75 +
.../aws-sdk-go/private/protocol/jsonvalue.go | 76 +
.../aws-sdk-go/private/protocol/payload.go | 81 +
.../private/protocol/query/build.go | 36 +
.../protocol/query/queryutil/queryutil.go | 246 +
.../private/protocol/query/unmarshal.go | 39 +
.../private/protocol/query/unmarshal_error.go | 74 +
.../aws-sdk-go/private/protocol/rest/build.go | 300 ++
.../private/protocol/rest/payload.go | 45 +
.../private/protocol/rest/unmarshal.go | 225 +
.../aws-sdk-go/private/protocol/timestamp.go | 72 +
.../aws-sdk-go/private/protocol/unmarshal.go | 21 +
.../private/protocol/xml/xmlutil/build.go | 306 ++
.../private/protocol/xml/xmlutil/unmarshal.go | 272 ++
.../protocol/xml/xmlutil/xml_to_struct.go | 148 +
.../aws/aws-sdk-go/service/sts/api.go | 2401 ++++++++++
.../aws/aws-sdk-go/service/sts/doc.go | 72 +
.../aws/aws-sdk-go/service/sts/errors.go | 73 +
.../aws/aws-sdk-go/service/sts/service.go | 95 +
.../opencensus-proto/AUTHORS | 1 +
.../opencensus-proto/LICENSE | 202 +
.../gen-go/agent/common/v1/common.pb.go | 356 ++
.../gen-go/metrics/v1/metrics.pb.go | 1370 ++++++
.../gen-go/resource/v1/resource.pb.go | 99 +
.../protobuf/ptypes/struct/struct.pb.go | 440 ++
.../protobuf/ptypes/wrappers/wrappers.pb.go | 443 ++
.../github.com/jmespath/go-jmespath/LICENSE | 13 +
vendor/github.com/jmespath/go-jmespath/api.go | 49 +
.../go-jmespath/astnodetype_string.go | 16 +
.../jmespath/go-jmespath/functions.go | 842 ++++
.../jmespath/go-jmespath/interpreter.go | 418 ++
.../github.com/jmespath/go-jmespath/lexer.go | 420 ++
.../github.com/jmespath/go-jmespath/parser.go | 603 +++
.../jmespath/go-jmespath/toktype_string.go | 16 +
.../github.com/jmespath/go-jmespath/util.go | 185 +
vendor/github.com/knative/build/AUTHORS | 8 -
.../knative/build/cmd/controller/kodata/HEAD | 1 -
.../build/cmd/controller/kodata/LICENSE | 1 -
.../cmd/controller/kodata/VENDOR-LICENSE | 1 -
.../knative/build/cmd/creds-init/kodata/HEAD | 1 -
.../build/cmd/creds-init/kodata/LICENSE | 1 -
.../cmd/creds-init/kodata/VENDOR-LICENSE | 1 -
.../knative/build/cmd/git-init/kodata/HEAD | 1 -
.../knative/build/cmd/git-init/kodata/LICENSE | 1 -
.../build/cmd/git-init/kodata/VENDOR-LICENSE | 1 -
.../knative/build/cmd/logs/kodata/HEAD | 1 -
.../knative/build/cmd/logs/kodata/LICENSE | 1 -
.../build/cmd/logs/kodata/VENDOR-LICENSE | 1 -
.../knative/build/cmd/nop/kodata/HEAD | 1 -
.../knative/build/cmd/nop/kodata/LICENSE | 1 -
.../build/cmd/nop/kodata/VENDOR-LICENSE | 1 -
.../knative/build/cmd/webhook/kodata/HEAD | 1 -
.../knative/build/cmd/webhook/kodata/LICENSE | 1 -
.../build/cmd/webhook/kodata/VENDOR-LICENSE | 1 -
.../knative/build/config/300-imagecache.yaml | 1 -
.../build/v1alpha1/build_template_types.go | 117 -
.../v1alpha1/build_template_validation.go | 89 -
.../pkg/apis/build/v1alpha1/build_types.go | 337 --
.../apis/build/v1alpha1/build_validation.go | 132 -
.../v1alpha1/cluster_build_template_types.go | 76 -
.../build/v1alpha1/metadata_validation.go | 47 -
.../build/v1alpha1/target_path_validation.go | 69 -
.../build/v1alpha1/zz_generated.deepcopy.go | 566 ---
.../knative/build/test/panic/kodata/LICENSE | 1 -
.../build/test/panic/kodata/VENDOR-LICENSE | 1 -
.../build/test/workingdir/kodata/LICENSE | 1 -
.../test/workingdir/kodata/VENDOR-LICENSE | 1 -
.../apis/istio/authentication}/register.go | 7 +-
.../apis/istio/authentication/v1alpha1/doc.go | 22 +
.../authentication/v1alpha1/policy_types.go | 345 ++
.../authentication}/v1alpha1/register.go | 21 +-
.../v1alpha1/zz_generated.deepcopy.go | 259 +
.../istio/v1alpha3/virtualservice_types.go | 44 +-
.../istio/v1alpha3/zz_generated.deepcopy.go | 127 +-
.../client/clientset/versioned/clientset.go | 120 +
.../pkg/client/clientset/versioned/doc.go | 20 +
.../versioned/fake/clientset_generated.go | 94 +
.../client/clientset/versioned/fake/doc.go | 20 +
.../clientset/versioned/fake/register.go | 58 +
.../client/clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 58 +
.../v1alpha1/authentication_client.go | 90 +
.../typed/authentication}/v1alpha1/doc.go | 7 +-
.../typed/authentication/v1alpha1/fake/doc.go | 20 +
.../fake/fake_authentication_client.go | 40 +
.../v1alpha1/fake/fake_policy.go | 128 +
.../v1alpha1/generated_expansion.go | 21 +
.../typed/authentication/v1alpha1/policy.go | 157 +
.../typed/istio/v1alpha3/destinationrule.go | 157 +
.../versioned/typed/istio/v1alpha3/doc.go | 20 +
.../typed/istio/v1alpha3/fake/doc.go | 20 +
.../v1alpha3/fake/fake_destinationrule.go | 128 +
.../typed/istio/v1alpha3/fake/fake_gateway.go | 128 +
.../istio/v1alpha3/fake/fake_istio_client.go | 48 +
.../v1alpha3/fake/fake_virtualservice.go | 128 +
.../versioned/typed/istio/v1alpha3/gateway.go | 157 +
.../istio/v1alpha3/generated_expansion.go | 25 +
.../typed/istio/v1alpha3/istio_client.go | 100 +
.../typed/istio/v1alpha3/virtualservice.go | 157 +
.../listers/istio/v1alpha3/destinationrule.go | 94 +
.../istio/v1alpha3/expansion_generated.go | 43 +
.../client/listers/istio/v1alpha3/gateway.go | 94 +
.../listers/istio/v1alpha3/virtualservice.go | 94 +
.../knative/pkg/controller/controller.go | 379 ++
.../knative/pkg/controller/helper.go | 67 +
.../knative/pkg/controller/stats_reporter.go | 137 +
.../knative/pkg/logging/testing/util.go | 68 +
.../github.com/knative/pkg/metrics/config.go | 254 +
.../metrics/doc.go} | 13 +-
.../knative/pkg/metrics/exporter.go | 85 +
.../knative/pkg/metrics/gcp_metadata.go | 40 +
.../pkg/metrics/metricskey/constants.go | 79 +
.../pkg/metrics/monitored_resources.go | 53 +
.../pkg/metrics/prometheus_exporter.go | 74 +
.../github.com/knative/pkg/metrics/record.go | 56 +
.../pkg/metrics/stackdriver_exporter.go | 141 +
.../knative/pkg/reconciler/testing/actions.go | 76 +
.../knative/pkg/reconciler/testing/clock.go | 29 +
.../knative/pkg/reconciler/testing/events.go | 44 +
.../testing/generate_name_reactor.go | 86 +
.../knative/pkg/reconciler/testing/hooks.go | 183 +
.../pkg/reconciler/testing/reactions.go | 66 +
.../knative/pkg/reconciler/testing/sorter.go | 93 +
.../knative/pkg/reconciler/testing/stats.go | 40 +
.../knative/pkg/reconciler/testing/table.go | 367 ++
.../knative/pkg/reconciler/testing/tracker.go | 34 +
.../knative/pkg/reconciler/testing/util.go | 85 +
.../tracker/doc.go} | 17 +-
.../github.com/knative/pkg/tracker/enqueue.go | 169 +
.../knative/pkg/tracker/interface.go | 33 +
vendor/go.uber.org/zap/internal/ztest/doc.go | 24 +
.../go.uber.org/zap/internal/ztest/timeout.go | 59 +
.../go.uber.org/zap/internal/ztest/writer.go | 96 +
vendor/go.uber.org/zap/zaptest/doc.go | 22 +
vendor/go.uber.org/zap/zaptest/logger.go | 140 +
vendor/go.uber.org/zap/zaptest/testingt.go | 47 +
vendor/go.uber.org/zap/zaptest/timeout.go | 45 +
vendor/go.uber.org/zap/zaptest/writer.go | 44 +
.../api/distribution/distribution.pb.go | 632 +++
.../genproto/googleapis/api/label/label.pb.go | 139 +
.../googleapis/api/metric/metric.pb.go | 397 ++
.../api/monitoredres/monitored_resource.pb.go | 294 ++
.../devtools/cloudtrace/v2/trace.pb.go | 1411 ++++++
.../devtools/cloudtrace/v2/tracing.pb.go | 227 +
.../googleapis/monitoring/v3/alert.pb.go | 966 ++++
.../monitoring/v3/alert_service.pb.go | 672 +++
.../googleapis/monitoring/v3/common.pb.go | 898 ++++
.../monitoring/v3/dropped_labels.pb.go | 104 +
.../googleapis/monitoring/v3/group.pb.go | 157 +
.../monitoring/v3/group_service.pb.go | 948 ++++
.../googleapis/monitoring/v3/metric.pb.go | 234 +
.../monitoring/v3/metric_service.pb.go | 1222 +++++
.../monitoring/v3/mutation_record.pb.go | 100 +
.../monitoring/v3/notification.pb.go | 374 ++
.../monitoring/v3/notification_service.pb.go | 1319 ++++++
.../monitoring/v3/span_context.pb.go | 99 +
.../googleapis/monitoring/v3/uptime.pb.go | 969 ++++
.../monitoring/v3/uptime_service.pb.go | 793 ++++
.../apimachinery/pkg/api/errors/errors.go | 24 +
.../apimachinery/pkg/apis/meta/v1/types.go | 4 +
.../k8s.io/apimachinery/pkg/runtime/codec.go | 20 +
.../runtime/serializer/streaming/streaming.go | 2 +-
.../k8s.io/client-go/dynamic/fake/simple.go | 2 +-
.../kubernetes/fake/clientset_generated.go | 372 ++
.../k8s.io/client-go/kubernetes/fake/doc.go | 20 +
.../client-go/kubernetes/fake/register.go | 116 +
.../v1alpha1/fake/doc.go | 20 +
.../fake/fake_admissionregistration_client.go | 40 +
.../fake/fake_initializerconfiguration.go | 120 +
.../admissionregistration/v1beta1/fake/doc.go | 20 +
.../fake/fake_admissionregistration_client.go | 44 +
.../fake/fake_mutatingwebhookconfiguration.go | 120 +
.../fake_validatingwebhookconfiguration.go | 120 +
.../kubernetes/typed/apps/v1/fake/doc.go | 20 +
.../typed/apps/v1/fake/fake_apps_client.go | 56 +
.../apps/v1/fake/fake_controllerrevision.go | 128 +
.../typed/apps/v1/fake/fake_daemonset.go | 140 +
.../typed/apps/v1/fake/fake_deployment.go | 140 +
.../typed/apps/v1/fake/fake_replicaset.go | 140 +
.../typed/apps/v1/fake/fake_statefulset.go | 140 +
.../kubernetes/typed/apps/v1beta1/fake/doc.go | 20 +
.../apps/v1beta1/fake/fake_apps_client.go | 52 +
.../v1beta1/fake/fake_controllerrevision.go | 128 +
.../apps/v1beta1/fake/fake_deployment.go | 140 +
.../typed/apps/v1beta1/fake/fake_scale.go | 25 +
.../apps/v1beta1/fake/fake_statefulset.go | 140 +
.../kubernetes/typed/apps/v1beta2/fake/doc.go | 20 +
.../apps/v1beta2/fake/fake_apps_client.go | 60 +
.../v1beta2/fake/fake_controllerrevision.go | 128 +
.../typed/apps/v1beta2/fake/fake_daemonset.go | 140 +
.../apps/v1beta2/fake/fake_deployment.go | 140 +
.../apps/v1beta2/fake/fake_replicaset.go | 140 +
.../typed/apps/v1beta2/fake/fake_scale.go | 25 +
.../apps/v1beta2/fake/fake_statefulset.go | 162 +
.../typed/authentication/v1/fake/doc.go | 20 +
.../v1/fake/fake_authentication_client.go | 40 +
.../v1/fake/fake_tokenreview.go | 24 +
.../v1/fake/fake_tokenreview_expansion.go | 27 +
.../typed/authentication/v1beta1/fake/doc.go | 20 +
.../fake/fake_authentication_client.go | 40 +
.../v1beta1/fake/fake_tokenreview.go | 24 +
.../fake/fake_tokenreview_expansion.go | 27 +
.../typed/authorization/v1/fake/doc.go | 20 +
.../v1/fake/fake_authorization_client.go | 52 +
.../v1/fake/fake_localsubjectaccessreview.go | 25 +
...fake_localsubjectaccessreview_expansion.go | 27 +
.../v1/fake/fake_selfsubjectaccessreview.go | 24 +
.../fake_selfsubjectaccessreview_expansion.go | 27 +
.../v1/fake/fake_selfsubjectrulesreview.go | 24 +
.../fake_selfsubjectrulesreview_expansion.go | 27 +
.../v1/fake/fake_subjectaccessreview.go | 24 +
.../fake_subjectaccessreview_expansion.go | 30 +
.../typed/authorization/v1beta1/fake/doc.go | 20 +
.../v1beta1/fake/fake_authorization_client.go | 52 +
.../v1beta1/fake/fake_generated_expansion.go | 17 +
.../fake/fake_localsubjectaccessreview.go | 25 +
...fake_localsubjectaccessreview_expansion.go | 27 +
.../fake/fake_selfsubjectaccessreview.go | 24 +
.../fake_selfsubjectaccessreview_expansion.go | 27 +
.../fake/fake_selfsubjectrulesreview.go | 24 +
.../fake_selfsubjectrulesreview_expansion.go | 27 +
.../v1beta1/fake/fake_subjectaccessreview.go | 24 +
.../fake_subjectaccessreview_expansion.go | 27 +
.../typed/autoscaling/v1/fake/doc.go | 20 +
.../v1/fake/fake_autoscaling_client.go | 40 +
.../v1/fake/fake_horizontalpodautoscaler.go | 140 +
.../typed/autoscaling/v2beta1/fake/doc.go | 20 +
.../v2beta1/fake/fake_autoscaling_client.go | 40 +
.../fake/fake_horizontalpodautoscaler.go | 140 +
.../typed/autoscaling/v2beta2/fake/doc.go | 20 +
.../v2beta2/fake/fake_autoscaling_client.go | 40 +
.../fake/fake_horizontalpodautoscaler.go | 140 +
.../kubernetes/typed/batch/v1/fake/doc.go | 20 +
.../typed/batch/v1/fake/fake_batch_client.go | 40 +
.../typed/batch/v1/fake/fake_job.go | 140 +
.../typed/batch/v1beta1/fake/doc.go | 20 +
.../batch/v1beta1/fake/fake_batch_client.go | 40 +
.../typed/batch/v1beta1/fake/fake_cronjob.go | 140 +
.../typed/batch/v2alpha1/fake/doc.go | 20 +
.../batch/v2alpha1/fake/fake_batch_client.go | 40 +
.../typed/batch/v2alpha1/fake/fake_cronjob.go | 140 +
.../typed/certificates/v1beta1/fake/doc.go | 20 +
.../v1beta1/fake/fake_certificates_client.go | 40 +
.../fake/fake_certificatesigningrequest.go | 131 +
...ake_certificatesigningrequest_expansion.go | 31 +
.../typed/coordination/v1beta1/fake/doc.go | 20 +
.../v1beta1/fake/fake_coordination_client.go | 40 +
.../coordination/v1beta1/fake/fake_lease.go | 128 +
.../kubernetes/typed/core/v1/fake/doc.go | 20 +
.../core/v1/fake/fake_componentstatus.go | 120 +
.../typed/core/v1/fake/fake_configmap.go | 128 +
.../typed/core/v1/fake/fake_core_client.go | 100 +
.../typed/core/v1/fake/fake_endpoints.go | 128 +
.../typed/core/v1/fake/fake_event.go | 128 +
.../core/v1/fake/fake_event_expansion.go | 89 +
.../typed/core/v1/fake/fake_limitrange.go | 128 +
.../typed/core/v1/fake/fake_namespace.go | 123 +
.../core/v1/fake/fake_namespace_expansion.go | 37 +
.../typed/core/v1/fake/fake_node.go | 131 +
.../typed/core/v1/fake/fake_node_expansion.go | 32 +
.../core/v1/fake/fake_persistentvolume.go | 131 +
.../v1/fake/fake_persistentvolumeclaim.go | 140 +
.../kubernetes/typed/core/v1/fake/fake_pod.go | 140 +
.../typed/core/v1/fake/fake_pod_expansion.go | 58 +
.../typed/core/v1/fake/fake_podtemplate.go | 128 +
.../v1/fake/fake_replicationcontroller.go | 163 +
.../typed/core/v1/fake/fake_resourcequota.go | 140 +
.../typed/core/v1/fake/fake_secret.go | 128 +
.../typed/core/v1/fake/fake_service.go | 132 +
.../core/v1/fake/fake_service_expansion.go | 26 +
.../typed/core/v1/fake/fake_serviceaccount.go | 128 +
.../v1/fake/fake_serviceaccount_expansion.go | 31 +
.../typed/events/v1beta1/fake/doc.go | 20 +
.../typed/events/v1beta1/fake/fake_event.go | 128 +
.../events/v1beta1/fake/fake_events_client.go | 40 +
.../typed/extensions/v1beta1/fake/doc.go | 20 +
.../extensions/v1beta1/fake/fake_daemonset.go | 140 +
.../v1beta1/fake/fake_deployment.go | 162 +
.../v1beta1/fake/fake_deployment_expansion.go | 33 +
.../v1beta1/fake/fake_extensions_client.go | 60 +
.../extensions/v1beta1/fake/fake_ingress.go | 140 +
.../v1beta1/fake/fake_podsecuritypolicy.go | 120 +
.../v1beta1/fake/fake_replicaset.go | 162 +
.../extensions/v1beta1/fake/fake_scale.go | 25 +
.../v1beta1/fake/fake_scale_expansion.go | 47 +
.../typed/networking/v1/fake/doc.go | 20 +
.../v1/fake/fake_networking_client.go | 40 +
.../networking/v1/fake/fake_networkpolicy.go | 128 +
.../typed/policy/v1beta1/fake/doc.go | 20 +
.../policy/v1beta1/fake/fake_eviction.go | 25 +
.../v1beta1/fake/fake_eviction_expansion.go | 33 +
.../v1beta1/fake/fake_poddisruptionbudget.go | 140 +
.../v1beta1/fake/fake_podsecuritypolicy.go | 120 +
.../policy/v1beta1/fake/fake_policy_client.go | 48 +
.../kubernetes/typed/rbac/v1/fake/doc.go | 20 +
.../typed/rbac/v1/fake/fake_clusterrole.go | 120 +
.../rbac/v1/fake/fake_clusterrolebinding.go | 120 +
.../typed/rbac/v1/fake/fake_rbac_client.go | 52 +
.../typed/rbac/v1/fake/fake_role.go | 128 +
.../typed/rbac/v1/fake/fake_rolebinding.go | 128 +
.../typed/rbac/v1alpha1/fake/doc.go | 20 +
.../rbac/v1alpha1/fake/fake_clusterrole.go | 120 +
.../v1alpha1/fake/fake_clusterrolebinding.go | 120 +
.../rbac/v1alpha1/fake/fake_rbac_client.go | 52 +
.../typed/rbac/v1alpha1/fake/fake_role.go | 128 +
.../rbac/v1alpha1/fake/fake_rolebinding.go | 128 +
.../kubernetes/typed/rbac/v1beta1/fake/doc.go | 20 +
.../rbac/v1beta1/fake/fake_clusterrole.go | 120 +
.../v1beta1/fake/fake_clusterrolebinding.go | 120 +
.../rbac/v1beta1/fake/fake_rbac_client.go | 52 +
.../typed/rbac/v1beta1/fake/fake_role.go | 128 +
.../rbac/v1beta1/fake/fake_rolebinding.go | 128 +
.../typed/scheduling/v1alpha1/fake/doc.go | 20 +
.../v1alpha1/fake/fake_priorityclass.go | 120 +
.../v1alpha1/fake/fake_scheduling_client.go | 40 +
.../typed/scheduling/v1beta1/fake/doc.go | 20 +
.../v1beta1/fake/fake_priorityclass.go | 120 +
.../v1beta1/fake/fake_scheduling_client.go | 40 +
.../typed/settings/v1alpha1/fake/doc.go | 20 +
.../settings/v1alpha1/fake/fake_podpreset.go | 128 +
.../v1alpha1/fake/fake_settings_client.go | 40 +
.../kubernetes/typed/storage/v1/fake/doc.go | 20 +
.../storage/v1/fake/fake_storage_client.go | 40 +
.../storage/v1/fake/fake_storageclass.go | 120 +
.../typed/storage/v1alpha1/fake/doc.go | 20 +
.../v1alpha1/fake/fake_storage_client.go | 40 +
.../v1alpha1/fake/fake_volumeattachment.go | 131 +
.../typed/storage/v1beta1/fake/doc.go | 20 +
.../v1beta1/fake/fake_storage_client.go | 44 +
.../storage/v1beta1/fake/fake_storageclass.go | 120 +
.../v1beta1/fake/fake_volumeattachment.go | 131 +
514 files changed, 67374 insertions(+), 3469 deletions(-)
create mode 100644 hack/k8s-dynamic-fake-simple.patch
create mode 100644 pkg/reconciler/handler.go
create mode 100644 pkg/reconciler/reconciler.go
create mode 100644 pkg/reconciler/stats.go
rename pkg/reconciler/{v1alpha1 => }/subscription/subscription.go (61%)
create mode 100644 pkg/reconciler/subscription/subscription_test.go
create mode 100644 pkg/reconciler/testing/channel.go
create mode 100644 pkg/reconciler/testing/factory.go
create mode 100644 pkg/reconciler/testing/listers.go
rename vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go => pkg/reconciler/testing/service.go (53%)
create mode 100644 pkg/reconciler/testing/subscription.go
create mode 100644 pkg/reconciler/testing/unstructured.go
delete mode 100644 pkg/reconciler/v1alpha1/subscription/provider_test.go
delete mode 100644 pkg/reconciler/v1alpha1/subscription/subscription_test.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/doc.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
create mode 100644 vendor/cloud.google.com/go/trace/apiv2/doc.go
create mode 100644 vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
create mode 100644 vendor/cloud.google.com/go/trace/apiv2/trace_client.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
rename vendor/{github.com/knative/build => contrib.go.opencensus.io/exporter/stackdriver}/LICENSE (100%)
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt
create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/logger.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/version.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go
create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
create mode 100644 vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
create mode 100644 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE
create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go
create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go
delete mode 100644 vendor/github.com/knative/build/AUTHORS
delete mode 120000 vendor/github.com/knative/build/cmd/controller/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/controller/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/creds-init/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/git-init/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/logs/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/logs/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/nop/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/nop/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/webhook/kodata/HEAD
delete mode 120000 vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/config/300-imagecache.yaml
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/target_path_validation.go
delete mode 100644 vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go
delete mode 120000 vendor/github.com/knative/build/test/panic/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE
delete mode 120000 vendor/github.com/knative/build/test/workingdir/kodata/LICENSE
delete mode 120000 vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE
rename vendor/github.com/knative/{build/pkg/apis/build => pkg/apis/istio/authentication}/register.go (82%)
create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
rename vendor/github.com/knative/{build/pkg/apis/build => pkg/apis/istio/authentication}/v1alpha1/register.go (74%)
create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go
rename vendor/github.com/knative/{build/pkg/apis/build => pkg/client/clientset/versioned/typed/authentication}/v1alpha1/doc.go (78%)
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go
create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
create mode 100644 vendor/github.com/knative/pkg/controller/controller.go
create mode 100644 vendor/github.com/knative/pkg/controller/helper.go
create mode 100644 vendor/github.com/knative/pkg/controller/stats_reporter.go
create mode 100644 vendor/github.com/knative/pkg/logging/testing/util.go
create mode 100644 vendor/github.com/knative/pkg/metrics/config.go
rename vendor/github.com/knative/{build/pkg/apis/build/v1alpha1/build_template_interface.go => pkg/metrics/doc.go} (72%)
create mode 100644 vendor/github.com/knative/pkg/metrics/exporter.go
create mode 100644 vendor/github.com/knative/pkg/metrics/gcp_metadata.go
create mode 100644 vendor/github.com/knative/pkg/metrics/metricskey/constants.go
create mode 100644 vendor/github.com/knative/pkg/metrics/monitored_resources.go
create mode 100644 vendor/github.com/knative/pkg/metrics/prometheus_exporter.go
create mode 100644 vendor/github.com/knative/pkg/metrics/record.go
create mode 100644 vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/actions.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/clock.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/events.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/hooks.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/reactions.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/sorter.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/stats.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/table.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/tracker.go
create mode 100644 vendor/github.com/knative/pkg/reconciler/testing/util.go
rename vendor/github.com/knative/{build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go => pkg/tracker/doc.go} (65%)
create mode 100644 vendor/github.com/knative/pkg/tracker/enqueue.go
create mode 100644 vendor/github.com/knative/pkg/tracker/interface.go
create mode 100644 vendor/go.uber.org/zap/internal/ztest/doc.go
create mode 100644 vendor/go.uber.org/zap/internal/ztest/timeout.go
create mode 100644 vendor/go.uber.org/zap/internal/ztest/writer.go
create mode 100644 vendor/go.uber.org/zap/zaptest/doc.go
create mode 100644 vendor/go.uber.org/zap/zaptest/logger.go
create mode 100644 vendor/go.uber.org/zap/zaptest/testingt.go
create mode 100644 vendor/go.uber.org/zap/zaptest/timeout.go
create mode 100644 vendor/go.uber.org/zap/zaptest/writer.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/fake/register.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 4559393000b..7d6b6d09c1b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,21 +2,33 @@
[[projects]]
- digest = "1:b1831dd9e209127129e5e967b26fe69f92ce95b32776b6a6d91612be6b2a45fe"
+ digest = "1:04a6834455a0a97cdd9bccc7394576ddc7a8a75f69fdc1431303c543df03bcac"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
"iam",
"internal/optional",
"internal/version",
+ "monitoring/apiv3",
"pubsub",
"pubsub/apiv1",
"pubsub/internal/distribution",
+ "trace/apiv2",
]
pruneopts = "NUT"
revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430"
version = "v0.34.0"
+[[projects]]
+ digest = "1:b6eb7c2538ec2999a072c0e372a18d7b7e3aedac249f26e159586fa5f892909f"
+ name = "contrib.go.opencensus.io/exporter/stackdriver"
+ packages = [
+ ".",
+ "monitoredresource",
+ ]
+ pruneopts = "NUT"
+ revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
+
[[projects]]
digest = "1:a074ae0f4788ea4c4c7045ab37f21943920bc20cf6ff8afcb2d971154cfa87ab"
name = "github.com/Shopify/sarama"
@@ -33,6 +45,44 @@
pruneopts = "NUT"
revision = "f0300d1749da6fa982027e449ec0c7a145510c3c"
+[[projects]]
+ digest = "1:bb3d285967352e57fa2d0bf8792712700a63b7eb41d500d742c2d2e67b52cf8e"
+ name = "github.com/aws/aws-sdk-go"
+ packages = [
+ "aws",
+ "aws/awserr",
+ "aws/awsutil",
+ "aws/client",
+ "aws/client/metadata",
+ "aws/corehandlers",
+ "aws/credentials",
+ "aws/credentials/ec2rolecreds",
+ "aws/credentials/endpointcreds",
+ "aws/credentials/processcreds",
+ "aws/credentials/stscreds",
+ "aws/csm",
+ "aws/defaults",
+ "aws/ec2metadata",
+ "aws/endpoints",
+ "aws/request",
+ "aws/session",
+ "aws/signer/v4",
+ "internal/ini",
+ "internal/sdkio",
+ "internal/sdkrand",
+ "internal/sdkuri",
+ "internal/shareddefaults",
+ "private/protocol",
+ "private/protocol/query",
+ "private/protocol/query/queryutil",
+ "private/protocol/rest",
+ "private/protocol/xml/xmlutil",
+ "service/sts",
+ ]
+ pruneopts = "NUT"
+ revision = "56c1def75689cceec1fa6f14c2eedb4b798827f9"
+ version = "v1.19.11"
+
[[projects]]
branch = "master"
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
@@ -57,6 +107,18 @@
revision = "c618e605e15c0d7535f6c96ff8efbb0dba4fd66c"
version = "v2.1.15"
+[[projects]]
+ digest = "1:fa965c1fd0f17153f608037e109e62104058bc1d08d44849867795fd306fa8b8"
+ name = "github.com/census-instrumentation/opencensus-proto"
+ packages = [
+ "gen-go/agent/common/v1",
+ "gen-go/metrics/v1",
+ "gen-go/resource/v1",
+ ]
+ pruneopts = "NUT"
+ revision = "7f2434bc10da710debe5c4315ed6d4df454b4024"
+ version = "v0.1.0"
+
[[projects]]
digest = "1:13e0fa926561dc81e7229d65f5c0aa2cc74b4020d2d3da416fff4ee0946545da"
name = "github.com/cloudevents/sdk-go"
@@ -188,7 +250,7 @@
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
[[projects]]
- digest = "1:0f7f0d9512487860d967bd31b4a9668316e53630fd71cb57a84ccf97c852df84"
+ digest = "1:2f01b7e14109d05096661dcb8bab93736d7ba97b4b6a54e1f415282ca3162297"
name = "github.com/golang/protobuf"
packages = [
"proto",
@@ -197,7 +259,9 @@
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
+ "ptypes/struct",
"ptypes/timestamp",
+ "ptypes/wrappers",
]
pruneopts = "NUT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
@@ -338,6 +402,13 @@
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
version = "v0.3.6"
+[[projects]]
+ digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885"
+ name = "github.com/jmespath/go-jmespath"
+ packages = ["."]
+ pruneopts = "NUT"
+ revision = "c2b33e84"
+
[[projects]]
digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a"
name = "github.com/joho/godotenv"
@@ -354,31 +425,36 @@
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
- digest = "1:0836bde83bdc49aae7710e22d04e2555d9bd25cd133fbb78508c492307081aea"
- name = "github.com/knative/build"
- packages = [
- "pkg/apis/build",
- "pkg/apis/build/v1alpha1",
- ]
- pruneopts = "NUT"
- revision = "dd3ceb3323922b899a0a913f885fcf59943e7b59"
-
-[[projects]]
- digest = "1:244b7861fc13a97a1c937ce845f36311dfa00284cd25599831559843cba86ded"
+ digest = "1:b657ec75371e8baf47023f7eb4f20d78e2a744f51ec824b40dd6cf74ad6fdaf4"
name = "github.com/knative/pkg"
packages = [
"apis",
"apis/duck",
"apis/duck/v1alpha1",
"apis/istio",
+ "apis/istio/authentication",
+ "apis/istio/authentication/v1alpha1",
"apis/istio/common/v1alpha1",
"apis/istio/v1alpha3",
"changeset",
+ "client/clientset/versioned",
+ "client/clientset/versioned/fake",
+ "client/clientset/versioned/scheme",
+ "client/clientset/versioned/typed/authentication/v1alpha1",
+ "client/clientset/versioned/typed/authentication/v1alpha1/fake",
+ "client/clientset/versioned/typed/istio/v1alpha3",
+ "client/clientset/versioned/typed/istio/v1alpha3/fake",
+ "client/listers/istio/v1alpha3",
"configmap",
+ "controller",
"kmeta",
"kmp",
"logging",
"logging/logkey",
+ "logging/testing",
+ "metrics",
+ "metrics/metricskey",
+ "reconciler/testing",
"signals",
"system",
"system/testing",
@@ -388,10 +464,11 @@
"test/monitoring",
"test/spoof",
"test/zipkin",
+ "tracker",
"webhook",
]
pruneopts = "NUT"
- revision = "1095a4eab01cb7f5bbfa4dd5d048d730a00980e3"
+ revision = "418e675f88c29db0217475a74e61ebfa239f0f50"
[[projects]]
branch = "master"
@@ -656,7 +733,7 @@
version = "v1.1.0"
[[projects]]
- digest = "1:9368dabe955bb73491890133c1dd1ad212f16a7c47e26b331b5db0a5edbc38af"
+ digest = "1:6463628b4e2c5680bf069a15bf109ee6dd8ffc54e20447a23639167128c2c4ed"
name = "go.uber.org/zap"
packages = [
".",
@@ -664,12 +741,13 @@
"internal/bufferpool",
"internal/color",
"internal/exit",
+ "internal/ztest",
"zapcore",
+ "zaptest",
"zaptest/observer",
]
pruneopts = "NUT"
- revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982"
- version = "v1.9.1"
+ revision = "67bc79d13d155c02fd008f721863ff8cc5f30659"
[[projects]]
branch = "master"
@@ -824,11 +902,17 @@
[[projects]]
branch = "master"
- digest = "1:5bc750ea612e0650b4095b019e70fc7530c0a30a33445fbd0b79c5a54a729908"
+ digest = "1:dba8c4bdaeca16a46cf1a667fe54260501055ab54ebe020baef12ac6477b9e73"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
+ "googleapis/api/distribution",
+ "googleapis/api/label",
+ "googleapis/api/metric",
+ "googleapis/api/monitoredres",
+ "googleapis/devtools/cloudtrace/v2",
"googleapis/iam/v1",
+ "googleapis/monitoring/v3",
"googleapis/pubsub/v1",
"googleapis/rpc/status",
"protobuf/field_mask",
@@ -926,11 +1010,11 @@
"storage/v1beta1",
]
pruneopts = "NUT"
- revision = "6db15a15d2d3874a6c3ddb2140ac9f3bc7058428"
- version = "kubernetes-1.12.5"
+ revision = "145d52631d00cbfe68490d19ae4f0f501fd31a95"
+ version = "kubernetes-1.12.6"
[[projects]]
- digest = "1:71d28556d46329081174d64fc8d55dc8317294b7e610825d464730b16b4e78aa"
+ digest = "1:58293c3a67fbca0336cd3cc2140dfe630545694452cf73980d2e4b6e6ce2660c"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
@@ -982,11 +1066,11 @@
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
- revision = "49ce2735e5074ffc3f8190c8406cf51a96302dad"
- version = "kubernetes-1.12.5"
+ revision = "01f179d85dbce0f2e0e4351a92394b38694b7cae"
+ version = "kubernetes-1.12.6"
[[projects]]
- digest = "1:9de26569b5a8cc8c8c96051c305e980af8644064a64da29dfd0601245d22a438"
+ digest = "1:f15a0d552b80277323d01ee8acc6750bb57bdae997bc63bf192d84fb1f0b9136"
name = "k8s.io/client-go"
packages = [
"discovery",
@@ -1038,38 +1122,70 @@
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
+ "kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
+ "kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
+ "kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
+ "kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
+ "kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
+ "kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/authentication/v1",
+ "kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
+ "kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
+ "kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
+ "kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
+ "kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
+ "kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/autoscaling/v2beta2",
+ "kubernetes/typed/autoscaling/v2beta2/fake",
"kubernetes/typed/batch/v1",
+ "kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
+ "kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
+ "kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
+ "kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/coordination/v1beta1",
+ "kubernetes/typed/coordination/v1beta1/fake",
"kubernetes/typed/core/v1",
+ "kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
+ "kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
+ "kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
+ "kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
+ "kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
+ "kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
+ "kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
+ "kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
+ "kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/scheduling/v1beta1",
+ "kubernetes/typed/scheduling/v1beta1/fake",
"kubernetes/typed/settings/v1alpha1",
+ "kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
+ "kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
+ "kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
+ "kubernetes/typed/storage/v1beta1/fake",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
@@ -1132,8 +1248,8 @@
"util/workqueue",
]
pruneopts = "NUT"
- revision = "701b913670036511e3d752318272c97f1a2a2edd"
- version = "kubernetes-1.12.5"
+ revision = "78295b709ec6fa5be12e35892477a326dea2b5d3"
+ version = "kubernetes-1.12.6"
[[projects]]
digest = "1:26b81b5e76e3f84ea5140da4f74649576e470f79091d2ef8e0d1b5000bc636ca"
@@ -1161,7 +1277,7 @@
]
pruneopts = "T"
revision = "b1289fc74931d4b6b04bd1a259acfc88a2cb0a66"
- version = "kubernetes-1.12.5"
+ version = "kubernetes-1.12.6"
[[projects]]
branch = "master"
@@ -1246,15 +1362,19 @@
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/uuid",
- "github.com/knative/build/pkg/apis/build/v1alpha1",
"github.com/knative/pkg/apis",
"github.com/knative/pkg/apis/duck",
"github.com/knative/pkg/apis/duck/v1alpha1",
"github.com/knative/pkg/apis/istio/v1alpha3",
+ "github.com/knative/pkg/client/clientset/versioned/fake",
+ "github.com/knative/pkg/client/listers/istio/v1alpha3",
"github.com/knative/pkg/configmap",
+ "github.com/knative/pkg/controller",
"github.com/knative/pkg/kmp",
"github.com/knative/pkg/logging",
"github.com/knative/pkg/logging/logkey",
+ "github.com/knative/pkg/logging/testing",
+ "github.com/knative/pkg/reconciler/testing",
"github.com/knative/pkg/signals",
"github.com/knative/pkg/system",
"github.com/knative/pkg/system/testing",
@@ -1303,8 +1423,13 @@
"k8s.io/client-go/discovery/fake",
"k8s.io/client-go/dynamic",
"k8s.io/client-go/dynamic/fake",
+ "k8s.io/client-go/informers",
"k8s.io/client-go/kubernetes",
+ "k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/kubernetes/scheme",
+ "k8s.io/client-go/kubernetes/typed/core/v1",
+ "k8s.io/client-go/listers/apps/v1",
+ "k8s.io/client-go/listers/core/v1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/rest",
"k8s.io/client-go/testing",
diff --git a/Gopkg.toml b/Gopkg.toml
index bed42ea2792..524bd6aff1c 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -10,7 +10,6 @@ required = [
"k8s.io/code-generator/cmd/informer-gen",
"github.com/knative/test-infra/scripts",
"github.com/knative/test-infra/tools/dep-collector",
- "github.com/knative/build/pkg/apis/build/v1alpha1",
]
[prune]
@@ -50,33 +49,36 @@ required = [
name = "gopkg.in/yaml.v2"
version = "v2.2.1"
+# TODO(https://github.com/knative/eventing/issues/1065): if we get to update k8s.io to > 1.13, we can remove
+# k8s-dynamic-fake-simple.patch in ./hack/update-deps.sh
+#
# Overridden to ensure compatibility with GKE
# GKE version as of 2019-01-24 is 1.11
# controller-runtime 0.1.9 requires at least 1.12
[[override]]
name = "k8s.io/api"
- version = "kubernetes-1.12.5"
+ version = "kubernetes-1.12.6"
# Overridden to ensure compatibility with GKE
# GKE version as of 2019-01-24 is 1.11
# controller-runtime 0.1.9 requires at least 1.12
[[override]]
name = "k8s.io/apimachinery"
- version = "kubernetes-1.12.5"
+ version = "kubernetes-1.12.6"
# Overridden to ensure compatibility with GKE
# GKE version as of 2019-01-24 is 1.11
# controller-runtime 0.1.9 requires at least 1.12
[[override]]
name = "k8s.io/code-generator"
- version = "kubernetes-1.12.5"
+ version = "kubernetes-1.12.6"
# Overridden to ensure compatibility with GKE
# GKE version as of 2019-01-24 is 1.11
# controller-runtime 0.1.9 requires at least 1.12
[[override]]
name = "k8s.io/client-go"
- version = "kubernetes-1.12.5"
+ version = "kubernetes-1.12.6"
# This is the commit at which k8s depends on this in 1.11
# It seems to be broken at HEAD.
@@ -98,10 +100,10 @@ required = [
# Constrain the version of knative/pkg we would like to import.
# This controls when we upgrade apis independently of Serving.
-[[constraint]]
+[[override]]
name = "github.com/knative/pkg"
- # HEAD as of 2019-04-15
- revision = "1095a4eab01cb7f5bbfa4dd5d048d730a00980e3"
+ # HEAD as of 2019-04-17
+ revision = "418e675f88c29db0217475a74e61ebfa239f0f50"
# TODO why is this overridden?
[[override]]
@@ -131,3 +133,14 @@ required = [
[[constraint]]
name = "github.com/cloudevents/sdk-go"
version = "=0.6.0"
+
+[[override]]
+ name = "contrib.go.opencensus.io/exporter/stackdriver"
+ # HEAD as of 2019-02-11
+ # Needed because this includes a fix to support Stackdriver built-in metrics
+ revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
+
+# needed because pkg upgraded
+[[override]]
+ name = "go.uber.org/zap"
+ revision = "67bc79d13d155c02fd008f721863ff8cc5f30659"
\ No newline at end of file
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 82851fc1bce..2eea7fdd832 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -19,34 +19,40 @@ package main
import (
"context"
"flag"
+ "github.com/knative/eventing/pkg/reconciler/subscription"
"log"
"net/http"
"os"
"time"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
"github.com/knative/eventing/pkg/logconfig"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/channel"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/subscription"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
- "github.com/knative/pkg/logging"
+ kncontroller "github.com/knative/pkg/controller"
"github.com/knative/pkg/logging/logkey"
"github.com/knative/pkg/signals"
"github.com/knative/pkg/system"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/kubernetes"
controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
- // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
- // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
const (
@@ -68,19 +74,10 @@ func main() {
flag.Parse()
logf.SetLogger(logf.ZapLogger(false))
- // Read the logging config and setup a logger.
- cm := getLoggingConfigOrDie()
-
- config, err := logging.NewConfigFromMap(cm, logconfig.Controller)
- if err != nil {
- log.Fatalf("Error parsing logging configuration: %v", err)
- }
- logger, atomicLevel := logging.NewLoggerFromConfig(config, logconfig.Controller)
+ logger, atomicLevel := setupLogger()
defer logger.Sync()
logger = logger.With(zap.String(logkey.ControllerType, logconfig.Controller))
- logger.Info("Starting the controller")
-
// set up signals so we handle the first shutdown signal gracefully
stopCh := signals.SetupSignalHandler()
@@ -89,6 +86,69 @@ func main() {
logger.Fatalf("Error building kubeconfig: %v", err)
}
+ go startPkgController(stopCh, cfg, logger, atomicLevel)
+ go startControllerRuntime(stopCh, cfg, logger, atomicLevel)
+ <-stopCh
+}
+
+func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel) {
+ logger = logger.With(zap.String("controller/impl", "pkg"))
+ logger.Info("Starting the controller")
+
+ const numControllers = 1
+ cfg.QPS = numControllers * rest.DefaultQPS
+ cfg.Burst = numControllers * rest.DefaultBurst
+ opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
+
+ kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod)
+ eventingInformerFactory := informers.NewSharedInformerFactory(opt.EventingClientSet, opt.ResyncPeriod)
+
+ subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
+ // TODO: remove unused after done integrating all controllers.
+ //deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
+ //coreServiceInformer := kubeInformerFactory.Core().V1().Services()
+ configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps()
+
+ // Build all of our controllers, with the clients constructed above.
+ // Add new controllers to this array.
+ controllers := []*kncontroller.Impl{
+ subscription.NewController(
+ opt,
+ subscriptionInformer,
+ ),
+ }
+ if len(controllers) != numControllers {
+ logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
+ }
+
+ // Watch the logging config map and dynamically update logging levels.
+ opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller))
+ // TODO: Watch the observability config map and dynamically update metrics exporter.
+ //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger))
+ if err := opt.ConfigMapWatcher.Start(stopCh); err != nil {
+ logger.Fatalw("failed to start configuration manager", zap.Error(err))
+ }
+
+ // Start all of the informers and wait for them to sync.
+ logger.Info("Starting informers.")
+ if err := kncontroller.StartInformers(
+ stopCh,
+ subscriptionInformer.Informer(),
+ configMapInformer.Informer(),
+ ); err != nil {
+ logger.Fatalf("Failed to start informers: %v", err)
+ }
+
+ // Start all of the controllers.
+ logger.Info("Starting controllers.")
+ go kncontroller.StartAll(stopCh, controllers...)
+}
+
+// TODO: remove after done integrating all controllers.
+func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel) {
+ logger = logger.With(zap.String("controller/impl", "cr"))
+ logger.Info("Starting the controller")
+
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building kubernetes clientset: %v", err)
@@ -96,7 +156,7 @@ func main() {
// Watch the logging config map and dynamically update logging levels.
configMapWatcher := configmap.NewInformedWatcher(kubeClient, system.Namespace())
- configMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller, logconfig.Controller))
+ configMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller))
if err = configMapWatcher.Start(stopCh); err != nil {
logger.Fatalf("Failed to start controller config map watcher: %v", err)
}
@@ -121,7 +181,6 @@ func main() {
// Add each controller's ProvideController func to this list to have the
// manager run it.
providers := []ProvideFunc{
- subscription.ProvideController,
channel.ProvideController,
broker.ProvideController(
broker.ReconcilerArgs{
@@ -168,6 +227,16 @@ func init() {
flag.BoolVar(&hardcodedLoggingConfig, "hardCodedLoggingConfig", false, "If true, use the hard coded logging config. It is intended to be used only when debugging outside a Kubernetes cluster.")
}
+func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) {
+ // Set up our logger.
+ loggingConfigMap := getLoggingConfigOrDie()
+ loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap)
+ if err != nil {
+ log.Fatalf("Error parsing logging configuration: %v", err)
+ }
+ return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller)
+}
+
func getLoggingConfigOrDie() map[string]string {
if hardcodedLoggingConfig {
return map[string]string{
diff --git a/config/300-channel.yaml b/config/300-channel.yaml
index 9fff1f225db..d5d03bf9654 100644
--- a/config/300-channel.yaml
+++ b/config/300-channel.yaml
@@ -74,7 +74,7 @@ spec:
type: array
items:
required:
- - ref
+ - uid
properties:
ref:
type: object
@@ -96,6 +96,9 @@ spec:
uid:
type: string
minLength: 1
+ uid:
+ type: string
+ minLength: 1
subscriberURI:
type: string
minLength: 1
diff --git a/contrib/gcppubsub/pkg/controller/channel/names.go b/contrib/gcppubsub/pkg/controller/channel/names.go
index 3efa1808b09..990210b8012 100644
--- a/contrib/gcppubsub/pkg/controller/channel/names.go
+++ b/contrib/gcppubsub/pkg/controller/channel/names.go
@@ -34,5 +34,5 @@ func generateTopicName(c *v1alpha1.Channel) string {
// subscriber.
// Note that this requires the subscriber's ref to be set correctly.
func generateSubName(cs *eventduck.ChannelSubscriberSpec) string {
- return utils.TopicNameWithUID("_", cs.Ref.Name, cs.Ref.UID)
+ return utils.TopicNameWithUID("_", string(cs.DeprecatedRef.Name), cs.UID)
}
diff --git a/contrib/gcppubsub/pkg/controller/channel/names_test.go b/contrib/gcppubsub/pkg/controller/channel/names_test.go
index d3157457a85..9014ed61e43 100644
--- a/contrib/gcppubsub/pkg/controller/channel/names_test.go
+++ b/contrib/gcppubsub/pkg/controller/channel/names_test.go
@@ -17,11 +17,11 @@ limitations under the License.
package channel
import (
+ v1 "k8s.io/api/core/v1"
"testing"
"github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -44,10 +44,11 @@ func TestGenerateTopicName(t *testing.T) {
func TestGenerateSubName(t *testing.T) {
expected := "knative-eventing-channel_sub-name_sub-uid"
actual := generateSubName(&v1alpha1.ChannelSubscriberSpec{
- Ref: &v1.ObjectReference{
+ DeprecatedRef: &v1.ObjectReference{
Name: "sub-name",
UID: "sub-uid",
},
+ UID: "sub-uid",
})
if expected != actual {
t.Errorf("Expected '%s'. Actual '%s'", expected, actual)
diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go
index b446f11de3c..aa1dc66c7be 100644
--- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go
+++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go
@@ -19,6 +19,7 @@ package channel
import (
"context"
"fmt"
+ "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
ccpcontroller "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner"
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
@@ -28,7 +29,7 @@ import (
"github.com/knative/eventing/pkg/reconciler/names"
"go.uber.org/zap"
"golang.org/x/oauth2/google"
- v1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
@@ -82,7 +83,7 @@ type reconciler struct {
// defaultSecret and defaultSecretKey are the K8s Secret and key in that secret that contain a
// JSON format GCP service account token, see
// https://cloud.google.com/iam/docs/creating-managing-service-account-keys#iam-service-account-keys-create-gcloud
- defaultSecret *v1.ObjectReference
+ defaultSecret *corev1.ObjectReference
defaultSecretKey string
}
@@ -132,12 +133,12 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
// regardless of the error.
} else {
logging.FromContext(ctx).Info("Channel reconciled")
- r.recorder.Eventf(c, v1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name)
+ r.recorder.Eventf(c, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %q", c.Name)
}
if err = util.UpdateChannel(ctx, r.client, c); err != nil {
logging.FromContext(ctx).Info("Error updating Channel Status", zap.Error(err))
- r.recorder.Eventf(c, v1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update Channel's status: %v", err)
return reconcile.Result{}, err
}
@@ -173,7 +174,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
originalPCS, err := pubsubutil.GetInternalStatus(c)
if err != nil {
logging.FromContext(ctx).Error("Unable to read the status.internal", zap.Error(err))
- r.recorder.Eventf(c, v1.EventTypeWarning, channelReadStatusFailed, "Failed to read Channel's status.internal: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, channelReadStatusFailed, "Failed to read Channel's status.internal: %v", err)
return false, err
}
@@ -181,7 +182,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
gcpCreds, err := pubsubutil.GetCredentials(ctx, r.client, r.defaultSecret, r.defaultSecretKey)
if err != nil {
logging.FromContext(ctx).Info("Unable to generate GCP creds", zap.Error(err))
- r.recorder.Eventf(c, v1.EventTypeWarning, gcpCredentialsReadFailed, "Failed to generate GCP credentials: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, gcpCredentialsReadFailed, "Failed to generate GCP credentials: %v", err)
return false, err
}
@@ -194,12 +195,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
// Topic is nil because it is only used for sub creation, not deletion.
err = r.syncSubscriptions(ctx, originalPCS, gcpCreds, nil, subsToSync)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, subscriptionSyncFailed, "Failed to sync Subscription for the Channel: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, subscriptionSyncFailed, "Failed to sync Subscription for the Channel: %v", err)
return false, err
}
err = r.deleteTopic(ctx, originalPCS, gcpCreds)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, topicDeleteFailed, "Failed to delete Topic for the Channel: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, topicDeleteFailed, "Failed to delete Topic for the Channel: %v", err)
return false, err
}
util.RemoveFinalizer(c, finalizerName)
@@ -219,12 +220,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
// only at the status, not the spec.
persist, plannedPCS, subsToSync, err := r.planGcpResources(ctx, c, originalPCS)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, gcpResourcesPlanFailed, "Failed to plan Channel's resources: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, gcpResourcesPlanFailed, "Failed to plan Channel's resources: %v", err)
return false, err
}
if persist == persistStatus {
if err = pubsubutil.SetInternalStatus(ctx, c, plannedPCS); err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, gcpResourcesPersistFailed, "Failed to persist Channel's resources: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, gcpResourcesPersistFailed, "Failed to persist Channel's resources: %v", err)
return false, err
}
// Persist this and run another reconcile loop to enact it.
@@ -233,31 +234,31 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
svc, err := r.createK8sService(ctx, c)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err)
return false, err
}
err = r.createVirtualService(ctx, c, svc)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err)
return false, err
}
topic, err := r.createTopic(ctx, plannedPCS, gcpCreds)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err)
return false, err
}
err = r.syncSubscriptions(ctx, plannedPCS, gcpCreds, topic, subsToSync)
if err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, subscriptionSyncFailed, "Failed to reconcile Subscription for the Channel: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, subscriptionSyncFailed, "Failed to reconcile Subscription for the Channel: %v", err)
return false, err
}
// Now that the subs have synced successfully, remove the old ones from the status.
plannedPCS.Subscriptions = subsToSync.subsToCreate
if err = pubsubutil.SetInternalStatus(ctx, c, plannedPCS); err != nil {
- r.recorder.Eventf(c, v1.EventTypeWarning, subscriptionDeleteFailed, "Failed to delete old Subscriptions from the Channel's status: %v", err)
+ r.recorder.Eventf(c, corev1.EventTypeWarning, subscriptionDeleteFailed, "Failed to delete old Subscriptions from the Channel's status: %v", err)
return false, err
}
@@ -308,13 +309,13 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C
existingSubs := make(map[types.UID]pubsubutil.GcpPubSubSubscriptionStatus, len(originalPCS.Subscriptions))
for _, existingSub := range originalPCS.Subscriptions {
// I don't think this can ever happen, but let's just be sure.
- if existingSub.Ref != nil && existingSub.Ref.UID != "" {
- existingSubs[existingSub.Ref.UID] = existingSub
+ if existingSub.UID != "" {
+ existingSubs[existingSub.UID] = existingSub
}
}
if c.Spec.Subscribable != nil {
for _, subscriber := range c.Spec.Subscribable.Subscribers {
- if subscriber.Ref == nil || subscriber.Ref.UID == "" {
+ if subscriber.UID == "" {
return noNeedToPersist, nil, nil, fmt.Errorf("empty reference UID: %v", subscriber)
}
// Have we already synced this Subscription before? If so, reuse its existing
@@ -322,18 +323,21 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C
// to be persisted before processing (as it only affects the dispatcher, not anything in
// GCP).
var subscription string
- if existingSub, present := existingSubs[subscriber.Ref.UID]; present {
- delete(existingSubs, subscriber.Ref.UID)
+ if existingSub, present := existingSubs[subscriber.UID]; present {
+ delete(existingSubs, subscriber.UID)
subscription = existingSub.Subscription
} else {
persist = persistStatus
subscription = generateSubName(&subscriber)
}
subsToSync.subsToCreate = append(subsToSync.subsToCreate, pubsubutil.GcpPubSubSubscriptionStatus{
- Ref: subscriber.Ref,
- SubscriberURI: subscriber.SubscriberURI,
- ReplyURI: subscriber.ReplyURI,
- Subscription: subscription,
+ ChannelSubscriberSpec: v1alpha1.ChannelSubscriberSpec{
+ DeprecatedRef: subscriber.DeprecatedRef,
+ UID: subscriber.UID,
+ SubscriberURI: subscriber.SubscriberURI,
+ ReplyURI: subscriber.ReplyURI,
+ },
+ Subscription: subscription,
})
}
}
@@ -355,7 +359,7 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C
return persist, newPCS, subsToSync, nil
}
-func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*v1.Service, error) {
+func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*corev1.Service, error) {
svc, err := util.CreateK8sService(ctx, r.client, c)
if err != nil {
logging.FromContext(ctx).Info("Error creating the Channel's K8s Service", zap.Error(err))
@@ -366,7 +370,7 @@ func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.C
return svc, nil
}
-func (r *reconciler) createVirtualService(ctx context.Context, c *eventingv1alpha1.Channel, svc *v1.Service) error {
+func (r *reconciler) createVirtualService(ctx context.Context, c *eventingv1alpha1.Channel, svc *corev1.Service) error {
_, err := util.CreateVirtualService(ctx, r.client, c, svc)
if err != nil {
logging.FromContext(ctx).Info("Error creating the Virtual Service for the Channel", zap.Error(err))
diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
index 771557768a8..b041972563b 100644
--- a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
+++ b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
@@ -74,16 +74,18 @@ var (
subscribers = &v1alpha1.Subscribable{
Subscribers: []v1alpha1.ChannelSubscriberSpec{
{
- Ref: &corev1.ObjectReference{
+ DeprecatedRef: &corev1.ObjectReference{
Name: "sub-name",
UID: "sub-uid",
},
+ UID: "sub-uid",
},
{
- Ref: &corev1.ObjectReference{
+ DeprecatedRef: &corev1.ObjectReference{
Name: "sub-2-name",
UID: "sub-2-uid",
},
+ UID: "sub-2-uid",
},
},
}
@@ -545,7 +547,7 @@ func TestReconcile(t *testing.T) {
WantPresent: []runtime.Object{
makeChannelWithFinalizerAndSubscriberWithoutUID(),
},
- WantErrMsg: "empty reference UID: {&ObjectReference{Kind:,Namespace:,Name:,UID:,APIVersion:,ResourceVersion:,FieldPath:,} http://foo/ }",
+ WantErrMsg: "empty reference UID: {nil http://foo/ }",
WantEvent: []corev1.Event{
events[gcpResourcesPlanFailed],
},
@@ -971,9 +973,7 @@ func makeChannelWithFinalizerAndSubscriberWithoutUID() *eventingv1alpha1.Channel
c.Spec.Subscribable = &v1alpha1.Subscribable{
Subscribers: []v1alpha1.ChannelSubscriberSpec{
{
- Ref: &corev1.ObjectReference{
- UID: "",
- },
+ UID: "",
SubscriberURI: "http://foo/",
},
},
@@ -1002,9 +1002,12 @@ func makeChannelWithFinalizerAndPossiblyOutdatedPlan(outdated bool) *eventingv1a
}
for _, plannedSubUID := range plannedSubUIDs {
sub := pubsubutil.GcpPubSubSubscriptionStatus{
- Ref: &corev1.ObjectReference{
- Name: string(plannedSubUID),
- UID: plannedSubUID,
+ ChannelSubscriberSpec: v1alpha1.ChannelSubscriberSpec{
+ DeprecatedRef: &corev1.ObjectReference{
+ Name: string(plannedSubUID),
+ UID: plannedSubUID,
+ },
+ UID: plannedSubUID,
},
Subscription: "will-be-retained-in-the-plan-without-recalculation",
}
@@ -1023,16 +1026,18 @@ func makeChannelWithFinalizerAndPossiblyOutdatedPlan(outdated bool) *eventingv1a
c.Spec.Subscribable = &v1alpha1.Subscribable{
Subscribers: []v1alpha1.ChannelSubscriberSpec{
{
- Ref: &corev1.ObjectReference{
+ DeprecatedRef: &corev1.ObjectReference{
Name: "keep-sub",
UID: "keep-sub",
},
+ UID: "keep-sub",
},
{
- Ref: &corev1.ObjectReference{
+ DeprecatedRef: &corev1.ObjectReference{
Name: "add-sub",
UID: "add-sub",
},
+ UID: "add-sub",
},
},
}
@@ -1048,10 +1053,13 @@ func addSubscribers(c *eventingv1alpha1.Channel, subscribable *v1alpha1.Subscrib
}
for _, sub := range subscribable.Subscribers {
pcs.Subscriptions = append(pcs.Subscriptions, pubsubutil.GcpPubSubSubscriptionStatus{
- Ref: sub.Ref,
- ReplyURI: sub.ReplyURI,
- SubscriberURI: sub.SubscriberURI,
- Subscription: "test-subscription-id",
+ ChannelSubscriberSpec: v1alpha1.ChannelSubscriberSpec{
+ DeprecatedRef: sub.DeprecatedRef,
+ UID: sub.UID,
+ ReplyURI: sub.ReplyURI,
+ SubscriberURI: sub.SubscriberURI,
+ },
+ Subscription: "test-subscription-id",
})
}
err = pubsubutil.SetInternalStatus(context.Background(), c, pcs)
@@ -1128,7 +1136,7 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
Rewrite: &istiov1alpha3.HTTPRewrite{
Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()),
},
- Route: []istiov1alpha3.DestinationWeight{{
+ Route: []istiov1alpha3.HTTPRouteDestination{{
Destination: istiov1alpha3.Destination{
Host: "in-memory-channel-clusterbus.knative-eventing.svc." + utils.GetClusterDomainName(),
Port: istiov1alpha3.PortSelector{
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
index 0186fbfbbe3..23d0884d981 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
@@ -51,7 +51,7 @@ const (
)
type channelName = types.NamespacedName
-type subscriptionName = types.NamespacedName
+type subscriptionName = types.UID
type empty struct{}
// reconciler reconciles Channels with the gcp-pubsub provisioner. It sets up hanging polling for
@@ -198,11 +198,8 @@ func key(c *eventingv1alpha1.Channel) channelName {
// subscriptionKey creates the second index into reconciler.subscriptions, based on the Subscriber's
// name.
-func subscriptionKey(sub *pubsubutil.GcpPubSubSubscriptionStatus) subscriptionName {
- return types.NamespacedName{
- Namespace: sub.Ref.Namespace,
- Name: sub.Ref.Name,
- }
+func subscriptionKey(sub *pubsubutil.GcpPubSubSubscriptionStatus) types.UID {
+ return sub.UID
}
// stopAllSubscriptions stops listening to all GCP PubSub Subscriptions for the given Channel.
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
index 7ed8937283e..2b8d5ae0544 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
@@ -79,16 +79,10 @@ var (
subscribers = &v1alpha1.Subscribable{
Subscribers: []v1alpha1.ChannelSubscriberSpec{
{
- Ref: &corev1.ObjectReference{
- Name: "sub-name",
- UID: "sub-uid",
- },
+ UID: "sub-uid",
},
{
- Ref: &corev1.ObjectReference{
- Name: "sub-2-name",
- UID: "sub-2-uid",
- },
+ UID: "sub-2-uid",
},
},
}
@@ -181,10 +175,7 @@ func TestReconcile(t *testing.T) {
},
OtherTestData: map[string]interface{}{
shouldBeCanceled: map[channelName]subscriptionName{
- key(makeChannel()): {
- Namespace: cNamespace,
- Name: "sub-name",
- },
+ key(makeChannel()): "sub-name",
},
},
WantPresent: []runtime.Object{
@@ -302,7 +293,7 @@ func TestReconcile(t *testing.T) {
},
},
shouldNotBeCanceled: map[channelName]subscriptionName{
- key(makeChannel()): {Namespace: subscribers.Subscribers[0].Ref.Namespace, Name: subscribers.Subscribers[0].Ref.Name},
+ key(makeChannel()): subscribers.Subscribers[0].UID,
},
},
WantPresent: []runtime.Object{
@@ -327,7 +318,7 @@ func TestReconcile(t *testing.T) {
},
},
shouldBeCanceled: map[channelName]subscriptionName{
- key(makeChannel()): {Namespace: cNamespace, Name: "old-sub"},
+ key(makeChannel()): "old-sub",
},
},
WantPresent: []runtime.Object{
@@ -352,7 +343,7 @@ func TestReconcile(t *testing.T) {
},
},
shouldBeCanceled: map[channelName]subscriptionName{
- key(makeChannel()): {Namespace: cNamespace, Name: "old-sub"},
+ key(makeChannel()): "old-sub",
},
},
WantPresent: []runtime.Object{
@@ -458,10 +449,13 @@ func TestReceiveFunc(t *testing.T) {
for n, tc := range testCases {
t.Run(n, func(t *testing.T) {
sub := util.GcpPubSubSubscriptionStatus{
- SubscriberURI: "subscriber-uri",
- ReplyURI: "reply-uri",
- Subscription: "foo",
+ ChannelSubscriberSpec: v1alpha1.ChannelSubscriberSpec{
+ SubscriberURI: "subscriber-uri",
+ ReplyURI: "reply-uri",
+ },
+ Subscription: "foo",
}
+
defaults := provisioners.DispatchDefaults{
Namespace: cNamespace,
}
@@ -620,9 +614,11 @@ func addSubscribers(c *eventingv1alpha1.Channel, subscribable *v1alpha1.Subscrib
}
for _, sub := range subscribable.Subscribers {
pcs.Subscriptions = append(pcs.Subscriptions, util.GcpPubSubSubscriptionStatus{
- Ref: sub.Ref,
- ReplyURI: sub.ReplyURI,
- SubscriberURI: sub.SubscriberURI,
+ ChannelSubscriberSpec: v1alpha1.ChannelSubscriberSpec{
+ UID: sub.UID,
+ ReplyURI: sub.ReplyURI,
+ SubscriberURI: sub.SubscriberURI,
+ },
})
}
err = util.SetInternalStatus(context.Background(), c, pcs)
diff --git a/contrib/gcppubsub/pkg/util/status.go b/contrib/gcppubsub/pkg/util/status.go
index 7518abb9f2a..b5680e6c7c2 100644
--- a/contrib/gcppubsub/pkg/util/status.go
+++ b/contrib/gcppubsub/pkg/util/status.go
@@ -19,6 +19,7 @@ package util
import (
"context"
"encoding/json"
+ "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
@@ -41,20 +42,14 @@ type GcpPubSubChannelStatus struct {
Topic string `json:"topic,omitempty"`
// Subscriptions is the list of Knative Eventing Subscriptions to this Channel, each paired with
// the PubSub Subscription in GCP that represents it.
- Subscriptions []GcpPubSubSubscriptionStatus `json:"subscriptions,omitempty"`
+ // +patchMergeKey=uid
+ // +patchStrategy=merge
+ Subscriptions []GcpPubSubSubscriptionStatus `json:"subscriptions,omitempty" patchStrategy:"merge" patchMergeKey:"uid"`
}
// GcpPubSubSubscriptionStatus represents the saved status of a gcp-pubsub Channel.
type GcpPubSubSubscriptionStatus struct {
- // Ref is a reference to the Knative Eventing Subscription that this status represents.
- // +optional
- Ref *corev1.ObjectReference `json:"ref,omitempty"`
- // SubscriberURI is a copy of the SubscriberURI of this Subscription.
- // +optional
- SubscriberURI string `json:"subscriberURI,omitempty"`
- // ReplyURI is a copy of the ReplyURI of this Subscription.
- // +optional
- ReplyURI string `json:"replyURI,omitempty"`
+ v1alpha1.ChannelSubscriberSpec
// Subscription is the name of the PubSub Subscription resource in GCP that represents this
// Knative Eventing Subscription.
diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go
index 67253e20ce6..aeec9ab7de1 100644
--- a/contrib/kafka/pkg/controller/channel/reconcile_test.go
+++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go
@@ -556,7 +556,7 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
Rewrite: &istiov1alpha3.HTTPRewrite{
Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
},
- Route: []istiov1alpha3.DestinationWeight{{
+ Route: []istiov1alpha3.HTTPRouteDestination{{
Destination: istiov1alpha3.Destination{
Host: "kafka-provisioner.knative-testing.svc." + utils.GetClusterDomainName(),
Port: istiov1alpha3.PortSelector{
diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go
index adad489be49..a7f362f2575 100644
--- a/contrib/kafka/pkg/dispatcher/dispatcher.go
+++ b/contrib/kafka/pkg/dispatcher/dispatcher.go
@@ -78,8 +78,7 @@ func (c *saramaCluster) GetConsumerMode() cluster.ConsumerMode {
}
type subscription struct {
- Namespace string
- Name string
+ UID string
SubscriberURI string
ReplyURI string
}
@@ -168,7 +167,7 @@ func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, su
topicName := topicUtils.TopicName(controller.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name)
- group := fmt.Sprintf("%s.%s.%s", controller.Name, sub.Namespace, sub.Name)
+ group := fmt.Sprintf("%s.%s", controller.Name, sub.UID)
consumer, err := d.kafkaCluster.NewConsumer(group, []string{topicName})
if err != nil {
@@ -320,8 +319,7 @@ func toKafkaMessage(channel provisioners.ChannelReference, message *provisioners
func newSubscription(spec eventingduck.ChannelSubscriberSpec) subscription {
return subscription{
- Name: spec.Ref.Name,
- Namespace: spec.Ref.Namespace,
+ UID: string(spec.UID),
SubscriberURI: spec.SubscriberURI,
ReplyURI: spec.ReplyURI,
}
diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
index 8c333e7c56e..04e312777e4 100644
--- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go
+++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
@@ -16,7 +16,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.uber.org/zap"
- v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
@@ -224,15 +223,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-1",
- },
+ UID: "subscription-1",
SubscriberURI: "http://test/subscriber",
},
{
- Ref: &v1.ObjectReference{
- Name: "subscription-2",
- },
+ UID: "subscription-2",
SubscriberURI: "http://test/subscriber",
},
},
@@ -252,15 +247,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-1",
- },
+ UID: "subscription-1",
SubscriberURI: "http://test/subscriber",
},
{
- Ref: &v1.ObjectReference{
- Name: "subscription-2",
- },
+ UID: "subscription-2",
SubscriberURI: "http://test/subscriber",
}}}}},
},
@@ -272,15 +263,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-2",
- },
+ UID: "subscription-2",
SubscriberURI: "http://test/subscriber",
},
{
- Ref: &v1.ObjectReference{
- Name: "subscription-3",
- },
+ UID: "subscription-3",
SubscriberURI: "http://test/subscriber",
},
},
@@ -301,15 +288,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-1",
- },
+ UID: "subscription-1",
SubscriberURI: "http://test/subscriber",
},
{
- Ref: &v1.ObjectReference{
- Name: "subscription-2",
- },
+ UID: "subscription-2",
SubscriberURI: "http://test/subscriber",
},
},
@@ -322,9 +305,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-1",
- },
+ UID: "subscription-1",
SubscriberURI: "http://test/subscriber",
},
},
@@ -336,15 +317,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
- Ref: &v1.ObjectReference{
- Name: "subscription-3",
- },
+ UID: "subscription-3",
SubscriberURI: "http://test/subscriber",
},
{
- Ref: &v1.ObjectReference{
- Name: "subscription-4",
- },
+ UID: "subscription-4",
SubscriberURI: "http://test/subscriber",
},
},
@@ -376,7 +353,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
oldSubscribers := sets.NewString()
for _, subMap := range d.kafkaConsumers {
for sub := range subMap {
- oldSubscribers.Insert(sub.Name)
+ oldSubscribers.Insert(sub.UID)
}
}
if diff := sets.NewString(tc.unsubscribes...).Difference(oldSubscribers); diff.Len() != 0 {
@@ -399,7 +376,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
var newSubscribers []string
for _, subMap := range d.kafkaConsumers {
for sub := range subMap {
- newSubscribers = append(newSubscribers, sub.Name)
+ newSubscribers = append(newSubscribers, sub.UID)
}
}
@@ -505,8 +482,7 @@ func TestSubscribe(t *testing.T) {
}
subRef := subscription{
- Name: "test-sub",
- Namespace: "test-ns",
+ UID: "test-sub",
SubscriberURI: server.URL[7:],
}
err := d.subscribe(channelRef, subRef)
@@ -548,8 +524,7 @@ func TestPartitionConsumer(t *testing.T) {
Namespace: "test-ns",
}
subRef := subscription{
- Name: "test-sub",
- Namespace: "test-ns",
+ UID: "test-sub",
SubscriberURI: server.URL[7:],
}
err := d.subscribe(channelRef, subRef)
@@ -592,8 +567,7 @@ func TestSubscribeError(t *testing.T) {
}
subRef := subscription{
- Name: "test-sub",
- Namespace: "test-ns",
+ UID: "test-sub",
}
err := d.subscribe(channelRef, subRef)
if err == nil {
@@ -616,8 +590,7 @@ func TestUnsubscribeUnknownSub(t *testing.T) {
}
subRef := subscription{
- Name: "test-sub",
- Namespace: "test-ns",
+ UID: "test-sub",
}
if err := d.unsubscribe(channelRef, subRef); err != nil {
t.Errorf("Unsubscribe error: %v", err)
diff --git a/contrib/natss/pkg/controller/channel/reconcile_test.go b/contrib/natss/pkg/controller/channel/reconcile_test.go
index 18d0cf5291a..6c4cbaf144e 100644
--- a/contrib/natss/pkg/controller/channel/reconcile_test.go
+++ b/contrib/natss/pkg/controller/channel/reconcile_test.go
@@ -247,7 +247,7 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
Rewrite: &istiov1alpha3.HTTPRewrite{
Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
},
- Route: []istiov1alpha3.DestinationWeight{{
+ Route: []istiov1alpha3.HTTPRouteDestination{{
Destination: istiov1alpha3.Destination{
Host: "kafka-provisioner.knative-eventing.svc." + utils.GetClusterDomainName(),
Port: istiov1alpha3.PortSelector{
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
index fa6675b32cb..f9c8963a4f0 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
@@ -237,7 +237,7 @@ func (s *SubscriptionsSupervisor) subscribe(channel provisioners.ChannelReferenc
return
}
s.logger.Sugar().Infof("NATSS message received from subject: %v; sequence: %v; timestamp: %v, headers: '%s'", msg.Subject, msg.Sequence, msg.Timestamp, message.Headers)
- if err := s.dispatcher.DispatchMessage(&message, subscription.SubscriberURI, subscription.ReplyURI, provisioners.DispatchDefaults{Namespace: subscription.Namespace}); err != nil {
+ if err := s.dispatcher.DispatchMessage(&message, subscription.SubscriberURI, subscription.ReplyURI, provisioners.DispatchDefaults{Namespace: channel.Namespace}); err != nil {
s.logger.Error("Failed to dispatch message: ", zap.Error(err))
return
}
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
index 3af212f2811..54dab1b13b1 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
@@ -55,18 +55,10 @@ var (
subscribers = &v1alpha1.Subscribable{
Subscribers: []v1alpha1.ChannelSubscriberSpec{
{
- Ref: &corev1.ObjectReference{
- Name: "sub-name1",
- Namespace: "sub-namespace1",
- UID: "sub-uid1",
- },
+ UID: "sub-uid1",
},
{
- Ref: &corev1.ObjectReference{
- Name: "sub-name2",
- Namespace: "sub-namespace2",
- UID: "sub-uid2",
- },
+ UID: "sub-uid2",
},
},
}
@@ -118,7 +110,7 @@ func TestSubscribeUnsubscribe(t *testing.T) {
logger.Info("TestSubscribeUnsubscribe()")
cRef := provisioners.ChannelReference{Namespace: "test_namespace", Name: "test_channel"}
- sRef := subscriptionReference{Name: "sub_name_2", Namespace: "sub_namespace_2", SubscriberURI: "", ReplyURI: ""}
+ sRef := subscriptionReference{UID: "sub_name_2", SubscriberURI: "", ReplyURI: ""}
// subscribe to a channel
if _, err := s.subscribe(cRef, sRef); err != nil {
@@ -133,7 +125,7 @@ func TestMalformedMessage(t *testing.T) {
logger.Info("TestMalformedMessage()")
cRef := provisioners.ChannelReference{Namespace: "test_namespace", Name: "test_channel"}
- sRef := subscriptionReference{Name: "sub_name", Namespace: "sub_namespace", SubscriberURI: "", ReplyURI: ""}
+ sRef := subscriptionReference{UID: "sub_name", SubscriberURI: "", ReplyURI: ""}
// subscribe to a channel
if _, err := s.subscribe(cRef, sRef); err != nil {
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/types.go b/contrib/natss/pkg/dispatcher/dispatcher/types.go
index 69e5ae6dd15..487e454d570 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/types.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/types.go
@@ -23,21 +23,19 @@ import (
)
type subscriptionReference struct {
- Name string
- Namespace string
+ UID string
SubscriberURI string
ReplyURI string
}
func newSubscriptionReference(spec eventingduck.ChannelSubscriberSpec) subscriptionReference {
return subscriptionReference{
- Name: spec.Ref.Name,
- Namespace: spec.Ref.Namespace,
+ UID: string(spec.UID),
SubscriberURI: spec.SubscriberURI,
ReplyURI: spec.ReplyURI,
}
}
func (r *subscriptionReference) String() string {
- return fmt.Sprintf("%s.%s", r.Name, r.Namespace)
+ return fmt.Sprintf("%s", r.UID)
}
diff --git a/docs/spec/spec.md b/docs/spec/spec.md
index fc4db4f657e..74c076657bc 100644
--- a/docs/spec/spec.md
+++ b/docs/spec/spec.md
@@ -245,7 +245,7 @@ a Channel system that receives and delivers events._
| Field | Type | Description | Constraints |
| ------------- | --------------- | -------------------------------------------------------------- | -------------- |
-| ref | ObjectReference | The Subscription this ChannelSubscriberSpec was resolved from. | |
+| uid | String | The Subscription UID this ChannelSubscriberSpec was resolved from. | |
| subscriberURI | String | The URI name of the endpoint for the subscriber. | Must be a URL. |
| replyURI | String | The URI name of the endpoint for the reply. | Must be a URL. |
diff --git a/hack/k8s-dynamic-fake-simple.patch b/hack/k8s-dynamic-fake-simple.patch
new file mode 100644
index 00000000000..37e8835ec79
--- /dev/null
+++ b/hack/k8s-dynamic-fake-simple.patch
@@ -0,0 +1,13 @@
+diff --git a/vendor/k8s.io/client-go/dynamic/fake/simple.go b/vendor/k8s.io/client-go/dynamic/fake/simple.go
+index 13e2d805..dde45892 100644
+--- a/vendor/k8s.io/client-go/dynamic/fake/simple.go
++++ b/vendor/k8s.io/client-go/dynamic/fake/simple.go
+@@ -45,7 +45,7 @@ func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *
+ }
+ }
+
+- cs := &FakeDynamicClient{}
++ cs := &FakeDynamicClient{scheme: scheme}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
diff --git a/hack/update-deps.sh b/hack/update-deps.sh
index 5d7bd82fc59..dbe3ee462f6 100755
--- a/hack/update-deps.sh
+++ b/hack/update-deps.sh
@@ -30,3 +30,11 @@ rm -rf $(find vendor/ -name 'BUILD.bazel')
update_licenses third_party/VENDOR-LICENSE \
$(find . -name "*.go" | grep -v vendor | xargs grep "package main" | cut -d: -f1 | xargs -n1 dirname | uniq)
+
+
+# HACK HACK HACK
+# TODO(https://github.com/knative/eventing/issues/1065): remove when we can update to 1.13.0 k8s clients.
+# k8s.io/client-go/dynamic/fake/simple.go has a bug until > v1.13.0, they did not set the scheme in the fake dynamic client.
+# Because this is only needed for test code to work, apply a patch while updating deps.
+# produced with git diff origin/master HEAD -- vendor/k8s.io/client-go/dynamic/fake/simple.go > ./hack/k8s-dynamic-fake-simple.patch
+git apply ${REPO_ROOT_DIR}/hack/k8s-dynamic-fake-simple.patch
\ No newline at end of file
diff --git a/pkg/apis/duck/v1alpha1/subscribable_types.go b/pkg/apis/duck/v1alpha1/subscribable_types.go
index 9b8502fdbc9..740a035ead3 100644
--- a/pkg/apis/duck/v1alpha1/subscribable_types.go
+++ b/pkg/apis/duck/v1alpha1/subscribable_types.go
@@ -21,6 +21,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
)
// Subscribable is the schema for the subscribable portion of the spec
@@ -28,9 +29,9 @@ import (
type Subscribable struct {
// TODO: What is actually required here for Channel spec.
// This is the list of subscriptions for this channel.
- // +patchMergeKey=ref
+ // +patchMergeKey=uid
// +patchStrategy=merge
- Subscribers []ChannelSubscriberSpec `json:"subscribers,omitempty" patchStrategy:"merge" patchMergeKey:"ref"`
+ Subscribers []ChannelSubscriberSpec `json:"subscribers,omitempty" patchStrategy:"merge" patchMergeKey:"uid"`
}
// ChannelSubscriberSpec defines a single subscriber to a Channel.
@@ -39,8 +40,12 @@ type Subscribable struct {
// ReplyURI is the endpoint for the reply
// At least one of SubscriberURI and ReplyURI must be present
type ChannelSubscriberSpec struct {
+ // Deprecated: use UID.
// +optional
- Ref *corev1.ObjectReference `json:"ref,omitempty"`
+ DeprecatedRef *corev1.ObjectReference `json:"ref,omitempty" yaml:"ref,omitempty"`
+ // UID is used to understand the origin of the subscriber.
+ // +optional
+ UID types.UID `json:"uid,omitempty"`
// +optional
SubscriberURI string `json:"subscriberURI,omitempty"`
// +optional
@@ -74,23 +79,11 @@ func (c *Channel) Populate() {
c.Spec.Subscribable = &Subscribable{
// Populate ALL fields
Subscribers: []ChannelSubscriberSpec{{
- Ref: &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Subscription",
- Name: "subscription1",
- Namespace: "default",
- UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1",
- },
+ UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1",
SubscriberURI: "call1",
ReplyURI: "sink2",
}, {
- Ref: &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Subscription",
- Name: "subscription2",
- Namespace: "default",
- UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1",
- },
+ UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1",
SubscriberURI: "call2",
ReplyURI: "sink2",
}},
diff --git a/pkg/apis/duck/v1alpha1/subscribable_types_test.go b/pkg/apis/duck/v1alpha1/subscribable_types_test.go
index c14bbc70d92..ccc6512ab36 100644
--- a/pkg/apis/duck/v1alpha1/subscribable_types_test.go
+++ b/pkg/apis/duck/v1alpha1/subscribable_types_test.go
@@ -20,7 +20,6 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
- corev1 "k8s.io/api/core/v1"
)
func TestGetFullType(t *testing.T) {
@@ -50,23 +49,11 @@ func TestPopulate(t *testing.T) {
Spec: ChannelSpec{
Subscribable: &Subscribable{
Subscribers: []ChannelSubscriberSpec{{
- Ref: &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Subscription",
- Name: "subscription1",
- Namespace: "default",
- UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1",
- },
+ UID: "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1",
SubscriberURI: "call1",
ReplyURI: "sink2",
}, {
- Ref: &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Subscription",
- Name: "subscription2",
- Namespace: "default",
- UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1",
- },
+ UID: "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1",
SubscriberURI: "call2",
ReplyURI: "sink2",
}},
diff --git a/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
index da9a113ddb7..e1885fcac5e 100644
--- a/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
@@ -101,8 +101,8 @@ func (in *ChannelSpec) DeepCopy() *ChannelSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChannelSubscriberSpec) DeepCopyInto(out *ChannelSubscriberSpec) {
*out = *in
- if in.Ref != nil {
- in, out := &in.Ref, &out.Ref
+ if in.DeprecatedRef != nil {
+ in, out := &in.DeprecatedRef, &out.DeprecatedRef
*out = new(v1.ObjectReference)
**out = **in
}
diff --git a/pkg/apis/eventing/v1alpha1/cluster_channel_provisioner_types.go b/pkg/apis/eventing/v1alpha1/cluster_channel_provisioner_types.go
index f38984327dc..09f8ccfed94 100644
--- a/pkg/apis/eventing/v1alpha1/cluster_channel_provisioner_types.go
+++ b/pkg/apis/eventing/v1alpha1/cluster_channel_provisioner_types.go
@@ -26,7 +26,6 @@ import (
)
// +genclient
-// +genclient:noStatus
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/eventing/v1alpha1/subscription_types.go b/pkg/apis/eventing/v1alpha1/subscription_types.go
index 0085b796197..6e03585190e 100644
--- a/pkg/apis/eventing/v1alpha1/subscription_types.go
+++ b/pkg/apis/eventing/v1alpha1/subscription_types.go
@@ -26,7 +26,6 @@ import (
)
// +genclient
-// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:defaulter-gen=true
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
index 951b0f919eb..910eb0aec16 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
@@ -37,6 +37,7 @@ type ClusterChannelProvisionersGetter interface {
type ClusterChannelProvisionerInterface interface {
Create(*v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error)
Update(*v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error)
+ UpdateStatus(*v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.ClusterChannelProvisioner, error)
@@ -113,6 +114,21 @@ func (c *clusterChannelProvisioners) Update(clusterChannelProvisioner *v1alpha1.
return
}
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *clusterChannelProvisioners) UpdateStatus(clusterChannelProvisioner *v1alpha1.ClusterChannelProvisioner) (result *v1alpha1.ClusterChannelProvisioner, err error) {
+ result = &v1alpha1.ClusterChannelProvisioner{}
+ err = c.client.Put().
+ Resource("clusterchannelprovisioners").
+ Name(clusterChannelProvisioner.Name).
+ SubResource("status").
+ Body(clusterChannelProvisioner).
+ Do().
+ Into(result)
+ return
+}
+
// Delete takes name of the clusterChannelProvisioner and deletes it. Returns an error if one occurs.
func (c *clusterChannelProvisioners) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
index 785d0af0b42..0c908f50172 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
@@ -94,6 +94,17 @@ func (c *FakeClusterChannelProvisioners) Update(clusterChannelProvisioner *v1alp
return obj.(*v1alpha1.ClusterChannelProvisioner), err
}
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClusterChannelProvisioners) UpdateStatus(clusterChannelProvisioner *v1alpha1.ClusterChannelProvisioner) (*v1alpha1.ClusterChannelProvisioner, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(clusterchannelprovisionersResource, "status", clusterChannelProvisioner), &v1alpha1.ClusterChannelProvisioner{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterChannelProvisioner), err
+}
+
// Delete takes name of the clusterChannelProvisioner and deletes it. Returns an error if one occurs.
func (c *FakeClusterChannelProvisioners) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
index 85a728524aa..e8f4d302ca3 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
@@ -100,6 +100,18 @@ func (c *FakeSubscriptions) Update(subscription *v1alpha1.Subscription) (result
return obj.(*v1alpha1.Subscription), err
}
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeSubscriptions) UpdateStatus(subscription *v1alpha1.Subscription) (*v1alpha1.Subscription, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(subscriptionsResource, "status", c.ns, subscription), &v1alpha1.Subscription{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Subscription), err
+}
+
// Delete takes name of the subscription and deletes it. Returns an error if one occurs.
func (c *FakeSubscriptions) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
index dde8b0ac06b..f06468c1027 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
@@ -37,6 +37,7 @@ type SubscriptionsGetter interface {
type SubscriptionInterface interface {
Create(*v1alpha1.Subscription) (*v1alpha1.Subscription, error)
Update(*v1alpha1.Subscription) (*v1alpha1.Subscription, error)
+ UpdateStatus(*v1alpha1.Subscription) (*v1alpha1.Subscription, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.Subscription, error)
@@ -120,6 +121,22 @@ func (c *subscriptions) Update(subscription *v1alpha1.Subscription) (result *v1a
return
}
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *subscriptions) UpdateStatus(subscription *v1alpha1.Subscription) (result *v1alpha1.Subscription, err error) {
+ result = &v1alpha1.Subscription{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(subscription.Name).
+ SubResource("status").
+ Body(subscription).
+ Do().
+ Into(result)
+ return
+}
+
// Delete takes name of the subscription and deletes it. Returns an error if one occurs.
func (c *subscriptions) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go
index 5ed21a42528..296c175225e 100644
--- a/pkg/logging/logging.go
+++ b/pkg/logging/logging.go
@@ -19,11 +19,13 @@ package logging
import (
"context"
-
"github.com/knative/pkg/logging"
"go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
)
+const ConfigMapNameEnv = "CONFIG_LOGGING_NAME"
+
func WithLogger(ctx context.Context, logger *zap.Logger) context.Context {
return logging.WithLogger(ctx, logger.Sugar())
}
@@ -36,3 +38,36 @@ func With(ctx context.Context, fields ...zap.Field) context.Context {
logger := FromContext(ctx)
return WithLogger(ctx, logger.With(fields...))
}
+
+var components = []string{"controller", "webhook"}
+
+// NewLogger creates a logger with the supplied configuration.
+// In addition to the logger, it returns AtomicLevel that can
+// be used to change the logging level at runtime.
+// If configuration is empty, a fallback configuration is used.
+// If configuration cannot be used to instantiate a logger,
+// the same fallback configuration is used.
+func NewLogger(configJSON string, levelOverride string) (*zap.SugaredLogger, zap.AtomicLevel) {
+ return logging.NewLogger(configJSON, levelOverride)
+}
+
+// NewLoggerFromConfig creates a logger using the provided Config
+func NewLoggerFromConfig(config *logging.Config, name string) (*zap.SugaredLogger, zap.AtomicLevel) {
+ return logging.NewLoggerFromConfig(config, name)
+}
+
+// NewConfigFromMap creates a LoggingConfig from the supplied map
+func NewConfigFromMap(data map[string]string) (*logging.Config, error) {
+ return logging.NewConfigFromMap(data, components...)
+}
+
+// NewConfigFromConfigMap creates a LoggingConfig from the supplied ConfigMap
+func NewConfigFromConfigMap(configMap *corev1.ConfigMap) (*logging.Config, error) {
+ return logging.NewConfigFromConfigMap(configMap, components...)
+}
+
+// UpdateLevelFromConfigMap returns a helper func that can be used to update the logging level
+// when a config map is updated
+func UpdateLevelFromConfigMap(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel, levelKey string) func(configMap *corev1.ConfigMap) {
+ return logging.UpdateLevelFromConfigMap(logger, atomicLevel, levelKey, components...)
+}
diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index 497dae5adc8..d31d490b139 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -368,7 +368,7 @@ func newVirtualService(channel *eventingv1alpha1.Channel, svc *corev1.Service) *
Rewrite: &istiov1alpha3.HTTPRewrite{
Authority: channelHostName(channel.Name, channel.Namespace),
},
- Route: []istiov1alpha3.DestinationWeight{{
+ Route: []istiov1alpha3.HTTPRouteDestination{{
Destination: istiov1alpha3.Destination{
Host: destinationHost,
Port: istiov1alpha3.PortSelector{
diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go
index 848dda5d8f3..502c1fd5909 100644
--- a/pkg/provisioners/channel_util_test.go
+++ b/pkg/provisioners/channel_util_test.go
@@ -676,7 +676,7 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
Rewrite: &istiov1alpha3.HTTPRewrite{
Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
},
- Route: []istiov1alpha3.DestinationWeight{{
+ Route: []istiov1alpha3.HTTPRouteDestination{{
Destination: istiov1alpha3.Destination{
Host: fmt.Sprintf("%s-dispatcher.knative-testing.svc.%s", clusterChannelProvisionerName, utils.GetClusterDomainName()),
Port: istiov1alpha3.PortSelector{
diff --git a/pkg/reconciler/handler.go b/pkg/reconciler/handler.go
new file mode 100644
index 00000000000..0c395fe90a3
--- /dev/null
+++ b/pkg/reconciler/handler.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconciler
+
+import (
+ "github.com/knative/pkg/controller"
+ "k8s.io/client-go/tools/cache"
+)
+
+// Handler wraps the provided handler function into a cache.ResourceEventHandler
+// that sends all events to the given handler. For Updates, only the new object
+// is forwarded.
+func Handler(h func(interface{})) cache.ResourceEventHandler {
+ return cache.ResourceEventHandlerFuncs{
+ AddFunc: h,
+ UpdateFunc: controller.PassNew(h),
+ DeleteFunc: h,
+ }
+}
diff --git a/pkg/reconciler/reconciler.go b/pkg/reconciler/reconciler.go
new file mode 100644
index 00000000000..a15d1246916
--- /dev/null
+++ b/pkg/reconciler/reconciler.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconciler
+
+import (
+ "github.com/knative/pkg/configmap"
+ "github.com/knative/pkg/system"
+ "time"
+
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/scheme"
+ typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/record"
+
+ clientset "github.com/knative/eventing/pkg/client/clientset/versioned"
+ eventingScheme "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ "github.com/knative/pkg/logging/logkey"
+)
+
+// Options defines the common reconciler options.
+// We define this to reduce the boilerplate argument list when
+// creating our controllers.
+type Options struct {
+ KubeClientSet kubernetes.Interface
+ DynamicClientSet dynamic.Interface
+
+ EventingClientSet clientset.Interface
+ //CachingClientSet cachingclientset.Interface
+
+ Recorder record.EventRecorder
+ //StatsReporter StatsReporter
+
+ ConfigMapWatcher configmap.Watcher
+ Logger *zap.SugaredLogger
+
+ ResyncPeriod time.Duration
+ StopChannel <-chan struct{}
+}
+
+// This is mutable for testing.
+var resetPeriod = 30 * time.Second
+
+func NewOptionsOrDie(cfg *rest.Config, logger *zap.SugaredLogger, stopCh <-chan struct{}) Options {
+ kubeClient := kubernetes.NewForConfigOrDie(cfg)
+ eventingClient := clientset.NewForConfigOrDie(cfg)
+ dynamicClient := dynamic.NewForConfigOrDie(cfg)
+
+ configMapWatcher := configmap.NewInformedWatcher(kubeClient, system.Namespace())
+
+ return Options{
+ KubeClientSet: kubeClient,
+ DynamicClientSet: dynamicClient,
+ EventingClientSet: eventingClient,
+ ConfigMapWatcher: configMapWatcher,
+ Logger: logger,
+ ResyncPeriod: 10 * time.Hour, // Based on controller-runtime default.
+ StopChannel: stopCh,
+ }
+}
+
+// GetTrackerLease returns a multiple of the resync period to use as the
+// duration for tracker leases. This attempts to ensure that resyncs happen to
+// refresh leases frequently enough that we don't miss updates to tracked
+// objects.
+func (o Options) GetTrackerLease() time.Duration {
+ return o.ResyncPeriod * 3
+}
+
+// Base implements the core controller logic, given a Reconciler.
+type Base struct {
+ // KubeClientSet allows us to talk to the k8s for core APIs
+ KubeClientSet kubernetes.Interface
+
+ // EventingClientSet allows us to configure Eventing objects
+ EventingClientSet clientset.Interface
+
+ // DynamicClientSet allows us to work with arbitrary k8s resources via the dynamic client
+ DynamicClientSet dynamic.Interface
+
+ // ConfigMapWatcher allows us to watch for ConfigMap changes.
+ ConfigMapWatcher configmap.Watcher
+
+ // Recorder is an event recorder for recording Event resources to the
+ // Kubernetes API.
+ Recorder record.EventRecorder
+
+ // StatsReporter reports reconciler's metrics.
+ //StatsReporter StatsReporter
+
+ // Sugared logger is easier to use but is not as performant as the
+ // raw logger. In performance critical paths, call logger.Desugar()
+ // and use the returned raw logger instead. In addition to the
+ // performance benefits, raw logger also preserves type-safety at
+ // the expense of slightly greater verbosity.
+ Logger *zap.SugaredLogger
+}
+
+// NewBase instantiates a new instance of Base implementing
+// the common & boilerplate code between our reconcilers.
+func NewBase(opt Options, controllerAgentName string) *Base {
+ // Enrich the logs with controller name
+ logger := opt.Logger.Named(controllerAgentName).With(zap.String(logkey.ControllerType, controllerAgentName))
+
+ recorder := opt.Recorder
+ if recorder == nil {
+ // Create event broadcaster
+ logger.Debug("Creating event broadcaster")
+ eventBroadcaster := record.NewBroadcaster()
+ watches := []watch.Interface{
+ eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
+ eventBroadcaster.StartRecordingToSink(
+ &typedcorev1.EventSinkImpl{Interface: opt.KubeClientSet.CoreV1().Events("")}),
+ }
+ recorder = eventBroadcaster.NewRecorder(
+ scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
+ go func() {
+ <-opt.StopChannel
+ for _, w := range watches {
+ w.Stop()
+ }
+ }()
+ }
+
+ //statsReporter := opt.StatsReporter
+ //if statsReporter == nil {
+ // logger.Debug("Creating stats reporter")
+ // var err error
+ // statsReporter, err = NewStatsReporter(controllerAgentName)
+ // if err != nil {
+ // logger.Fatal(err)
+ // }
+ //}
+
+ base := &Base{
+ KubeClientSet: opt.KubeClientSet,
+ EventingClientSet: opt.EventingClientSet,
+ DynamicClientSet: opt.DynamicClientSet,
+ ConfigMapWatcher: opt.ConfigMapWatcher,
+ Recorder: recorder,
+ //StatsReporter: statsReporter,
+ Logger: logger,
+ }
+
+ return base
+}
+
+func init() {
+ // Add eventing types to the default Kubernetes Scheme so Events can be
+ // logged for eventing types.
+ eventingScheme.AddToScheme(scheme.Scheme)
+}
diff --git a/pkg/reconciler/stats.go b/pkg/reconciler/stats.go
new file mode 100644
index 00000000000..adde2b36abc
--- /dev/null
+++ b/pkg/reconciler/stats.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconciler
+
+import (
+ "github.com/knative/pkg/controller"
+ "go.uber.org/zap"
+)
+
+// MustNewStatsReporter creates a new instance of StatsReporter. Panics if creation fails.
+func MustNewStatsReporter(reconciler string, logger *zap.SugaredLogger) controller.StatsReporter {
+ stats, err := controller.NewStatsReporter(reconciler)
+ if err != nil {
+ logger.Fatalw("Failed to initialize the stats reporter", zap.Error(err))
+ }
+ return stats
+}
diff --git a/pkg/reconciler/v1alpha1/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go
similarity index 61%
rename from pkg/reconciler/v1alpha1/subscription/subscription.go
rename to pkg/reconciler/subscription/subscription.go
index 12e1cb1a821..e241a1b0a70 100644
--- a/pkg/reconciler/v1alpha1/subscription/subscription.go
+++ b/pkg/reconciler/subscription/subscription.go
@@ -18,33 +18,36 @@ package subscription
import (
"context"
+ "encoding/json"
"fmt"
+ "k8s.io/apimachinery/pkg/labels"
+ "reflect"
+ "time"
eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
"github.com/knative/eventing/pkg/utils/resolve"
"github.com/knative/pkg/apis/duck"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/record"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
+ "k8s.io/client-go/tools/cache"
)
const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "Subscriptions"
// controllerAgentName is the string used by this controller to identify
// itself when creating events.
controllerAgentName = "subscription-controller"
@@ -60,57 +63,59 @@ const (
resultResolveFailed = "ResultResolveFailed"
)
-type reconciler struct {
- client client.Client
- restConfig *rest.Config
- dynamicClient dynamic.Interface
- recorder record.EventRecorder
- logger *zap.Logger
+type Reconciler struct {
+ *reconciler.Base
+
+ // listers index properties about resources
+ subscriptionLister listers.SubscriptionLister
}
-// Verify the struct implements reconcile.Reconciler
-var _ reconcile.Reconciler = &reconciler{}
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
-// ProvideController returns a Subscription controller.
-func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
- // Setup a new controller to Reconcile Subscriptions.
- c, err := controller.New(controllerAgentName, mgr, controller.Options{
- Reconciler: &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
- },
- })
- if err != nil {
- return nil, err
- }
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ subscriptionInformer eventinginformers.SubscriptionInformer,
+) *controller.Impl {
- // Watch Subscription events and enqueue Subscription object key.
- if err = c.Watch(&source.Kind{Type: &v1alpha1.Subscription{}}, &handler.EnqueueRequestForObject{}); err != nil {
- return nil, err
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ subscriptionLister: subscriptionInformer.Lister(),
}
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+ subscriptionInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
- return c, nil
+ return impl
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Subscription resource
// with the current status of the resource.
-func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", request)))
- logging.FromContext(ctx).Debug("Reconciling Subscription")
- subscription := &v1alpha1.Subscription{}
- err := r.client.Get(ctx, request.NamespacedName, subscription)
-
- if errors.IsNotFound(err) {
- logging.FromContext(ctx).Info("Could not find Subscription")
- return reconcile.Result{}, nil
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
}
- if err != nil {
- logging.FromContext(ctx).Error("Error getting Subscription", zap.Error(err))
- return reconcile.Result{}, err
+ // Get the Subscription resource with this namespace/name
+ original, err := r.subscriptionLister.Subscriptions(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("subscription key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
}
+ // Don't modify the informers copy
+ subscription := original.DeepCopy()
+
// Reconcile this copy of the Subscription and then write back any status
// updates regardless of whether the reconcile error out.
err = r.reconcile(ctx, subscription)
@@ -118,20 +123,20 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
logging.FromContext(ctx).Warn("Error reconciling Subscription", zap.Error(err))
} else {
logging.FromContext(ctx).Debug("Subscription reconciled")
- r.recorder.Eventf(subscription, corev1.EventTypeNormal, subscriptionReconciled, "Subscription reconciled: %q", subscription.Name)
+ r.Recorder.Eventf(subscription, corev1.EventTypeNormal, subscriptionReconciled, "Subscription reconciled: %q", subscription.Name)
}
if _, updateStatusErr := r.updateStatus(ctx, subscription.DeepCopy()); updateStatusErr != nil {
logging.FromContext(ctx).Warn("Failed to update the Subscription", zap.Error(err))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, subscriptionUpdateStatusFailed, "Failed to update Subscription's status: %v", err)
- return reconcile.Result{}, updateStatusErr
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, subscriptionUpdateStatusFailed, "Failed to update Subscription's status: %v", err)
+ return updateStatusErr
}
// Requeue if the resource is not ready:
- return reconcile.Result{}, err
+ return err
}
-func (r *reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subscription) error {
+func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subscription) error {
subscription.Status.InitializeConditions()
// See if the subscription has been deleted
@@ -142,29 +147,30 @@ func (r *reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
err := r.syncPhysicalChannel(ctx, subscription, true)
if err != nil {
logging.FromContext(ctx).Warn("Failed to sync physical from Channel", zap.Error(err))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err)
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err)
return err
}
}
removeFinalizer(subscription)
- return nil
+ _, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(subscription.Namespace).Update(subscription)
+ return err
}
// Verify that `channel` exists.
- if _, err := resolve.ObjectReference(ctx, r.dynamicClient, subscription.Namespace, &subscription.Spec.Channel); err != nil {
+ if _, err := resolve.ObjectReference(ctx, r.DynamicClientSet, subscription.Namespace, &subscription.Spec.Channel); err != nil {
logging.FromContext(ctx).Warn("Failed to validate Channel exists",
zap.Error(err),
zap.Any("channel", subscription.Spec.Channel))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, channelReferenceFetchFailed, "Failed to validate spec.channel exists: %v", err)
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, channelReferenceFetchFailed, "Failed to validate spec.channel exists: %v", err)
return err
}
- subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, subscription.Namespace, subscription.Spec.Subscriber)
+ subscriberURI, err := resolve.SubscriberSpec(ctx, r.DynamicClientSet, subscription.Namespace, subscription.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Warn("Failed to resolve Subscriber",
zap.Error(err),
zap.Any("subscriber", subscription.Spec.Subscriber))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, subscriberResolveFailed, "Failed to resolve spec.subscriber: %v", err)
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, subscriberResolveFailed, "Failed to resolve spec.subscriber: %v", err)
return err
}
@@ -176,7 +182,7 @@ func (r *reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
logging.FromContext(ctx).Warn("Failed to resolve reply",
zap.Error(err),
zap.Any("reply", subscription.Spec.Reply))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, resultResolveFailed, "Failed to resolve spec.reply: %v", err)
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, resultResolveFailed, "Failed to resolve spec.reply: %v", err)
return err
}
@@ -186,16 +192,19 @@ func (r *reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
// Everything that was supposed to be resolved was, so flip the status bit on that.
subscription.Status.MarkReferencesResolved()
+ if err := r.ensureFinalizer(subscription); err != nil {
+ return err
+ }
+
// Ok, now that we have the Channel and at least one of the Call/Result, let's reconcile
// the Channel with this information.
if err := r.syncPhysicalChannel(ctx, subscription, false); err != nil {
logging.FromContext(ctx).Warn("Failed to sync physical Channel", zap.Error(err))
- r.recorder.Eventf(subscription, corev1.EventTypeWarning, physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err)
+ r.Recorder.Eventf(subscription, corev1.EventTypeWarning, physicalChannelSyncFailed, "Failed to sync physical Channel: %v", err)
return err
}
// Everything went well, set the fact that subscriptions have been modified
subscription.Status.MarkChannelReady()
- addFinalizer(subscription)
return nil
}
@@ -203,51 +212,61 @@ func isNilOrEmptyReply(reply *v1alpha1.ReplyStrategy) bool {
return reply == nil || equality.Semantic.DeepEqual(reply, &v1alpha1.ReplyStrategy{})
}
-// updateStatus may in fact update the subscription's finalizers in addition to the status
-func (r *reconciler) updateStatus(ctx context.Context, subscription *v1alpha1.Subscription) (*v1alpha1.Subscription, error) {
- objectKey := client.ObjectKey{Namespace: subscription.Namespace, Name: subscription.Name}
- latestSubscription := &v1alpha1.Subscription{}
-
- if err := r.client.Get(ctx, objectKey, latestSubscription); err != nil {
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.Subscription) (*v1alpha1.Subscription, error) {
+ subscription, err := r.subscriptionLister.Subscriptions(desired.Namespace).Get(desired.Name)
+ if err != nil {
return nil, err
}
- subscriptionChanged := false
+ // If there's nothing to update, just return.
+ if reflect.DeepEqual(subscription.Status, desired.Status) {
+ return subscription, nil
+ }
+
+ becomesReady := desired.Status.IsReady() && !subscription.Status.IsReady()
- if !equality.Semantic.DeepEqual(latestSubscription.Finalizers, subscription.Finalizers) {
- latestSubscription.SetFinalizers(subscription.ObjectMeta.Finalizers)
- if err := r.client.Update(ctx, latestSubscription); err != nil {
- return nil, err
- }
- subscriptionChanged = true
+ // Don't modify the informers copy.
+ existing := subscription.DeepCopy()
+ existing.Status = desired.Status
+
+ svc, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(desired.Namespace).UpdateStatus(existing)
+ if err == nil && becomesReady {
+ duration := time.Since(svc.ObjectMeta.CreationTimestamp.Time)
+ r.Logger.Infof("Subscription %q became ready after %v", subscription.Name, duration)
+ //r.StatsReporter.ReportServiceReady(subscription.Namespace, subscription.Name, duration) // TODO: stats
}
- if equality.Semantic.DeepEqual(latestSubscription.Status, subscription.Status) {
- return latestSubscription, nil
+ return svc, err
+}
+
+func (c *Reconciler) ensureFinalizer(sub *v1alpha1.Subscription) error {
+ finalizers := sets.NewString(sub.Finalizers...)
+ if finalizers.Has(finalizerName) {
+ return nil
}
- if subscriptionChanged {
- // Refetch
- latestSubscription = &v1alpha1.Subscription{}
- if err := r.client.Get(ctx, objectKey, latestSubscription); err != nil {
- return nil, err
- }
+ mergePatch := map[string]interface{}{
+ "metadata": map[string]interface{}{
+ "finalizers": append(sub.Finalizers, finalizerName),
+ "resourceVersion": sub.ResourceVersion,
+ },
}
- latestSubscription.Status = subscription.Status
- if err := r.client.Status().Update(ctx, latestSubscription); err != nil {
- return nil, err
+ patch, err := json.Marshal(mergePatch)
+ if err != nil {
+ return err
}
- return latestSubscription, nil
+ _, err = c.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Patch(sub.Name, types.MergePatchType, patch)
+ return err
}
// resolveResult resolves the Spec.Result object.
-func (r *reconciler) resolveResult(ctx context.Context, namespace string, replyStrategy *v1alpha1.ReplyStrategy) (string, error) {
+func (r *Reconciler) resolveResult(ctx context.Context, namespace string, replyStrategy *v1alpha1.ReplyStrategy) (string, error) {
if isNilOrEmptyReply(replyStrategy) {
return "", nil
}
- obj, err := resolve.ObjectReference(ctx, r.dynamicClient, namespace, replyStrategy.Channel)
+ obj, err := resolve.ObjectReference(ctx, r.DynamicClientSet, namespace, replyStrategy.Channel)
if err != nil {
logging.FromContext(ctx).Warn("Failed to fetch ReplyStrategy Channel",
zap.Error(err),
@@ -266,7 +285,7 @@ func (r *reconciler) resolveResult(ctx context.Context, namespace string, replyS
return "", fmt.Errorf("status does not contain address")
}
-func (r *reconciler) syncPhysicalChannel(ctx context.Context, sub *v1alpha1.Subscription, isDeleted bool) error {
+func (r *Reconciler) syncPhysicalChannel(ctx context.Context, sub *v1alpha1.Subscription, isDeleted bool) error {
logging.FromContext(ctx).Debug("Reconciling physical from Channel", zap.Any("sub", sub))
subs, err := r.listAllSubscriptionsWithPhysicalChannel(ctx, sub)
@@ -293,50 +312,38 @@ func (r *reconciler) syncPhysicalChannel(ctx context.Context, sub *v1alpha1.Subs
return nil
}
-func (r *reconciler) listAllSubscriptionsWithPhysicalChannel(ctx context.Context, sub *v1alpha1.Subscription) ([]v1alpha1.Subscription, error) {
+func (r *Reconciler) listAllSubscriptionsWithPhysicalChannel(ctx context.Context, sub *v1alpha1.Subscription) ([]v1alpha1.Subscription, error) {
subs := make([]v1alpha1.Subscription, 0)
- opts := &client.ListOptions{
- Namespace: sub.Namespace,
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
- for {
- sl := &v1alpha1.SubscriptionList{}
- err := r.client.List(ctx, opts, sl)
- if err != nil {
- return nil, err
- }
- for _, s := range sl.Items {
- if sub.UID == s.UID {
- // This is the sub that is being reconciled. Skip it.
- continue
- }
- if equality.Semantic.DeepEqual(sub.Spec.Channel, s.Spec.Channel) {
- subs = append(subs, s)
- }
+ sl, err := r.subscriptionLister.Subscriptions(sub.Namespace).List(labels.Everything()) // TODO: we can use labels to help here
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range sl {
+ if sub.UID == s.UID {
+ // This is the sub that is being reconciled. Skip it.
+ continue
}
- if sl.Continue != "" {
- opts.Raw.Continue = sl.Continue
- } else {
- return subs, nil
+ if equality.Semantic.DeepEqual(sub.Spec.Channel, s.Spec.Channel) {
+ subs = append(subs, *s)
}
}
+ return subs, nil
}
-func (r *reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingduck.Subscribable {
+func (r *Reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingduck.Subscribable {
rv := &eventingduck.Subscribable{}
for _, sub := range subs {
if sub.Status.PhysicalSubscription.SubscriberURI != "" || sub.Status.PhysicalSubscription.ReplyURI != "" {
rv.Subscribers = append(rv.Subscribers, eventingduck.ChannelSubscriberSpec{
- Ref: &corev1.ObjectReference{
+ DeprecatedRef: &corev1.ObjectReference{
APIVersion: sub.APIVersion,
Kind: sub.Kind,
Namespace: sub.Namespace,
Name: sub.Name,
UID: sub.UID,
},
+ UID: sub.UID,
SubscriberURI: sub.Status.PhysicalSubscription.SubscriberURI,
ReplyURI: sub.Status.PhysicalSubscription.ReplyURI,
})
@@ -345,9 +352,9 @@ func (r *reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingd
return rv
}
-func (r *reconciler) patchPhysicalFrom(ctx context.Context, namespace string, physicalFrom corev1.ObjectReference, subs *eventingduck.Subscribable) error {
+func (r *Reconciler) patchPhysicalFrom(ctx context.Context, namespace string, physicalFrom corev1.ObjectReference, subs *eventingduck.Subscribable) error {
// First get the original object and convert it to only the bits we care about
- s, err := resolve.ObjectReference(ctx, r.dynamicClient, namespace, &physicalFrom)
+ s, err := resolve.ObjectReference(ctx, r.DynamicClientSet, namespace, &physicalFrom)
if err != nil {
return err
}
@@ -365,7 +372,7 @@ func (r *reconciler) patchPhysicalFrom(ctx context.Context, namespace string, ph
return err
}
- resourceClient, err := resolve.ResourceInterface(r.dynamicClient, namespace, &physicalFrom)
+ resourceClient, err := resolve.ResourceInterface(r.DynamicClientSet, namespace, &physicalFrom)
if err != nil {
logging.FromContext(ctx).Warn("Failed to create dynamic resource client", zap.Error(err))
return err
@@ -379,28 +386,8 @@ func (r *reconciler) patchPhysicalFrom(ctx context.Context, namespace string, ph
return nil
}
-func addFinalizer(sub *v1alpha1.Subscription) {
- finalizers := sets.NewString(sub.Finalizers...)
- finalizers.Insert(finalizerName)
- sub.Finalizers = finalizers.List()
-}
-
func removeFinalizer(sub *v1alpha1.Subscription) {
finalizers := sets.NewString(sub.Finalizers...)
finalizers.Delete(finalizerName)
sub.Finalizers = finalizers.List()
}
-
-// InjectClient implements controller runtime's inject.Client.
-func (r *reconciler) InjectClient(c client.Client) error {
- r.client = c
- return nil
-}
-
-// InjectConfig implements controller runtime's inject.Config.
-func (r *reconciler) InjectConfig(c *rest.Config) error {
- r.restConfig = c
- var err error
- r.dynamicClient, err = dynamic.NewForConfig(c)
- return err
-}
diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go
new file mode 100644
index 00000000000..fe9f0441a88
--- /dev/null
+++ b/pkg/reconciler/subscription/subscription_test.go
@@ -0,0 +1,700 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package subscription
+
+import (
+ "encoding/json"
+ "fmt"
+ testing2 "github.com/knative/eventing/pkg/reconciler/testing"
+ "testing"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+
+ "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/utils"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+)
+
+const (
+ subscriberName = "subscriber"
+ replyName = "reply"
+ channelName = "origin"
+ serviceName = "service"
+
+ subscriptionUID = subscriptionName + "-abc-123"
+ subscriptionName = "testsubscription"
+ testNS = "testnamespace"
+)
+
+// subscriptions have: channel -> SUB -> subscriber -viaSub-> reply
+
+var (
+ channelDNS = "channel.mynamespace.svc." + utils.GetClusterDomainName()
+ channelURI = "http://" + channelDNS + "/"
+
+ subscriberDNS = "subscriber.mynamespace.svc." + utils.GetClusterDomainName()
+ subscriberURI = "http://" + subscriberDNS + "/"
+
+ replyDNS = "reply.mynamespace.svc." + utils.GetClusterDomainName()
+ replyURI = "http://" + replyDNS + "/"
+
+ serviceDNS = serviceName + "." + testNS + ".svc." + utils.GetClusterDomainName()
+ serviceURI = "http://" + serviceDNS + "/"
+
+ subscriberGVK = metav1.GroupVersionKind{
+ Group: "testing.eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "Subscriber",
+ }
+
+ serviceGVK = metav1.GroupVersionKind{
+ Version: "v1",
+ Kind: "Service",
+ }
+
+ channelGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "Channel",
+ }
+)
+
+func init() {
+ // Add types to scheme
+ _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
+ _ = duckv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestAllCases(t *testing.T) {
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ //}, { // TODO: there is a bug in the controller, it will query for ""
+ // Name: "incomplete subscription",
+ // Objects: []runtime.Object{
+ // NewSubscription(subscriptionName, testNS),
+ // },
+ // Key: "foo/incomplete",
+ // WantErr: true,
+ // WantEvents: []string{
+ // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"),
+ // },
+ }, {
+ Name: "subscription, but subscriber is not addressable",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: status does not contain address"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ ),
+ }},
+ }, {
+ Name: "subscription, but subscriber does not exist",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: subscribers.testing.eventing.knative.dev %q not found", subscriberName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ ),
+ }},
+ }, {
+ Name: "subscription, reply does not exist",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS)),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "ResultResolveFailed", "Failed to resolve spec.reply: channels.eventing.knative.dev %q not found", replyName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ ),
+ }},
+ }, {
+ Name: "subscription, reply is not addressable",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(subscriberGVK, replyName), // reply will be a subscriberGVK for this test
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewUnstructured(subscriberGVK, replyName, testNS),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "ResultResolveFailed", "Failed to resolve spec.reply: status does not contain address"),
+ Eventf(corev1.EventTypeWarning, "SubscriptionUpdateStatusFailed", "Failed to update Subscription's status: status does not contain address"), // TODO: BUGBUG THIS IS WEIRD
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(subscriberGVK, replyName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ ),
+ }},
+ }, {
+ Name: "subscription, valid channel+subscriber",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: subscriberURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, valid channel+reply",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewChannel(replyName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(replyDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, valid channel+subscriber+reply",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewChannel(replyName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(replyDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: subscriberURI, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, valid remove reply",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ testing2.WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: subscriberURI, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: subscriberURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, valid remove subscriber",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ testing2.WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: subscriberURI, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ ),
+ testing2.NewChannel(replyName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(replyDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, channel+subscriber as service, does not exist",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: services %q not found", serviceName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ ),
+ }},
+ }, {
+ Name: "subscription, valid channel+subscriber as service",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewService(serviceName, testNS),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: subscriptionUID, SubscriberURI: serviceURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
+ }),
+ patchFinalizers(testNS, subscriptionName),
+ },
+ }, {
+ Name: "subscription, two subscribers for a channel",
+ Objects: []runtime.Object{
+ testing2.NewSubscription("a_"+subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ ),
+ // an already rec'ed subscription
+ testing2.NewSubscription("b_"+subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewService(serviceName, testNS),
+ },
+ Key: testNS + "/" + "a_" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", "a_"+subscriptionName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription("a_"+subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ // The first reconciliation will initialize the status conditions.
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, []v1alpha1.ChannelSubscriberSpec{
+ {UID: "b_" + subscriptionUID, SubscriberURI: serviceURI, DeprecatedRef: &corev1.ObjectReference{Name: "b_" + subscriptionName, Namespace: testNS, UID: "b_" + subscriptionUID}},
+ {UID: "a_" + subscriptionUID, SubscriberURI: serviceURI, DeprecatedRef: &corev1.ObjectReference{Name: "a_" + subscriptionName, Namespace: testNS, UID: "a_" + subscriptionUID}},
+ }),
+ patchFinalizers(testNS, "a_"+subscriptionName),
+ },
+ }, {
+ Name: "subscription deleted",
+ Objects: []runtime.Object{
+ testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionFinalizers(finalizerName),
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ testing2.WithSubscriptionDeleted,
+ ),
+ testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
+ testing2.WithUnstructuredAddressable(subscriberDNS),
+ ),
+ testing2.NewChannel(channelName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(channelDNS),
+ ),
+ testing2.NewChannel(replyName, testNS,
+ testing2.WithInitChannelConditions,
+ testing2.WithChannelAddress(replyDNS),
+ ),
+ },
+ Key: testNS + "/" + subscriptionName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: testing2.NewSubscription(subscriptionName, testNS,
+ testing2.WithSubscriptionChannel(channelGVK, channelName),
+ testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ testing2.WithSubscriptionReply(channelGVK, replyName),
+ testing2.WithInitSubscriptionConditions,
+ testing2.MarkSubscriptionReady,
+ testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ testing2.WithSubscriptionDeleted,
+ ),
+ }},
+ WantPatches: []clientgotesting.PatchActionImpl{
+ patchSubscribers(testNS, channelName, nil),
+ },
+ },
+ }
+
+ defer logtesting.ClearAll()
+ table.Test(t, testing2.MakeFactory(func(listers *testing2.Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ subscriptionLister: listers.GetSubscriptionLister(),
+ }
+ }))
+
+}
+
+func TestNew(t *testing.T) {
+ defer logtesting.ClearAll()
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+ eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0)
+
+ subscriptionInformer := eventingInformer.Eventing().V1alpha1().Subscriptions()
+ c := NewController(reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ }, subscriptionInformer)
+
+ if c == nil {
+ t.Fatal("Expected NewController to return a non-nil value")
+ }
+}
+
+func TestFinalizers(t *testing.T) {
+ testCases := []struct {
+ name string
+ original sets.String
+ add bool
+ want sets.String
+ }{
+ {
+ name: "empty, add",
+ original: sets.NewString(),
+ add: true,
+ want: sets.NewString(finalizerName),
+ }, {
+ name: "empty, delete",
+ original: sets.NewString(),
+ add: false,
+ want: sets.NewString(),
+ }, {
+ name: "existing, delete",
+ original: sets.NewString(finalizerName),
+ add: false,
+ want: sets.NewString(),
+ }, {
+ name: "existing, add",
+ original: sets.NewString(finalizerName),
+ add: true,
+ want: sets.NewString(finalizerName),
+ }, {
+ name: "existing two, delete",
+ original: sets.NewString(finalizerName, "someother"),
+ add: false,
+ want: sets.NewString("someother"),
+ }, {
+ name: "existing two, no change",
+ original: sets.NewString(finalizerName, "someother"),
+ add: true,
+ want: sets.NewString(finalizerName, "someother"),
+ },
+ }
+
+ for _, tc := range testCases {
+ original := &eventingv1alpha1.Subscription{}
+ original.Finalizers = tc.original.List()
+ if tc.add {
+ addFinalizer(original)
+ } else {
+ removeFinalizer(original)
+ }
+ has := sets.NewString(original.Finalizers...)
+ diff := has.Difference(tc.want)
+ if diff.Len() > 0 {
+ t.Errorf("%q failed, diff: %+v", tc.name, diff)
+ }
+ }
+}
+
+func addFinalizer(sub *eventingv1alpha1.Subscription) {
+ finalizers := sets.NewString(sub.Finalizers...)
+ finalizers.Insert(finalizerName)
+ sub.Finalizers = finalizers.List()
+}
+
+func patchSubscribers(namespace, name string, subscribers []v1alpha1.ChannelSubscriberSpec) clientgotesting.PatchActionImpl {
+ action := clientgotesting.PatchActionImpl{}
+ action.Name = name
+ action.Namespace = namespace
+
+ var spec string
+ if subscribers != nil {
+ b, err := json.Marshal(subscribers)
+ ss := make([]map[string]interface{}, 0)
+ err = json.Unmarshal(b, &ss)
+ subs, err := json.Marshal(ss)
+ if err != nil {
+ return action
+ }
+ spec = fmt.Sprintf(`{"subscribable":{"subscribers":%s}}`, subs)
+ } else {
+ spec = `{"subscribable":{}}`
+ }
+
+ patch := `{"spec":` + spec + `}`
+ action.Patch = []byte(patch)
+ return action
+}
+
+func patchFinalizers(namespace, name string) clientgotesting.PatchActionImpl {
+ action := clientgotesting.PatchActionImpl{}
+ action.Name = name
+ action.Namespace = namespace
+ patch := `{"metadata":{"finalizers":["` + finalizerName + `"],"resourceVersion":""}}`
+ action.Patch = []byte(patch)
+ return action
+}
diff --git a/pkg/reconciler/testing/channel.go b/pkg/reconciler/testing/channel.go
new file mode 100644
index 00000000000..3d705befdc9
--- /dev/null
+++ b/pkg/reconciler/testing/channel.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+
+ duckv1alpha1 "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ChannelOption enables further configuration of a Channel.
+type ChannelOption func(*v1alpha1.Channel)
+
+// NewChannel creates a Channel with ChannelOptions
+func NewChannel(name, namespace string, o ...ChannelOption) *v1alpha1.Channel {
+ c := &v1alpha1.Channel{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ }
+ for _, opt := range o {
+ opt(c)
+ }
+ c.SetDefaults(context.Background())
+ return c
+}
+
+// NewChannelWithoutNamespace creates a Channel with ChannelOptions but without a specific namespace
+func NewChannelWithoutNamespace(name string, o ...ChannelOption) *v1alpha1.Channel {
+ s := &v1alpha1.Channel{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+ for _, opt := range o {
+ opt(s)
+ }
+ s.SetDefaults(context.Background())
+ return s
+}
+
+// WithInitChannelConditions initializes the Channel's conditions.
+func WithInitChannelConditions(s *v1alpha1.Channel) {
+ s.Status.InitializeConditions()
+}
+
+func WithChannelAddress(hostname string) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.Status.Address.Hostname = hostname
+ }
+}
+
+func WithChannelSubscribers(subscribers []duckv1alpha1.ChannelSubscriberSpec) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.Spec.Subscribable = &duckv1alpha1.Subscribable{
+ Subscribers: subscribers,
+ }
+ }
+}
diff --git a/pkg/reconciler/testing/factory.go b/pkg/reconciler/testing/factory.go
new file mode 100644
index 00000000000..c1f39e4f4cf
--- /dev/null
+++ b/pkg/reconciler/testing/factory.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2018 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "k8s.io/apimachinery/pkg/runtime"
+ "testing"
+
+ fakedynamicclientset "k8s.io/client-go/dynamic/fake"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ clientgotesting "k8s.io/client-go/testing"
+ "k8s.io/client-go/tools/record"
+
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+
+ . "github.com/knative/pkg/reconciler/testing"
+)
+
+const (
+ // maxEventBufferSize is the estimated max number of event notifications that
+ // can be buffered during reconciliation.
+ maxEventBufferSize = 10
+)
+
+// Ctor functions create a k8s controller with given params.
+type Ctor func(*Listers, reconciler.Options) controller.Reconciler
+
+// MakeFactory creates a reconciler factory with fake clients and controller created by `ctor`.
+func MakeFactory(ctor Ctor) Factory {
+ return func(t *testing.T, r *TableRow) (controller.Reconciler, ActionRecorderList, EventList, *FakeStatsReporter) {
+ ls := NewListers(r.Objects)
+
+ kubeClient := fakekubeclientset.NewSimpleClientset(ls.GetKubeObjects()...)
+ client := fakeclientset.NewSimpleClientset(ls.GetEventingObjects()...)
+
+ dynamicScheme := runtime.NewScheme()
+ for _, addTo := range clientSetSchemes {
+ addTo(dynamicScheme)
+ }
+
+ dynamicClient := fakedynamicclientset.NewSimpleDynamicClient(dynamicScheme, ls.GetAllObjects()...)
+ eventRecorder := record.NewFakeRecorder(maxEventBufferSize)
+ statsReporter := &FakeStatsReporter{}
+
+ PrependGenerateNameReactor(&client.Fake)
+ PrependGenerateNameReactor(&dynamicClient.Fake)
+
+ // Set up our Controller from the fakes.
+ c := ctor(&ls, reconciler.Options{
+ KubeClientSet: kubeClient,
+ DynamicClientSet: dynamicClient,
+ EventingClientSet: client,
+ Recorder: eventRecorder,
+ //StatsReporter: statsReporter,
+ Logger: logtesting.TestLogger(t),
+ })
+
+ for _, reactor := range r.WithReactors {
+ kubeClient.PrependReactor("*", "*", reactor)
+ client.PrependReactor("*", "*", reactor)
+ dynamicClient.PrependReactor("*", "*", reactor)
+ }
+
+ // Validate all Create operations through the eventing client.
+ client.PrependReactor("create", "*", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+ return ValidateCreates(context.Background(), action)
+ })
+ client.PrependReactor("update", "*", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+ return ValidateUpdates(context.Background(), action)
+ })
+
+ actionRecorderList := ActionRecorderList{dynamicClient, client, kubeClient}
+ eventList := EventList{Recorder: eventRecorder}
+
+ return c, actionRecorderList, eventList, statsReporter
+ }
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
new file mode 100644
index 00000000000..814135227c2
--- /dev/null
+++ b/pkg/reconciler/testing/listers.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ fakeeventingclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ eventinglisters "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ fakesharedclientset "github.com/knative/pkg/client/clientset/versioned/fake"
+ istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3"
+ "github.com/knative/pkg/reconciler/testing"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ appsv1listers "k8s.io/client-go/listers/apps/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+var subscriberAddToScheme = func(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "testing.eventing.knative.dev", Version: "v1alpha1", Kind: "Subscriber"}, &unstructured.Unstructured{})
+ return nil
+}
+
+var clientSetSchemes = []func(*runtime.Scheme) error{
+ fakekubeclientset.AddToScheme,
+ fakesharedclientset.AddToScheme,
+ fakeeventingclientset.AddToScheme,
+ subscriberAddToScheme,
+}
+
+type Listers struct {
+ sorter testing.ObjectSorter
+}
+
+func NewListers(objs []runtime.Object) Listers {
+ scheme := runtime.NewScheme()
+
+ for _, addTo := range clientSetSchemes {
+ addTo(scheme)
+ }
+
+ ls := Listers{
+ sorter: testing.NewObjectSorter(scheme),
+ }
+
+ ls.sorter.AddObjects(objs...)
+
+ return ls
+}
+
+func (l *Listers) indexerFor(obj runtime.Object) cache.Indexer {
+ return l.sorter.IndexerForObjectType(obj)
+}
+
+func (l *Listers) GetKubeObjects() []runtime.Object {
+ return l.sorter.ObjectsForSchemeFunc(fakekubeclientset.AddToScheme)
+}
+
+func (l *Listers) GetEventingObjects() []runtime.Object {
+ return l.sorter.ObjectsForSchemeFunc(fakeeventingclientset.AddToScheme)
+}
+
+func (l *Listers) GetSubscriberObjects() []runtime.Object {
+ return l.sorter.ObjectsForSchemeFunc(subscriberAddToScheme)
+}
+
+func (l *Listers) GetAllObjects() []runtime.Object {
+ all := l.GetSubscriberObjects()
+ all = append(all, l.GetEventingObjects()...)
+ all = append(all, l.GetKubeObjects()...)
+ return all
+}
+
+func (l *Listers) GetSharedObjects() []runtime.Object {
+ return l.sorter.ObjectsForSchemeFunc(fakesharedclientset.AddToScheme)
+}
+
+func (l *Listers) GetSubscriptionLister() eventinglisters.SubscriptionLister {
+ return eventinglisters.NewSubscriptionLister(l.indexerFor(&eventingv1alpha1.Subscription{}))
+}
+
+func (l *Listers) GetVirtualServiceLister() istiolisters.VirtualServiceLister {
+ return istiolisters.NewVirtualServiceLister(l.indexerFor(&istiov1alpha3.VirtualService{}))
+}
+
+// GetGatewayLister gets lister for Istio Gateway resource.
+func (l *Listers) GetGatewayLister() istiolisters.GatewayLister {
+ return istiolisters.NewGatewayLister(l.indexerFor(&istiov1alpha3.Gateway{}))
+}
+
+func (l *Listers) GetDeploymentLister() appsv1listers.DeploymentLister {
+ return appsv1listers.NewDeploymentLister(l.indexerFor(&appsv1.Deployment{}))
+}
+
+func (l *Listers) GetK8sServiceLister() corev1listers.ServiceLister {
+ return corev1listers.NewServiceLister(l.indexerFor(&corev1.Service{}))
+}
+
+func (l *Listers) GetEndpointsLister() corev1listers.EndpointsLister {
+ return corev1listers.NewEndpointsLister(l.indexerFor(&corev1.Endpoints{}))
+}
+
+func (l *Listers) GetConfigMapLister() corev1listers.ConfigMapLister {
+ return corev1listers.NewConfigMapLister(l.indexerFor(&corev1.ConfigMap{}))
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go b/pkg/reconciler/testing/service.go
similarity index 53%
rename from vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go
rename to pkg/reconciler/testing/service.go
index 738ec88a37e..b074b93a722 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_defaults.go
+++ b/pkg/reconciler/testing/service.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,30 +14,26 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package testing
import (
- "context"
- "time"
-
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-// DefaultTimeout is 10min
-const DefaultTimeout = 10 * time.Minute
+// ServiceOption enables further configuration of a Service.
+type ServiceOption func(*corev1.Service)
-// SetDefaults for build
-func (b *Build) SetDefaults(ctx context.Context) {
- if b == nil {
- return
- }
- if b.Spec.ServiceAccountName == "" {
- b.Spec.ServiceAccountName = "default"
- }
- if b.Spec.Timeout == nil {
- b.Spec.Timeout = &metav1.Duration{Duration: DefaultTimeout}
+// NewService creates a Service with ServiceOptions
+func NewService(name, namespace string, so ...ServiceOption) *corev1.Service {
+ s := &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
}
- if b.Spec.Template != nil && b.Spec.Template.Kind == "" {
- b.Spec.Template.Kind = BuildTemplateKind
+ for _, opt := range so {
+ opt(s)
}
+ return s
}
diff --git a/pkg/reconciler/testing/subscription.go b/pkg/reconciler/testing/subscription.go
new file mode 100644
index 00000000000..60ee2a14844
--- /dev/null
+++ b/pkg/reconciler/testing/subscription.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "k8s.io/apimachinery/pkg/types"
+ "time"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SubscriptionOption enables further configuration of a Subscription.
+type SubscriptionOption func(*v1alpha1.Subscription)
+
+// NewSubscription creates a Subscription with SubscriptionOptions
+func NewSubscription(name, namespace string, so ...SubscriptionOption) *v1alpha1.Subscription {
+ s := &v1alpha1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ UID: types.UID(name + "-abc-123"),
+ },
+ }
+ for _, opt := range so {
+ opt(s)
+ }
+ s.SetDefaults(context.Background())
+ return s
+}
+
+// NewSubscriptionWithoutNamespace creates a Subscription with SubscriptionOptions but without a specific namespace
+func NewSubscriptionWithoutNamespace(name string, so ...SubscriptionOption) *v1alpha1.Subscription {
+ s := &v1alpha1.Subscription{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+ for _, opt := range so {
+ opt(s)
+ }
+ s.SetDefaults(context.Background())
+ return s
+}
+
+// WithInitSubscriptionConditions initializes the Subscriptions's conditions.
+func WithInitSubscriptionConditions(s *v1alpha1.Subscription) {
+ s.Status.InitializeConditions()
+}
+
+// TODO: this can be a runtime object
+func WithSubscriptionDeleted(s *v1alpha1.Subscription) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ s.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+func WithSubscriptionChannel(gvk metav1.GroupVersionKind, name string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Spec.Channel = corev1.ObjectReference{
+ APIVersion: apiVersion(gvk),
+ Kind: gvk.Kind,
+ Name: name,
+ }
+ }
+}
+
+func WithSubscriptionSubscriberRef(gvk metav1.GroupVersionKind, name string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Spec.Subscriber = &v1alpha1.SubscriberSpec{
+ Ref: &corev1.ObjectReference{
+ APIVersion: apiVersion(gvk),
+ Kind: gvk.Kind,
+ Name: name,
+ },
+ }
+ }
+}
+
+func WithSubscriptionPhysicalSubscriptionSubscriber(uri string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Status.PhysicalSubscription.SubscriberURI = uri
+ }
+}
+
+func WithSubscriptionPhysicalSubscriptionReply(uri string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Status.PhysicalSubscription.ReplyURI = uri
+ }
+}
+
+func WithSubscriptionFinalizers(finalizers ...string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Finalizers = finalizers
+ }
+}
+
+func MarkSubscriptionReady(s *v1alpha1.Subscription) {
+ s.Status.MarkChannelReady()
+ s.Status.MarkReferencesResolved()
+}
+
+func WithSubscriptionReply(gvk metav1.GroupVersionKind, name string) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.Spec.Reply = &v1alpha1.ReplyStrategy{
+ Channel: &corev1.ObjectReference{
+ APIVersion: apiVersion(gvk),
+ Kind: gvk.Kind,
+ Name: name,
+ },
+ }
+ }
+}
diff --git a/pkg/reconciler/testing/unstructured.go b/pkg/reconciler/testing/unstructured.go
new file mode 100644
index 00000000000..1a178cc6040
--- /dev/null
+++ b/pkg/reconciler/testing/unstructured.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// UnstructuredOption enables further configuration of a Unstructured.
+type UnstructuredOption func(*unstructured.Unstructured)
+
+// NewUnstructured creates a unstructured.Unstructured with UnstructuredOption
+func NewUnstructured(gvk metav1.GroupVersionKind, name, namespace string, uo ...UnstructuredOption) *unstructured.Unstructured {
+ u := &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": apiVersion(gvk),
+ "kind": gvk.Kind,
+ "metadata": map[string]interface{}{
+ "namespace": namespace,
+ "name": name,
+ },
+ "spec": map[string]interface{}{},
+ "status": map[string]interface{}{},
+ },
+ }
+ for _, opt := range uo {
+ opt(u)
+ }
+ return u
+}
+
+func WithUnstructuredAddressable(hostname string) UnstructuredOption {
+ return func(u *unstructured.Unstructured) {
+ status, ok := u.Object["status"].(map[string]interface{})
+ if ok {
+ status["address"] = map[string]interface{}{
+ "hostname": hostname,
+ }
+ }
+ }
+}
+
+func apiVersion(gvk metav1.GroupVersionKind) string {
+ groupVersion := gvk.Version
+ if gvk.Group != "" {
+ groupVersion = gvk.Group + "/" + gvk.Version
+ }
+ return groupVersion
+}
diff --git a/pkg/reconciler/v1alpha1/subscription/provider_test.go b/pkg/reconciler/v1alpha1/subscription/provider_test.go
deleted file mode 100644
index 7aed5853145..00000000000
--- a/pkg/reconciler/v1alpha1/subscription/provider_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package subscription
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
-)
-
-func TestProvideController(t *testing.T) {
- // TODO(grantr) This needs a mock of manager.Manager. Creating a manager
- // with a fake Config fails because the Manager tries to contact the
- // apiserver.
-
- // cfg := &rest.Config{
- // Host: "http://foo:80",
- // }
- //
- // mgr, err := manager.New(cfg, manager.Options{})
- // if err != nil {
- // t.Fatalf("Error creating manager: %v", err)
- // }
- //
- // _, err = ProvideController(mgr)
- // if err != nil {
- // t.Fatalf("Error in ProvideController: %v", err)
- // }
-}
-
-func TestInjectClient(t *testing.T) {
- r := &reconciler{}
- orig := r.client
- n := fake.NewFakeClient()
- if orig == n {
- t.Errorf("Original and new clients are identical: %v", orig)
- }
- err := r.InjectClient(n)
- if err != nil {
- t.Errorf("Unexpected error injecting the client: %v", err)
- }
- if n != r.client {
- t.Errorf("Unexpected client. Expected: '%v'. Actual: '%v'", n, r.client)
- }
-}
-
-func TestInjectConfig(t *testing.T) {
- r := &reconciler{}
- wantCfg := &rest.Config{
- Host: "http://foo",
- }
-
- err := r.InjectConfig(wantCfg)
- if err != nil {
- t.Fatalf("Unexpected error injecting the config: %v", err)
- }
-
- gotCfg := r.restConfig
- if diff := cmp.Diff(wantCfg, gotCfg); diff != "" {
- t.Errorf("Unexpected config (-want, +got): %v", diff)
- }
-
- wantDynClient, err := dynamic.NewForConfig(wantCfg)
- if err != nil {
- t.Fatalf("Unexpected error generating dynamic client: %v", err)
- }
-
- // Since dynamicClient doesn't export any fields, we can only test its type.
- switch r.dynamicClient.(type) {
- case dynamic.Interface:
- // ok
- default:
- t.Errorf("Unexpected dynamicClient type. Expected: %T, Got: %T", wantDynClient, r.dynamicClient)
- }
-}
diff --git a/pkg/reconciler/v1alpha1/subscription/subscription_test.go b/pkg/reconciler/v1alpha1/subscription/subscription_test.go
deleted file mode 100644
index feb89d2a131..00000000000
--- a/pkg/reconciler/v1alpha1/subscription/subscription_test.go
+++ /dev/null
@@ -1,1417 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Veroute.on 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package subscription
-
-import (
- "fmt"
- "testing"
-
- "go.uber.org/zap"
-
- eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
- "github.com/knative/eventing/pkg/utils"
- "github.com/knative/eventing/pkg/utils/resolve"
- duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-var (
- trueVal = true
-
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
-
- // map of events to set test cases' expectations easier
- events = map[string]corev1.Event{
- subscriptionReconciled: {Reason: subscriptionReconciled, Type: corev1.EventTypeNormal},
- subscriptionUpdateStatusFailed: {Reason: subscriptionUpdateStatusFailed, Type: corev1.EventTypeWarning},
- physicalChannelSyncFailed: {Reason: physicalChannelSyncFailed, Type: corev1.EventTypeWarning},
- channelReferenceFetchFailed: {Reason: channelReferenceFetchFailed, Type: corev1.EventTypeWarning},
- subscriberResolveFailed: {Reason: subscriberResolveFailed, Type: corev1.EventTypeWarning},
- resultResolveFailed: {Reason: resultResolveFailed, Type: corev1.EventTypeWarning},
- }
-)
-
-const (
- fromChannelName = "fromchannel"
- resultChannelName = "resultchannel"
- sourceName = "source"
- routeName = "subscriberroute"
- channelKind = "Channel"
- routeKind = "Route"
- sourceKind = "Source"
- subscriptionKind = "Subscription"
- eventType = "myeventtype"
- subscriptionName = "testsubscription"
- testNS = "testnamespace"
- k8sServiceName = "testk8sservice"
-)
-
-var (
- targetDNS = "myfunction.mynamespace.svc." + utils.GetClusterDomainName()
- sinkableDNS = "myresultchannel.mynamespace.svc." + utils.GetClusterDomainName()
- k8sServiceDNS = "testk8sservice.testnamespace.svc." + utils.GetClusterDomainName()
- otherAddressableDNS = "other-sinkable-channel.mynamespace.svc." + utils.GetClusterDomainName()
-)
-
-func init() {
- // Add types to scheme
- _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
- _ = duckv1alpha1.AddToScheme(scheme.Scheme)
-}
-
-func TestAllCases(t *testing.T) {
- testCases := []controllertesting.TestCase{
- {
- Name: "subscription does not exist",
- WantErr: false,
- }, {
- Name: "subscription but From channel does not exist",
- InitialState: []runtime.Object{
- Subscription(),
- },
- WantErrMsg: `channels.eventing.knative.dev "fromchannel" not found`,
- WantEvent: []corev1.Event{
- events[channelReferenceFetchFailed],
- },
- }, {
- Name: "subscription, but From is not subscribable",
- InitialState: []runtime.Object{
- Subscription().FromSource(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed. It should actually fail saying that there is no
- // Spec.Subscribers field.
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": sourceKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": sourceName,
- },
- "spec": map[string]interface{}{},
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "Valid channel, subscriber does not exist",
- InitialState: []runtime.Object{
- Subscription(),
- },
- WantErrMsg: `routes.serving.knative.dev "subscriberroute" not found`,
- WantPresent: []runtime.Object{
- Subscription().UnknownConditions(),
- },
- WantEvent: []corev1.Event{
- events[subscriberResolveFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- },
- }, {
- Name: "Valid channel, subscriber is not callable",
- InitialState: []runtime.Object{
- Subscription(),
- },
- WantPresent: []runtime.Object{
- Subscription().UnknownConditions(),
- },
- WantErrMsg: "status does not contain address",
- WantEvent: []corev1.Event{
- events[subscriberResolveFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "someotherstuff": targetDNS,
- },
- },
- },
- },
- }, {
- Name: "Valid channel and subscriber, result does not exist",
- InitialState: []runtime.Object{
- Subscription(),
- },
- WantPresent: []runtime.Object{
- Subscription().UnknownConditions().PhysicalSubscriber(targetDNS),
- },
- WantErrMsg: `channels.eventing.knative.dev "resultchannel" not found`,
- WantEvent: []corev1.Event{
- events[resultResolveFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "valid channel, subscriber, result is not addressable",
- InitialState: []runtime.Object{
- Subscription(),
- },
- WantErrMsg: "status does not contain address",
- WantPresent: []runtime.Object{
- // TODO: Again this works on gke cluster, but I need to set
- // something else up here. later...
- // Subscription().ReferencesResolved(),
- Subscription().UnknownConditions().PhysicalSubscriber(targetDNS),
- },
- WantEvent: []corev1.Event{
- events[resultResolveFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, all targets resolved, subscribers modified",
- InitialState: []runtime.Object{
- Subscription(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, all targets resolved, subscribers modified -- nil reply",
- InitialState: []runtime.Object{
- Subscription().NilReply(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().NilReply().ReferencesResolved().PhysicalSubscriber(targetDNS),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, all targets resolved, subscribers modified -- empty but non-nil reply",
- InitialState: []runtime.Object{
- Subscription().EmptyNonNilReply(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().ReferencesResolved().PhysicalSubscriber(targetDNS).EmptyNonNilReply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, target points to the legacy targetable interface",
- InitialState: []runtime.Object{
- Subscription().EmptyNonNilReply(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().ReferencesResolved().PhysicalSubscriber(targetDNS).EmptyNonNilReply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "domainInternal": targetDNS,
- },
- },
- },
- },
- }, {
- Name: "old subscription: updates status, removing the no longer present Subscriber",
- InitialState: []runtime.Object{
- // This will have no Subscriber in the spec, but will have one in the status.
- Subscription().NilSubscriber().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().NilSubscriber().ReferencesResolved().Reply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "old subscription: updates status, removing the no longer present reply",
- InitialState: []runtime.Object{
- // This will have no Reply in the spec, but will have one in the status.
- Subscription().NilReply().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().NilReply().ReferencesResolved().PhysicalSubscriber(targetDNS),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "domainInternal": targetDNS,
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, all targets resolved, subscribers modified -- nil subscriber",
- InitialState: []runtime.Object{
- Subscription().NilSubscriber(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().NilSubscriber().ReferencesResolved().Reply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription: adds status, all targets resolved, subscribers modified -- empty but non-nil subscriber",
- InitialState: []runtime.Object{
- Subscription().EmptyNonNilSubscriber(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().EmptyNonNilSubscriber().ReferencesResolved().Reply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription to non-existent K8s Service: fails with no service found",
- InitialState: []runtime.Object{
- Subscription().ToK8sService(),
- },
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().ToK8sService().UnknownConditions(),
- },
- WantErrMsg: "services \"testk8sservice\" not found",
- WantEvent: []corev1.Event{
- events[subscriberResolveFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- },
- }, {
- Name: "new subscription to K8s Service: adds status, all targets resolved, subscribers modified",
- InitialState: []runtime.Object{
- Subscription().ToK8sService(),
- getK8sService(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantPresent: []runtime.Object{
- Subscription().ToK8sService().ReferencesResolved().PhysicalSubscriber(k8sServiceDNS).Reply(),
- },
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using K8s Service)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "v1",
- "kind": "Service",
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": k8sServiceName,
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- }, {
- Name: "new subscription with from channel: adds status, all targets resolved, subscribers modified",
- InitialState: []runtime.Object{
- Subscription(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantPresent: []runtime.Object{
- Subscription().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- },
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source with a reference to the From Channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": sourceKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": sourceName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- },
- {
- Name: "sync multiple Subscriptions to one channel",
- InitialState: []runtime.Object{
- // The first two Subscriptions both have the same physical From, so we should see that
- // Channel updated with both Subscriptions.
- Subscription(),
- Subscription().Renamed().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- // This subscription has a different physical From, so we should not see it in the same
- // Channel as the first two.
- Subscription().DifferentChannel(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantPresent: []runtime.Object{
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. The entire test is really to
- // verify the following, but can't be done because the call to Patch fails (it assumes
- // a Strategic Merge Patch, whereas we are doing a JSON Patch). so for now, comment it
- // out.
- // getChannelWithMultipleSubscriptions(),
- Subscription().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- // Unaltered because this Subscription was not reconciled.
- Subscription().Renamed().ReferencesResolved().PhysicalSubscriber(targetDNS).Reply(),
- Subscription().DifferentChannel(),
- },
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source with a reference to the From Channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": sourceKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": sourceName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- },
- },
- // Subscriber (using knative route)
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "serving.knative.dev/v1alpha1",
- "kind": routeKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": routeName,
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": targetDNS,
- },
- },
- },
- },
- // Reply channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": resultChannelName,
- },
- "spec": map[string]interface{}{
- "subscribable": map[string]interface{}{},
- },
- "status": map[string]interface{}{
- "address": map[string]interface{}{
- "hostname": sinkableDNS,
- },
- },
- },
- },
- },
- },
- {
- Name: "delete subscription with from channel: subscribers modified",
- InitialState: []runtime.Object{
- Subscription().Deleted().ChannelReady(),
- },
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. Marking this as expecting a specific
- // failure for now, until upstream is fixed.
- WantResult: reconcile.Result{},
- WantErrMsg: `unable to find api field in struct Unstructured for the json field "spec"`,
- WantAbsent: []runtime.Object{
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. The entire test is really to
- // verify the following, but can't be done because the call to Patch fails (it assumes
- // a Strategic Merge Patch, whereas we are doing a JSON Patch). so for now, comment it
- // out.
- // getNewDeletedSubscriptionWithChannelReady(),
- },
- WantPresent: []runtime.Object{
- // TODO: JSON patch is not working on the fake, see
- // https://github.com/kubernetes/client-go/issues/478. The entire test is really to
- // verify the following, but can't be done because the call to Patch fails (it assumes
- // a Strategic Merge Patch, whereas we are doing a JSON Patch). so for now, comment it
- // out.
- // getChannelWithOtherSubscription(),
- },
- WantEvent: []corev1.Event{
- events[physicalChannelSyncFailed],
- },
- Objects: []runtime.Object{
- // Source channel
- &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": eventingv1alpha1.SchemeGroupVersion.String(),
- "kind": channelKind,
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": fromChannelName,
- },
- "spec": map[string]interface{}{
- "channelable": map[string]interface{}{
- "subscribers": []interface{}{
- map[string]interface{}{
- "subscriberURI": targetDNS,
- "replyURI": sinkableDNS,
- },
- map[string]interface{}{
- "replyURI": otherAddressableDNS,
- },
- },
- },
- },
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- tc.Scheme = scheme.Scheme
- c := tc.GetClient()
- dc := tc.GetDynamicClient()
- recorder := tc.GetEventRecorder()
-
- r := &reconciler{
- client: c,
- dynamicClient: dc,
- restConfig: &rest.Config{},
- recorder: recorder,
- logger: zap.NewNop(),
- }
- tc.ReconcileKey = fmt.Sprintf("%s/%s", testNS, subscriptionName)
- tc.IgnoreTimes = true
- t.Run(tc.Name, tc.Runner(t, r, c, recorder))
- }
-}
-
-func TestFinalizers(t *testing.T) {
- testCases := []struct {
- name string
- original sets.String
- add bool
- want sets.String
- }{
- {
- name: "empty, add",
- original: sets.NewString(),
- add: true,
- want: sets.NewString(finalizerName),
- }, {
- name: "empty, delete",
- original: sets.NewString(),
- add: false,
- want: sets.NewString(),
- }, {
- name: "existing, delete",
- original: sets.NewString(finalizerName),
- add: false,
- want: sets.NewString(),
- }, {
- name: "existing, add",
- original: sets.NewString(finalizerName),
- add: true,
- want: sets.NewString(finalizerName),
- }, {
- name: "existing two, delete",
- original: sets.NewString(finalizerName, "someother"),
- add: false,
- want: sets.NewString("someother"),
- }, {
- name: "existing two, no change",
- original: sets.NewString(finalizerName, "someother"),
- add: true,
- want: sets.NewString(finalizerName, "someother"),
- },
- }
-
- for _, tc := range testCases {
- original := &eventingv1alpha1.Subscription{}
- original.Finalizers = tc.original.List()
- if tc.add {
- addFinalizer(original)
- } else {
- removeFinalizer(original)
- }
- has := sets.NewString(original.Finalizers...)
- diff := has.Difference(tc.want)
- if diff.Len() > 0 {
- t.Errorf("%q failed, diff: %+v", tc.name, diff)
- }
- }
-}
-
-func getNewFromChannel() *eventingv1alpha1.Channel {
- return getNewChannel(fromChannelName)
-}
-
-func getNewReplyChannel() *eventingv1alpha1.Channel {
- return getNewChannel(resultChannelName)
-}
-
-func getNewChannel(name string) *eventingv1alpha1.Channel {
- channel := &eventingv1alpha1.Channel{
- TypeMeta: channelType(),
- ObjectMeta: om("test", name),
- Spec: eventingv1alpha1.ChannelSpec{},
- }
- channel.ObjectMeta.OwnerReferences = append(channel.ObjectMeta.OwnerReferences, getOwnerReference(false))
-
- // selflink is not filled in when we create the object, so clear it
- channel.ObjectMeta.SelfLink = ""
- return channel
-}
-
-type SubscriptionBuilder struct {
- *eventingv1alpha1.Subscription
-}
-
-// Verify the Builder implements Buildable
-var _ controllertesting.Buildable = &SubscriptionBuilder{}
-
-func Subscription() *SubscriptionBuilder {
- subscription := &eventingv1alpha1.Subscription{
- TypeMeta: subscriptionType(),
- ObjectMeta: om(testNS, subscriptionName),
- Spec: eventingv1alpha1.SubscriptionSpec{
- Channel: corev1.ObjectReference{
- Name: fromChannelName,
- Kind: channelKind,
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- },
- Subscriber: &eventingv1alpha1.SubscriberSpec{
- Ref: &corev1.ObjectReference{
- Name: routeName,
- Kind: routeKind,
- APIVersion: "serving.knative.dev/v1alpha1",
- },
- },
- Reply: &eventingv1alpha1.ReplyStrategy{
- Channel: &corev1.ObjectReference{
- Name: resultChannelName,
- Kind: channelKind,
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- },
- },
- },
- }
- subscription.ObjectMeta.OwnerReferences = append(subscription.ObjectMeta.OwnerReferences, getOwnerReference(false))
-
- // selflink is not filled in when we create the object, so clear it
- subscription.ObjectMeta.SelfLink = ""
-
- return &SubscriptionBuilder{
- Subscription: subscription,
- }
-}
-
-func (s *SubscriptionBuilder) Build() runtime.Object {
- return s.Subscription
-}
-
-func (s *SubscriptionBuilder) EmptyNonNilReply() *SubscriptionBuilder {
- s.Spec.Reply = &eventingv1alpha1.ReplyStrategy{}
- return s
-}
-
-func (s *SubscriptionBuilder) NilReply() *SubscriptionBuilder {
- s.Spec.Reply = nil
- return s
-}
-
-func (s *SubscriptionBuilder) EmptyNonNilSubscriber() *SubscriptionBuilder {
- s.Spec.Subscriber = &eventingv1alpha1.SubscriberSpec{}
- return s
-}
-
-func (s *SubscriptionBuilder) NilSubscriber() *SubscriptionBuilder {
- s.Spec.Subscriber = nil
- return s
-}
-
-func (s *SubscriptionBuilder) FromSource() *SubscriptionBuilder {
- s.Spec.Channel = corev1.ObjectReference{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: sourceKind,
- Name: sourceName,
- }
- return s
-}
-
-func (s *SubscriptionBuilder) ToK8sService() *SubscriptionBuilder {
- s.Spec.Subscriber = &eventingv1alpha1.SubscriberSpec{
- Ref: &corev1.ObjectReference{
- Name: k8sServiceName,
- Kind: "Service",
- APIVersion: "v1",
- },
- }
- return s
-}
-
-func (s *SubscriptionBuilder) UnknownConditions() *SubscriptionBuilder {
- s.Status.InitializeConditions()
- return s
-}
-
-func (s *SubscriptionBuilder) PhysicalSubscriber(dns string) *SubscriptionBuilder {
- s.Status.PhysicalSubscription.SubscriberURI = resolve.DomainToURL(dns)
- return s
-}
-
-func (s *SubscriptionBuilder) ReferencesResolved() *SubscriptionBuilder {
- s.UnknownConditions()
- s.Status.MarkReferencesResolved()
- return s
-}
-
-func (s *SubscriptionBuilder) Reply() *SubscriptionBuilder {
- s.Status.PhysicalSubscription.ReplyURI = resolve.DomainToURL(sinkableDNS)
- return s
-}
-
-func (s *SubscriptionBuilder) DifferentChannel() *SubscriptionBuilder {
- s.Name = "different-channel"
- s.UID = "different-channel-UID"
- s.Status.PhysicalSubscription.SubscriberURI = "some-other-domain"
- return s
-}
-
-func (s *SubscriptionBuilder) ChannelReady() *SubscriptionBuilder {
- s.ReferencesResolved()
- s.Status.MarkChannelReady()
- return s
-}
-
-func (s *SubscriptionBuilder) Deleted() *SubscriptionBuilder {
- s.ObjectMeta.DeletionTimestamp = &deletionTime
- return s
-}
-
-// Renamed renames the subscription. It is intended to be used in tests that create multiple
-// Subscriptions, so that there are no naming conflicts.
-func (s *SubscriptionBuilder) Renamed() *SubscriptionBuilder {
- s.Name = "renamed"
- s.UID = "renamed-UID"
- s.Status.PhysicalSubscription.SubscriberURI = ""
- s.Status.PhysicalSubscription.ReplyURI = otherAddressableDNS
- return s
-}
-
-func channelType() metav1.TypeMeta {
- return metav1.TypeMeta{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Channel",
- }
-}
-
-func subscriptionType() metav1.TypeMeta {
- return metav1.TypeMeta{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Subscription",
- }
-}
-
-func getK8sService() *corev1.Service {
- return &corev1.Service{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "v1",
- Kind: "Service",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: k8sServiceName,
- },
- }
-}
-
-func getChannelWithMultipleSubscriptions() *eventingv1alpha1.Channel {
- return &eventingv1alpha1.Channel{
- TypeMeta: metav1.TypeMeta{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: channelKind,
- },
- ObjectMeta: om(testNS, fromChannelName),
- Spec: eventingv1alpha1.ChannelSpec{
- Subscribable: &eventingduck.Subscribable{
- Subscribers: []eventingduck.ChannelSubscriberSpec{
- {
- Ref: &corev1.ObjectReference{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: subscriptionKind,
- Namespace: testNS,
- Name: subscriptionName,
- UID: "",
- },
- SubscriberURI: targetDNS,
- ReplyURI: sinkableDNS,
- },
- {
- Ref: &corev1.ObjectReference{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: subscriptionKind,
- Namespace: testNS,
- Name: "renamed",
- UID: "renamed-UID",
- },
- ReplyURI: otherAddressableDNS,
- },
- },
- },
- },
- }
-}
-
-func getChannelWithOtherSubscription() *eventingv1alpha1.Channel {
- return &eventingv1alpha1.Channel{
- TypeMeta: metav1.TypeMeta{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: channelKind,
- },
- ObjectMeta: om(testNS, fromChannelName),
- Spec: eventingv1alpha1.ChannelSpec{
- Subscribable: &eventingduck.Subscribable{
- Subscribers: []eventingduck.ChannelSubscriberSpec{
- {
- ReplyURI: otherAddressableDNS,
- },
- },
- },
- },
- }
-}
-
-func om(namespace, name string) metav1.ObjectMeta {
- return metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name),
- }
-}
-func feedObjectMeta(namespace, generateName string) metav1.ObjectMeta {
- return metav1.ObjectMeta{
- Namespace: namespace,
- GenerateName: generateName,
- OwnerReferences: []metav1.OwnerReference{
- getOwnerReference(true),
- },
- }
-}
-
-func getOwnerReference(blockOwnerDeletion bool) metav1.OwnerReference {
- return metav1.OwnerReference{
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Subscription",
- Name: subscriptionName,
- Controller: &trueVal,
- BlockOwnerDeletion: &blockOwnerDeletion,
- }
-}
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index 697031fbfb7..8b36c02a3be 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -207,29 +207,445 @@ Import: github.com/knative/eventing/vendor/cloud.google.com/go
+===========================================================
+Import: github.com/knative/eventing/vendor/contrib.go.opencensus.io/exporter/stackdriver
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/Shopify/sarama
-Copyright (c) 2013 Shopify
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/aws/aws-sdk-go
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
+ APPENDIX: How to apply the Apache License to your work.
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
@@ -287,6 +703,214 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/census-instrumentation/opencensus-proto
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/cloudevents/sdk-go
@@ -2755,6 +3379,25 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/jmespath/go-jmespath
+
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/joho/godotenv
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
new file mode 100644
index 00000000000..e3338793b91
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/alert_policy_client.go
@@ -0,0 +1,278 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
+type AlertPolicyCallOptions struct {
+ ListAlertPolicies []gax.CallOption
+ GetAlertPolicy []gax.CallOption
+ CreateAlertPolicy []gax.CallOption
+ DeleteAlertPolicy []gax.CallOption
+ UpdateAlertPolicy []gax.CallOption
+}
+
+func defaultAlertPolicyClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &AlertPolicyCallOptions{
+ ListAlertPolicies: retry[[2]string{"default", "idempotent"}],
+ GetAlertPolicy: retry[[2]string{"default", "idempotent"}],
+ CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
+ DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}],
+ UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type AlertPolicyClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ alertPolicyClient monitoringpb.AlertPolicyServiceClient
+
+ // The call options for this service.
+ CallOptions *AlertPolicyCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewAlertPolicyClient creates a new alert policy service client.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Stackdriver Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be "unhealthy" and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the "Monitoring" tab in
+// Cloud Console (at https://console.cloud.google.com/).
+func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &AlertPolicyClient{
+ conn: conn,
+ CallOptions: defaultAlertPolicyCallOptions(),
+
+ alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AlertPolicyClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListAlertPolicies lists the existing alerting policies for the project.
+func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...)
+ it := &AlertPolicyIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
+ var resp *monitoringpb.ListAlertPoliciesResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.AlertPolicies, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetAlertPolicy gets a single alerting policy.
+func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateAlertPolicy creates a new alerting policy.
+func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteAlertPolicy deletes an alerting policy.
+func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
+// a new one or replace only certain fields in the current alerting policy by
+// specifying the fields to be updated via updateMask. Returns the
+// updated alerting policy.
+func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
+type AlertPolicyIterator struct {
+ items []*monitoringpb.AlertPolicy
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
+ var item *monitoringpb.AlertPolicy
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *AlertPolicyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *AlertPolicyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go
new file mode 100644
index 00000000000..eb9ef7f5325
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go
@@ -0,0 +1,94 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package monitoring is an auto-generated package for the
+// Stackdriver Monitoring API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Manages your Stackdriver Monitoring data and configurations. Most projects
+// must be associated with a Stackdriver account, with a few exceptions as
+// noted on the individual method pages.
+package monitoring // import "cloud.google.com/go/monitoring/apiv3"
+
+import (
+ "context"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/monitoring",
+ "https://www.googleapis.com/auth/monitoring.read",
+ "https://www.googleapis.com/auth/monitoring.write",
+ }
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+ const develPrefix = "devel +"
+
+ s := runtime.Version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ notSemverRune := func(r rune) bool {
+ return strings.IndexRune("0123456789.", r) < 0
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return "UNKNOWN"
+}
+
+const versionClient = "20181129"
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
new file mode 100644
index 00000000000..2e37d23ad84
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go
@@ -0,0 +1,361 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// GroupCallOptions contains the retry settings for each method of GroupClient.
+type GroupCallOptions struct {
+ ListGroups []gax.CallOption
+ GetGroup []gax.CallOption
+ CreateGroup []gax.CallOption
+ UpdateGroup []gax.CallOption
+ DeleteGroup []gax.CallOption
+ ListGroupMembers []gax.CallOption
+}
+
+func defaultGroupClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultGroupCallOptions() *GroupCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &GroupCallOptions{
+ ListGroups: retry[[2]string{"default", "idempotent"}],
+ GetGroup: retry[[2]string{"default", "idempotent"}],
+ CreateGroup: retry[[2]string{"default", "non_idempotent"}],
+ UpdateGroup: retry[[2]string{"default", "idempotent"}],
+ DeleteGroup: retry[[2]string{"default", "idempotent"}],
+ ListGroupMembers: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// GroupClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type GroupClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ groupClient monitoringpb.GroupServiceClient
+
+ // The call options for this service.
+ CallOptions *GroupCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewGroupClient creates a new group service client.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &GroupClient{
+ conn: conn,
+ CallOptions: defaultGroupCallOptions(),
+
+ groupClient: monitoringpb.NewGroupServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *GroupClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *GroupClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListGroups lists the existing groups.
+func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...)
+ it := &GroupIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
+ var resp *monitoringpb.ListGroupsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Group, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetGroup gets a single group.
+func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateGroup creates a new group.
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateGroup updates an existing group.
+// You can change any group attributes except name.
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteGroup deletes an existing group.
+func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListGroupMembers lists the monitored resources that are members of a group.
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...)
+ it := &MonitoredResourceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {
+ var resp *monitoringpb.ListGroupMembersResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Members, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GroupIterator manages a stream of *monitoringpb.Group.
+type GroupIterator struct {
+ items []*monitoringpb.Group
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *GroupIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *GroupIterator) Next() (*monitoringpb.Group, error) {
+ var item *monitoringpb.Group
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *GroupIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *GroupIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource.
+type MonitoredResourceIterator struct {
+ items []*monitoredrespb.MonitoredResource
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) {
+ var item *monitoredrespb.MonitoredResource
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
new file mode 100644
index 00000000000..029e073f789
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go
@@ -0,0 +1,452 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// MetricCallOptions contains the retry settings for each method of MetricClient.
+type MetricCallOptions struct {
+ ListMonitoredResourceDescriptors []gax.CallOption
+ GetMonitoredResourceDescriptor []gax.CallOption
+ ListMetricDescriptors []gax.CallOption
+ GetMetricDescriptor []gax.CallOption
+ CreateMetricDescriptor []gax.CallOption
+ DeleteMetricDescriptor []gax.CallOption
+ ListTimeSeries []gax.CallOption
+ CreateTimeSeries []gax.CallOption
+}
+
+func defaultMetricClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultMetricCallOptions() *MetricCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &MetricCallOptions{
+ ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListMetricDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetMetricDescriptor: retry[[2]string{"default", "idempotent"}],
+ CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}],
+ DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListTimeSeries: retry[[2]string{"default", "idempotent"}],
+ CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// MetricClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type MetricClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ metricClient monitoringpb.MetricServiceClient
+
+ // The call options for this service.
+ CallOptions *MetricCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewMetricClient creates a new metric service client.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &MetricClient{
+ conn: conn,
+ CallOptions: defaultMetricCallOptions(),
+
+ metricClient: monitoringpb.NewMetricServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *MetricClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...)
+ it := &MonitoredResourceDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ResourceDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account.
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...)
+ var resp *monitoredrespb.MonitoredResourceDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account.
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...)
+ it := &MetricDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
+ var resp *monitoringpb.ListMetricDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.MetricDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account.
+func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateMetricDescriptor creates a new metric descriptor.
+// User-created metric descriptors define
+// custom metrics (at /monitoring/custom-metrics).
+func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
+// custom metrics (at /monitoring/custom-metrics) can be deleted.
+func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account.
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...)
+ it := &TimeSeriesIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
+ var resp *monitoringpb.ListTimeSeriesResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.TimeSeries, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// CreateTimeSeries creates or adds data to one or more time series.
+// The response is empty if all time series in the request were written.
+// If any time series could not be written, a corresponding failure message is
+// included in the error response.
+func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor.
+type MetricDescriptorIterator struct {
+ items []*metricpb.MetricDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) {
+ var item *metricpb.MetricDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MetricDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MetricDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+ items []*monitoredrespb.MonitoredResourceDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ var item *monitoredrespb.MonitoredResourceDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries.
+type TimeSeriesIterator struct {
+ items []*monitoringpb.TimeSeries
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) {
+ var item *monitoringpb.TimeSeries
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
new file mode 100644
index 00000000000..eb82cc7bc69
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/notification_channel_client.go
@@ -0,0 +1,375 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
+type NotificationChannelCallOptions struct {
+ ListNotificationChannelDescriptors []gax.CallOption
+ GetNotificationChannelDescriptor []gax.CallOption
+ ListNotificationChannels []gax.CallOption
+ GetNotificationChannel []gax.CallOption
+ CreateNotificationChannel []gax.CallOption
+ UpdateNotificationChannel []gax.CallOption
+ DeleteNotificationChannel []gax.CallOption
+}
+
+func defaultNotificationChannelClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &NotificationChannelCallOptions{
+ ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}],
+ GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}],
+ ListNotificationChannels: retry[[2]string{"default", "idempotent"}],
+ GetNotificationChannel: retry[[2]string{"default", "idempotent"}],
+ CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
+ UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
+ DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type NotificationChannelClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ notificationChannelClient monitoringpb.NotificationChannelServiceClient
+
+ // The call options for this service.
+ CallOptions *NotificationChannelCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewNotificationChannelClient creates a new notification channel service client.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &NotificationChannelClient{
+ conn: conn,
+ CallOptions: defaultNotificationChannelCallOptions(),
+
+ notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *NotificationChannelClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
+// makes it possible for new channel types to be dynamically added.
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
+ var resp *monitoringpb.ListNotificationChannelDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ChannelDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
+// are expected / permitted for a notification channel of the given type.
+func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...)
+ var resp *monitoringpb.NotificationChannelDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListNotificationChannels lists the notification channels that have been created for the project.
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...)
+ it := &NotificationChannelIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
+ var resp *monitoringpb.ListNotificationChannelsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.NotificationChannels, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetNotificationChannel gets a single notification channel. The channel includes the relevant
+// configuration details with which the channel was created. However, the
+// response may truncate or omit passwords, API keys, or other private key
+// matter and thus the response may not be 100% identical to the information
+// that was supplied in the call to the create method.
+func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateNotificationChannel creates a new notification channel, representing a single notification
+// endpoint such as an email address, SMS number, or PagerDuty service.
+func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateNotificationChannel[0:len(c.CallOptions.CreateNotificationChannel):len(c.CallOptions.CreateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
+// remain unchanged.
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateNotificationChannel[0:len(c.CallOptions.UpdateNotificationChannel):len(c.CallOptions.UpdateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteNotificationChannel deletes a notification channel.
+func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteNotificationChannel[0:len(c.CallOptions.DeleteNotificationChannel):len(c.CallOptions.DeleteNotificationChannel)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
+type NotificationChannelDescriptorIterator struct {
+ items []*monitoringpb.NotificationChannelDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
+ var item *monitoringpb.NotificationChannelDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
+type NotificationChannelIterator struct {
+ items []*monitoringpb.NotificationChannel
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
+ var item *monitoringpb.NotificationChannel
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
new file mode 100644
index 00000000000..b2b514ba528
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/path_funcs.go
@@ -0,0 +1,107 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoring
+
+// GroupProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func GroupProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// GroupGroupPath returns the path for the group resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/groups/%s", project, group)
+// instead.
+func GroupGroupPath(project, group string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/groups/" +
+ group +
+ ""
+}
+
+// MetricProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func MetricProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// MetricMetricDescriptorPath returns the path for the metric descriptor resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/metricDescriptors/%s", project, metricDescriptor)
+// instead.
+func MetricMetricDescriptorPath(project, metricDescriptor string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/metricDescriptors/" +
+ metricDescriptor +
+ ""
+}
+
+// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/monitoredResourceDescriptors/%s", project, monitoredResourceDescriptor)
+// instead.
+func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/monitoredResourceDescriptors/" +
+ monitoredResourceDescriptor +
+ ""
+}
+
+// UptimeCheckProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func UptimeCheckProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// UptimeCheckUptimeCheckConfigPath returns the path for the uptime check config resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/uptimeCheckConfigs/%s", project, uptimeCheckConfig)
+// instead.
+func UptimeCheckUptimeCheckConfigPath(project, uptimeCheckConfig string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/uptimeCheckConfigs/" +
+ uptimeCheckConfig +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
new file mode 100644
index 00000000000..07a7ab8134e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/uptime_check_client.go
@@ -0,0 +1,361 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "math"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient.
+type UptimeCheckCallOptions struct {
+ ListUptimeCheckConfigs []gax.CallOption
+ GetUptimeCheckConfig []gax.CallOption
+ CreateUptimeCheckConfig []gax.CallOption
+ UpdateUptimeCheckConfig []gax.CallOption
+ DeleteUptimeCheckConfig []gax.CallOption
+ ListUptimeCheckIps []gax.CallOption
+}
+
+func defaultUptimeCheckClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("monitoring.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &UptimeCheckCallOptions{
+ ListUptimeCheckConfigs: retry[[2]string{"default", "idempotent"}],
+ GetUptimeCheckConfig: retry[[2]string{"default", "idempotent"}],
+ CreateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}],
+ UpdateUptimeCheckConfig: retry[[2]string{"default", "non_idempotent"}],
+ DeleteUptimeCheckConfig: retry[[2]string{"default", "idempotent"}],
+ ListUptimeCheckIps: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// UptimeCheckClient is a client for interacting with Stackdriver Monitoring API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type UptimeCheckClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ uptimeCheckClient monitoringpb.UptimeCheckServiceClient
+
+ // The call options for this service.
+ CallOptions *UptimeCheckCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewUptimeCheckClient creates a new uptime check service client.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// uptime check configurations in the Stackdriver Monitoring product. An uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud Console]
+// (http://console.cloud.google.com), selecting the appropriate project,
+// clicking on "Monitoring" on the left-hand side to navigate to Stackdriver,
+// and then clicking on "Uptime".
+func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultUptimeCheckClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &UptimeCheckClient{
+ conn: conn,
+ CallOptions: defaultUptimeCheckCallOptions(),
+
+ uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *UptimeCheckClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *UptimeCheckClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListUptimeCheckConfigs lists the existing valid uptime check configurations for the project,
+// leaving out any invalid configurations.
+func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListUptimeCheckConfigs[0:len(c.CallOptions.ListUptimeCheckConfigs):len(c.CallOptions.ListUptimeCheckConfigs)], opts...)
+ it := &UptimeCheckConfigIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) {
+ var resp *monitoringpb.ListUptimeCheckConfigsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.ListUptimeCheckConfigs(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.UptimeCheckConfigs, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// GetUptimeCheckConfig gets a single uptime check configuration.
+func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetUptimeCheckConfig[0:len(c.CallOptions.GetUptimeCheckConfig):len(c.CallOptions.GetUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.GetUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateUptimeCheckConfig creates a new uptime check configuration.
+func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateUptimeCheckConfig[0:len(c.CallOptions.CreateUptimeCheckConfig):len(c.CallOptions.CreateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.CreateUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateUptimeCheckConfig updates an uptime check configuration. You can either replace the entire
+// configuration with a new one or replace only certain fields in the current
+// configuration by specifying the fields to be updated via "updateMask".
+// Returns the updated configuration.
+func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateUptimeCheckConfig[0:len(c.CallOptions.UpdateUptimeCheckConfig):len(c.CallOptions.UpdateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.UpdateUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteUptimeCheckConfig deletes an uptime check configuration. Note that this method will fail
+// if the uptime check configuration is referenced by an alert policy or
+// other dependent configs that would be rendered invalid by the deletion.
+func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteUptimeCheckConfig[0:len(c.CallOptions.DeleteUptimeCheckConfig):len(c.CallOptions.DeleteUptimeCheckConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.uptimeCheckClient.DeleteUptimeCheckConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListUptimeCheckIps returns the list of IPs that checkers run from
+func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListUptimeCheckIps[0:len(c.CallOptions.ListUptimeCheckIps):len(c.CallOptions.ListUptimeCheckIps)], opts...)
+ it := &UptimeCheckIpIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) {
+ var resp *monitoringpb.ListUptimeCheckIpsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.uptimeCheckClient.ListUptimeCheckIps(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.UptimeCheckIps, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ return it
+}
+
+// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig.
+type UptimeCheckConfigIterator struct {
+ items []*monitoringpb.UptimeCheckConfig
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) {
+ var item *monitoringpb.UptimeCheckConfig
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckConfigIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckConfigIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp.
+type UptimeCheckIpIterator struct {
+ items []*monitoringpb.UptimeCheckIp
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
+ var item *monitoringpb.UptimeCheckIp
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckIpIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckIpIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/doc.go b/vendor/cloud.google.com/go/trace/apiv2/doc.go
new file mode 100644
index 00000000000..553d9de74fe
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/doc.go
@@ -0,0 +1,94 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package trace is an auto-generated package for the
+// Stackdriver Trace API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// Sends application trace data to Stackdriver Trace for viewing. Trace data
+// is
+// collected for all App Engine applications by default. Trace data from
+// other
+// applications can be provided using this API.
+package trace // import "cloud.google.com/go/trace/apiv2"
+
+import (
+ "context"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/trace.append",
+ }
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+ const develPrefix = "devel +"
+
+ s := runtime.Version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ notSemverRune := func(r rune) bool {
+ return strings.IndexRune("0123456789.", r) < 0
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return "UNKNOWN"
+}
+
+const versionClient = "20181129"
diff --git a/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
new file mode 100644
index 00000000000..80b8d40b58e
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/path_funcs.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+// ProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func ProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// SpanPath returns the path for the span resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/traces/%s/spans/%s", project, trace, span)
+// instead.
+func SpanPath(project, trace, span string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/traces/" +
+ trace +
+ "/spans/" +
+ span +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/trace/apiv2/trace_client.go b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
new file mode 100644
index 00000000000..1581d672176
--- /dev/null
+++ b/vendor/cloud.google.com/go/trace/apiv2/trace_client.go
@@ -0,0 +1,152 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package trace
+
+import (
+ "context"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+ BatchWriteSpans []gax.CallOption
+ CreateSpan []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("cloudtrace.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultCallOptions() *CallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ }
+ return &CallOptions{
+ BatchWriteSpans: retry[[2]string{"default", "non_idempotent"}],
+ CreateSpan: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// Client is a client for interacting with Stackdriver Trace API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type Client struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ client cloudtracepb.TraceServiceClient
+
+ // The call options for this service.
+ CallOptions *CallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewClient creates a new trace service client.
+//
+// This file describes an API for collecting and viewing traces and spans
+// within a trace. A Trace is a collection of spans corresponding to a single
+// operation or set of operations for an application. A span is an individual
+// timed event which forms a node of the trace tree. A single trace may
+// contain span(s) from multiple services.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ conn: conn,
+ CallOptions: defaultCallOptions(),
+
+ client: cloudtracepb.NewTraceServiceClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// BatchWriteSpans sends new spans to new or existing traces. You cannot update
+// existing spans.
+func (c *Client) BatchWriteSpans(ctx context.Context, req *cloudtracepb.BatchWriteSpansRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.BatchWriteSpans[0:len(c.CallOptions.BatchWriteSpans):len(c.CallOptions.BatchWriteSpans)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.client.BatchWriteSpans(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// CreateSpan creates a new span.
+func (c *Client) CreateSpan(ctx context.Context, req *cloudtracepb.Span, opts ...gax.CallOption) (*cloudtracepb.Span, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateSpan[0:len(c.CallOptions.CreateSpan):len(c.CallOptions.CreateSpan)], opts...)
+ var resp *cloudtracepb.Span
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.client.CreateSpan(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
new file mode 100644
index 00000000000..e491a9e7f78
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/github.com/knative/build/LICENSE b/vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
similarity index 100%
rename from vendor/github.com/knative/build/LICENSE
rename to vendor/contrib.go.opencensus.io/exporter/stackdriver/LICENSE
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go
new file mode 100644
index 00000000000..88835cc0fc3
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/label.go
@@ -0,0 +1,33 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+// Labels represents a set of Stackdriver Monitoring labels.
+type Labels struct {
+ m map[string]labelValue
+}
+
+type labelValue struct {
+ val, desc string
+}
+
+// Set stores a label with the given key, value and description,
+// overwriting any previous values with the given key.
+func (labels *Labels) Set(key, value, description string) {
+ if labels.m == nil {
+ labels.m = make(map[string]labelValue)
+ }
+ labels.m[key] = labelValue{value, description}
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
new file mode 100644
index 00000000000..9b3b7bf19ec
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
@@ -0,0 +1,547 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+/*
+The code in this file is responsible for converting OpenCensus Proto metrics
+directly to Stackdriver Metrics.
+*/
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path"
+
+ "github.com/golang/protobuf/ptypes/timestamp"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/trace"
+
+ "cloud.google.com/go/monitoring/apiv3"
+ distributionpb "google.golang.org/genproto/googleapis/api/distribution"
+ labelpb "google.golang.org/genproto/googleapis/api/label"
+ googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+
+ commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+)
+
+var errNilMetric = errors.New("expecting a non-nil metric")
+
+type metricPayload struct {
+ node *commonpb.Node
+ resource *resourcepb.Resource
+ metric *metricspb.Metric
+}
+
+// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring.
+func (se *statsExporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error {
+ if metric == nil {
+ return errNilMetric
+ }
+
+ payload := &metricPayload{
+ metric: metric,
+ resource: rsc,
+ node: node,
+ }
+ se.protoMetricsBundler.Add(payload, 1)
+
+ return nil
+}
+
+func (se *statsExporter) handleMetricsUpload(payloads []*metricPayload) error {
+ ctx, cancel := se.o.newContextWithTimeout()
+ defer cancel()
+
+ ctx, span := trace.StartSpan(
+ ctx,
+ "contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics",
+ trace.WithSampler(trace.NeverSample()),
+ )
+ defer span.End()
+
+ for _, payload := range payloads {
+ // Now create the metric descriptor remotely.
+ if err := se.createMetricDescriptor(ctx, payload.metric); err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ return err
+ }
+ }
+
+ var allTimeSeries []*monitoringpb.TimeSeries
+ for _, payload := range payloads {
+ tsl, err := se.protoMetricToTimeSeries(ctx, payload.node, payload.resource, payload.metric)
+ if err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ return err
+ }
+ allTimeSeries = append(allTimeSeries, tsl...)
+ }
+
+ // Now batch timeseries up and then export.
+ for start, end := 0, 0; start < len(allTimeSeries); start = end {
+ end = start + maxTimeSeriesPerUpload
+ if end > len(allTimeSeries) {
+ end = len(allTimeSeries)
+ }
+ batch := allTimeSeries[start:end]
+ ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch)
+ for _, ctsreq := range ctsreql {
+ if err := createTimeSeries(ctx, se.c, ctsreq); err != nil {
+ span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
+ // TODO(@odeke-em): Don't fail fast here, perhaps batch errors?
+ // return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (se *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) {
+ if len(ts) == 0 {
+ return nil
+ }
+
+ // Since there are scenarios in which Metrics with the same Type
+ // can be bunched in the same TimeSeries, we have to ensure that
+ // we create a unique CreateTimeSeriesRequest with entirely unique Metrics
+ // per TimeSeries, lest we'll encounter:
+ //
+ // err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written:
+ // Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered.
+ // Only one point can be written per TimeSeries per request.: timeSeries[2]
+ //
+ // This scenario happens when we are using the OpenCensus Agent in which multiple metrics
+ // are streamed by various client applications.
+ // See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73
+ uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
+ nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
+ seenMetrics := make(map[string]struct{})
+
+ for _, tti := range ts {
+ signature := tti.Metric.GetType()
+ if _, alreadySeen := seenMetrics[signature]; !alreadySeen {
+ uniqueTimeSeries = append(uniqueTimeSeries, tti)
+ seenMetrics[signature] = struct{}{}
+ } else {
+ nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti)
+ }
+ }
+
+ // UniqueTimeSeries can be bunched up together
+ // While for each nonUniqueTimeSeries, we have
+ // to make a unique CreateTimeSeriesRequest.
+ ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{
+ Name: monitoring.MetricProjectPath(se.o.ProjectID),
+ TimeSeries: uniqueTimeSeries,
+ })
+
+ // Now recursively also combine the non-unique TimeSeries
+ // that were singly added to nonUniqueTimeSeries.
+	// The reason is that we recursively recombine them
+	// into optimal batches because:
+ // * "a/b/c"
+ // * "a/b/c"
+ // * "x/y/z"
+ // * "a/b/c"
+ // * "x/y/z"
+ // * "p/y/z"
+ // * "d/y/z"
+ //
+ // should produce:
+ // CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"]
+ // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"]
+ // CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"]
+ nonUniqueRequests := se.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries)
+ ctsreql = append(ctsreql, nonUniqueRequests...)
+
+ return ctsreql
+}
+
+// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest
+// but it doesn't invoke any remote API.
+func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) ([]*monitoringpb.TimeSeries, error) {
+ if metric == nil {
+ return nil, errNilMetric
+ }
+
+ var resource = rsc
+ if metric.Resource != nil {
+ resource = metric.Resource
+ }
+
+ metricName, _, _, _ := metricProseFromProto(metric)
+ metricType, _ := se.metricTypeFromProto(metricName)
+ metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys()
+ metricKind, _ := protoMetricDescriptorTypeToMetricKind(metric)
+
+ timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.Timeseries))
+ for _, protoTimeSeries := range metric.Timeseries {
+ sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind)
+ if err != nil {
+ return nil, err
+ }
+
+ // Each TimeSeries has labelValues which MUST be correlated
+ // with that from the MetricDescriptor
+ labels, err := labelsPerTimeSeries(se.defaultLabels, metricLabelKeys, protoTimeSeries.GetLabelValues())
+ if err != nil {
+ // TODO: (@odeke-em) perhaps log this error from labels extraction, if non-nil.
+ continue
+ }
+ timeSeries = append(timeSeries, &monitoringpb.TimeSeries{
+ Metric: &googlemetricpb.Metric{
+ Type: metricType,
+ Labels: labels,
+ },
+ Resource: protoResourceToMonitoredResource(resource),
+ Points: sdPoints,
+ })
+ }
+
+ return timeSeries, nil
+}
+
+func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []*metricspb.LabelKey, labelValues []*metricspb.LabelValue) (map[string]string, error) {
+ labels := make(map[string]string)
+ // Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched.
+ for key, label := range defaults {
+ labels[sanitize(key)] = label.val
+ }
+
+ // Perform this sanity check now.
+ if len(labelKeys) != len(labelValues) {
+ return labels, fmt.Errorf("Length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
+ }
+
+ for i, labelKey := range labelKeys {
+ labelValue := labelValues[i]
+ labels[sanitize(labelKey.GetKey())] = labelValue.GetValue()
+ }
+
+ return labels, nil
+}
+
+func (se *statsExporter) protoMetricDescriptorToCreateMetricDescriptorRequest(ctx context.Context, metric *metricspb.Metric) (*monitoringpb.CreateMetricDescriptorRequest, error) {
+ // Otherwise, we encountered a cache-miss and
+ // should create the metric descriptor remotely.
+ inMD, err := se.protoToMonitoringMetricDescriptor(metric)
+ if err != nil {
+ return nil, err
+ }
+
+ cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
+ MetricDescriptor: inMD,
+ }
+
+ return cmrdesc, nil
+}
+
+// createMetricDescriptor creates a metric descriptor from the OpenCensus proto metric
+// and then creates it remotely using Stackdriver's API.
+func (se *statsExporter) createMetricDescriptor(ctx context.Context, metric *metricspb.Metric) error {
+ se.protoMu.Lock()
+ defer se.protoMu.Unlock()
+
+ name := metric.GetMetricDescriptor().GetName()
+ if _, created := se.protoMetricDescriptors[name]; created {
+ return nil
+ }
+
+ // Otherwise, we encountered a cache-miss and
+ // should create the metric descriptor remotely.
+ inMD, err := se.protoToMonitoringMetricDescriptor(metric)
+ if err != nil {
+ return err
+ }
+
+ var md *googlemetricpb.MetricDescriptor
+ if builtinMetric(inMD.Type) {
+ gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
+ Name: inMD.Name,
+ }
+ md, err = getMetricDescriptor(ctx, se.c, gmrdesc)
+ } else {
+
+ cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
+ MetricDescriptor: inMD,
+ }
+ md, err = createMetricDescriptor(ctx, se.c, cmrdesc)
+ }
+
+ if err == nil {
+ // Now record the metric as having been created.
+ se.protoMetricDescriptors[name] = md
+ }
+
+ return err
+}
+
+func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) {
+ for _, pt := range ts.Points {
+
+ // If we have a last value aggregation point i.e. MetricDescriptor_GAUGE
+ // StartTime should be nil.
+ startTime := ts.StartTimestamp
+ if metricKind == googlemetricpb.MetricDescriptor_GAUGE {
+ startTime = nil
+ }
+
+ spt, err := fromProtoPoint(startTime, pt)
+ if err != nil {
+ return nil, err
+ }
+ sptl = append(sptl, spt)
+ }
+ return sptl, nil
+}
+
+func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric) (*googlemetricpb.MetricDescriptor, error) {
+ if metric == nil {
+ return nil, errNilMetric
+ }
+
+ metricName, description, unit, _ := metricProseFromProto(metric)
+ metricType, _ := se.metricTypeFromProto(metricName)
+ displayName := se.displayName(metricName)
+ metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
+
+ sdm := &googlemetricpb.MetricDescriptor{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", se.o.ProjectID, metricType),
+ DisplayName: displayName,
+ Description: description,
+ Unit: unit,
+ Type: metricType,
+ MetricKind: metricKind,
+ ValueType: valueType,
+ Labels: labelDescriptorsFromProto(se.defaultLabels, metric.GetMetricDescriptor().GetLabelKeys()),
+ }
+
+ return sdm, nil
+}
+
+func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []*metricspb.LabelKey) []*labelpb.LabelDescriptor {
+ labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(defaults)+len(protoLabelKeys))
+
+ // Fill in the defaults first.
+ for key, lbl := range defaults {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(key),
+ Description: lbl.desc,
+ ValueType: labelpb.LabelDescriptor_STRING,
+ })
+ }
+
+ // Now fill in those from the metric.
+ for _, protoKey := range protoLabelKeys {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(protoKey.GetKey()),
+ Description: protoKey.GetDescription(),
+ ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags
+ })
+ }
+ return labelDescriptors
+}
+
+func metricProseFromProto(metric *metricspb.Metric) (name, description, unit string, ok bool) {
+ mname := metric.GetName()
+ if mname != "" {
+ name = mname
+ return
+ }
+
+ md := metric.GetMetricDescriptor()
+
+ name = md.GetName()
+ unit = md.GetUnit()
+ description = md.GetDescription()
+
+ if md != nil && md.Type == metricspb.MetricDescriptor_CUMULATIVE_INT64 {
+ // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
+ // because this view does not apply to the recorded values.
+ unit = stats.UnitDimensionless
+ }
+
+ return
+}
+
+func (se *statsExporter) metricTypeFromProto(name string) (string, bool) {
+ // TODO: (@odeke-em) support non-"custom.googleapis.com" metrics names.
+ name = path.Join("custom.googleapis.com", "opencensus", name)
+ return name, true
+}
+
+func fromProtoPoint(startTime *timestamp.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) {
+ if pt == nil {
+ return nil, nil
+ }
+
+ mptv, err := protoToMetricPoint(pt.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ mpt := &monitoringpb.Point{
+ Value: mptv,
+ Interval: &monitoringpb.TimeInterval{
+ StartTime: startTime,
+ EndTime: pt.Timestamp,
+ },
+ }
+ return mpt, nil
+}
+
+func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
+ if value == nil {
+ return nil, nil
+ }
+
+ var err error
+ var tval *monitoringpb.TypedValue
+ switch v := value.(type) {
+ default:
+ // All the other types are not yet handled.
+ // TODO: (@odeke-em, @songy23) talk to the Stackdriver team to determine
+ // the use cases for:
+ //
+ // *TypedValue_BoolValue
+ // *TypedValue_StringValue
+ //
+ // and then file feature requests on OpenCensus-Specs and then OpenCensus-Proto,
+ // lest we shall error here.
+ //
+ // TODO: Add conversion from SummaryValue when
+ // https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66
+ // has been figured out.
+ err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value)
+
+ case *metricspb.Point_Int64Value:
+ tval = &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v.Int64Value,
+ },
+ }
+
+ case *metricspb.Point_DoubleValue:
+ tval = &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v.DoubleValue,
+ },
+ }
+
+ case *metricspb.Point_DistributionValue:
+ dv := v.DistributionValue
+ var mv *monitoringpb.TypedValue_DistributionValue
+ if dv != nil {
+ var mean float64
+ if dv.Count > 0 {
+ mean = float64(dv.Sum) / float64(dv.Count)
+ }
+ mv = &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: &distributionpb.Distribution{
+ Count: dv.Count,
+ Mean: mean,
+ SumOfSquaredDeviation: dv.SumOfSquaredDeviation,
+ BucketCounts: bucketCounts(dv.Buckets),
+ },
+ }
+
+ if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil {
+ bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_)
+ if ok && bexp != nil && bexp.Explicit != nil {
+ mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{
+ Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
+ Bounds: bexp.Explicit.Bounds[:],
+ },
+ },
+ }
+ }
+ }
+ }
+ tval = &monitoringpb.TypedValue{Value: mv}
+ }
+
+ return tval, err
+}
+
+func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 {
+ bucketCounts := make([]int64, len(buckets))
+ for i, bucket := range buckets {
+ if bucket != nil {
+ bucketCounts[i] = bucket.Count
+ }
+ }
+ return bucketCounts
+}
+
+func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+ dt := m.GetMetricDescriptor()
+ if dt == nil {
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+
+ switch dt.Type {
+ case metricspb.MetricDescriptor_CUMULATIVE_INT64:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+
+ case metricspb.MetricDescriptor_CUMULATIVE_DOUBLE:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+
+ case metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+
+ case metricspb.MetricDescriptor_GAUGE_DOUBLE:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+
+ case metricspb.MetricDescriptor_GAUGE_INT64:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+
+ case metricspb.MetricDescriptor_GAUGE_DISTRIBUTION:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+
+ default:
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+}
+
+func protoResourceToMonitoredResource(rsp *resourcepb.Resource) *monitoredrespb.MonitoredResource {
+ if rsp == nil {
+ return &monitoredrespb.MonitoredResource{
+ Type: "global",
+ }
+ }
+ typ := rsp.Type
+ if typ == "" {
+ typ = "global"
+ }
+ mrsp := &monitoredrespb.MonitoredResource{
+ Type: typ,
+ }
+ if rsp.Labels != nil {
+ mrsp.Labels = make(map[string]string, len(rsp.Labels))
+ for k, v := range rsp.Labels {
+ mrsp.Labels[k] = v
+ }
+ }
+ return mrsp
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
new file mode 100644
index 00000000000..d6a23a8cf7d
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws_identity_doc_utils.go
@@ -0,0 +1,53 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/session"
+)
+
+// awsIdentityDocument is used to store parsed AWS Identity Document.
+type awsIdentityDocument struct {
+ // accountID is the AWS account number for the VM.
+ accountID string
+
+ // instanceID is the instance id of the instance.
+ instanceID string
+
+ // Region is the AWS region for the VM.
+ region string
+}
+
+// retrieveAWSIdentityDocument attempts to retrieve AWS Identity Document.
+// If the environment is AWS EC2 Instance then a valid document is retrieved.
+// Relevant attributes from the document are stored in awsIdentityDoc.
+// This is only done once.
+func retrieveAWSIdentityDocument() *awsIdentityDocument {
+ awsIdentityDoc := awsIdentityDocument{}
+ c := ec2metadata.New(session.New())
+ if c.Available() == false {
+ return nil
+ }
+ ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument()
+ if err != nil {
+ return nil
+ }
+ awsIdentityDoc.region = ec2InstanceIdentifyDocument.Region
+ awsIdentityDoc.instanceID = ec2InstanceIdentifyDocument.InstanceID
+ awsIdentityDoc.accountID = ec2InstanceIdentifyDocument.AccountID
+
+ return &awsIdentityDoc
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
new file mode 100644
index 00000000000..ceb754e5156
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -0,0 +1,90 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "log"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environment.
+type gcpMetadata struct {
+
+ // projectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ projectID string
+
+ // instanceID is the numeric VM instance identifier assigned by Compute Engine.
+ instanceID string
+
+ // clusterName is the name for the cluster the container is running in.
+ clusterName string
+
+ // containerName is the name of the container.
+ containerName string
+
+ // namespaceID is the identifier for the cluster namespace the container is running in
+ namespaceID string
+
+ // podID is the identifier for the pod the container is running in.
+ podID string
+
+ // zone is the Compute Engine zone in which the VM is running.
+ zone string
+}
+
+// retrieveGCPMetadata retrieves value of each Attribute from Metadata Server
+// in GKE container and GCE instance environment.
+// Some attributes are retrieved from the system environment.
+// This is only executed once, guarded by detectOnce.
+func retrieveGCPMetadata() *gcpMetadata {
+ gcpMetadata := gcpMetadata{}
+ var err error
+ gcpMetadata.instanceID, err = metadata.InstanceID()
+ if err != nil {
+ // Not a GCP environment
+ return &gcpMetadata
+ }
+
+ gcpMetadata.projectID, err = metadata.ProjectID()
+ logError(err)
+
+ gcpMetadata.zone, err = metadata.Zone()
+ logError(err)
+
+ clusterName, err := metadata.InstanceAttributeValue("cluster-name")
+ logError(err)
+ gcpMetadata.clusterName = strings.TrimSpace(clusterName)
+
+ // Following attributes are derived from environment variables. They are configured
+ // via yaml file. For details refer to:
+ // https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application
+ gcpMetadata.namespaceID = os.Getenv("NAMESPACE")
+ gcpMetadata.containerName = os.Getenv("CONTAINER_NAME")
+ gcpMetadata.podID = os.Getenv("HOSTNAME")
+
+ return &gcpMetadata
+}
+
+// logError logs error only if the error is present and it is not 'not defined'
+func logError(err error) {
+ if err != nil {
+ if !strings.Contains(err.Error(), "not defined") {
+ log.Printf("Error retrieving gcp metadata: %v", err)
+ }
+ }
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
new file mode 100644
index 00000000000..c07e55ce07c
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
@@ -0,0 +1,217 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package monitoredresource
+
+import (
+ "fmt"
+ "os"
+ "sync"
+)
+
+// Interface is a type that represents a monitored resource and
+// satisfies monitoredresource.Interface.
+
+ // MonitoredResource returns the resource type and resource labels.
+ MonitoredResource() (resType string, labels map[string]string)
+}
+
+// GKEContainer represents gke_container type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gke_container
+type GKEContainer struct {
+
+ // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ ProjectID string
+
+ // InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+ InstanceID string
+
+ // ClusterName is the name for the cluster the container is running in.
+ ClusterName string
+
+ // ContainerName is the name of the container.
+ ContainerName string
+
+ // NamespaceID is the identifier for the cluster namespace the container is running in
+ NamespaceID string
+
+ // PodID is the identifier for the pod the container is running in.
+ PodID string
+
+ // Zone is the Compute Engine zone in which the VM is running.
+ Zone string
+}
+
+// MonitoredResource returns resource type and resource labels for GKEContainer
+func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "project_id": gke.ProjectID,
+ "instance_id": gke.InstanceID,
+ "zone": gke.Zone,
+ "cluster_name": gke.ClusterName,
+ "container_name": gke.ContainerName,
+ "namespace_id": gke.NamespaceID,
+ "pod_id": gke.PodID,
+ }
+ return "gke_container", labels
+}
+
+// GCEInstance represents gce_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_gce_instance
+type GCEInstance struct {
+
+ // ProjectID is the identifier of the GCP project associated with this resource, such as "my-project".
+ ProjectID string
+
+ // InstanceID is the numeric VM instance identifier assigned by Compute Engine.
+ InstanceID string
+
+ // Zone is the Compute Engine zone in which the VM is running.
+ Zone string
+}
+
+// MonitoredResource returns resource type and resource labels for GCEInstance
+func (gce *GCEInstance) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "project_id": gce.ProjectID,
+ "instance_id": gce.InstanceID,
+ "zone": gce.Zone,
+ }
+ return "gce_instance", labels
+}
+
+// AWSEC2Instance represents aws_ec2_instance type monitored resource.
+// For definition refer to
+// https://cloud.google.com/monitoring/api/resources#tag_aws_ec2_instance
+type AWSEC2Instance struct {
+
+ // AWSAccount is the AWS account number for the VM.
+ AWSAccount string
+
+ // InstanceID is the instance id of the instance.
+ InstanceID string
+
+ // Region is the AWS region for the VM. The format of this field is "aws:{region}",
+ // where supported values for {region} are listed at
+ // http://docs.aws.amazon.com/general/latest/gr/rande.html.
+ Region string
+}
+
+// MonitoredResource returns resource type and resource labels for AWSEC2Instance
+func (aws *AWSEC2Instance) MonitoredResource() (resType string, labels map[string]string) {
+ labels = map[string]string{
+ "aws_account": aws.AWSAccount,
+ "instance_id": aws.InstanceID,
+ "region": aws.Region,
+ }
+ return "aws_ec2_instance", labels
+}
+
+// Autodetect auto detects monitored resources based on
+// the environment where the application is running.
+// It supports detection of following resource types
+// 1. gke_container:
+// 2. gce_instance:
+// 3. aws_ec2_instance:
+//
+// Returns an Interface which implements MonitoredResource().
+// For resource definition go to https://cloud.google.com/monitoring/api/resources
+func Autodetect() Interface {
+ return func() Interface {
+ var autoDetected Interface
+ var awsIdentityDoc *awsIdentityDocument
+ var gcpMetadata *gcpMetadata
+ detectOnce.Do(func() {
+
+ // First attempts to retrieve AWS Identity Doc and GCP metadata.
+			// It then determines the resource type.
+			// In GCP and AWS environments both funcs finish quickly. However,
+			// in an environment other than those (e.g. a local laptop) it
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ go func() {
+ defer wg.Done()
+ awsIdentityDoc = retrieveAWSIdentityDocument()
+ }()
+ go func() {
+ defer wg.Done()
+ gcpMetadata = retrieveGCPMetadata()
+ }()
+
+ wg.Wait()
+ autoDetected = detectResourceType(awsIdentityDoc, gcpMetadata)
+ })
+ return autoDetected
+ }()
+
+}
+
+// createAWSEC2InstanceMonitoredResource creates a aws_ec2_instance monitored resource
+// awsIdentityDoc contains AWS EC2 specific attributes.
+func createAWSEC2InstanceMonitoredResource(awsIdentityDoc *awsIdentityDocument) *AWSEC2Instance {
+ awsInstance := AWSEC2Instance{
+ AWSAccount: awsIdentityDoc.accountID,
+ InstanceID: awsIdentityDoc.instanceID,
+ Region: fmt.Sprintf("aws:%s", awsIdentityDoc.region),
+ }
+ return &awsInstance
+}
+
+// createGCEInstanceMonitoredResource creates a gce_instance monitored resource
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance {
+ gceInstance := GCEInstance{
+ ProjectID: gcpMetadata.projectID,
+ InstanceID: gcpMetadata.instanceID,
+ Zone: gcpMetadata.zone,
+ }
+ return &gceInstance
+}
+
+// createGKEContainerMonitoredResource creates a gke_container monitored resource
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer {
+ gkeContainer := GKEContainer{
+ ProjectID: gcpMetadata.projectID,
+ InstanceID: gcpMetadata.instanceID,
+ Zone: gcpMetadata.zone,
+ ContainerName: gcpMetadata.containerName,
+ ClusterName: gcpMetadata.clusterName,
+ NamespaceID: gcpMetadata.namespaceID,
+ PodID: gcpMetadata.podID,
+ }
+ return &gkeContainer
+}
+
+// detectOnce is used to make sure GCP and AWS metadata detect function executes only once.
+var detectOnce sync.Once
+
+// detectResourceType determines the resource type.
+// awsIdentityDoc contains AWS EC2 attributes. nil if it is not AWS EC2 environment
+// gcpMetadata contains GCP (GKE or GCE) specific attributes.
+func detectResourceType(awsIdentityDoc *awsIdentityDocument, gcpMetadata *gcpMetadata) Interface {
+ if os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+ gcpMetadata != nil && gcpMetadata.instanceID != "" {
+ return createGKEContainerMonitoredResource(gcpMetadata)
+ } else if gcpMetadata != nil && gcpMetadata.instanceID != "" {
+ return createGCEInstanceMonitoredResource(gcpMetadata)
+ } else if awsIdentityDoc != nil {
+ return createAWSEC2InstanceMonitoredResource(awsIdentityDoc)
+ }
+ return nil
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
new file mode 100644
index 00000000000..184bb1d435e
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "strings"
+ "unicode"
+)
+
+const labelKeySizeLimit = 100
+
+// sanitize returns a string that is truncated to 100 characters if it's too
+// long, and replaces non-alphanumeric characters to underscores.
+// A "key"/"key_" prefix is added when the result would start with a digit or
+// an underscore, so the final string may slightly exceed the size limit.
+func sanitize(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ if len(s) > labelKeySizeLimit {
+ s = s[:labelKeySizeLimit]
+ }
+ s = strings.Map(sanitizeRune, s)
+ // After strings.Map every rune is a letter, digit, or '_', so the
+ // single-byte checks on s[0] below are safe.
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ if s[0] == '_' {
+ s = "key" + s
+ }
+ return s
+}
+
+// sanitizeRune converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
new file mode 100644
index 00000000000..595377368e2
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
@@ -0,0 +1,346 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package stackdriver contains the OpenCensus exporters for
+// Stackdriver Monitoring and Stackdriver Tracing.
+//
+// This exporter can be used to send metrics to Stackdriver Monitoring and traces
+// to Stackdriver trace.
+//
+// The package uses Application Default Credentials to authenticate by default.
+// See: https://developers.google.com/identity/protocols/application-default-credentials
+//
+// Alternatively, pass the authentication options in both the MonitoringClientOptions
+// and the TraceClientOptions fields of Options.
+//
+// Stackdriver Monitoring
+//
+// This exporter support exporting OpenCensus views to Stackdriver Monitoring.
+// Each registered view becomes a metric in Stackdriver Monitoring, with the
+// tags becoming labels.
+//
+// The aggregation function determines the metric kind: LastValue aggregations
+// generate Gauge metrics and all other aggregations generate Cumulative metrics.
+//
+// In order to be able to push your stats to Stackdriver Monitoring, you must:
+//
+// 1. Create a Cloud project: https://support.google.com/cloud/answer/6251787?hl=en
+// 2. Enable billing: https://support.google.com/cloud/answer/6288653#new-billing
+// 3. Enable the Stackdriver Monitoring API: https://console.cloud.google.com/apis/dashboard
+//
+// These steps enable the API but don't require that your app is hosted on Google Cloud Platform.
+//
+// Stackdriver Trace
+//
+// This exporter supports exporting Trace Spans to Stackdriver Trace. It also
+// supports the Google "Cloud Trace" propagation format header.
+package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "time"
+
+ traceapi "cloud.google.com/go/trace/apiv2"
+ "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.opencensus.io/trace"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/api/option"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+
+ commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+)
+
+// Options contains options for configuring the exporter.
+type Options struct {
+ // ProjectID is the identifier of the Stackdriver
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials
+ ProjectID string
+
+ // OnError is the hook to be called when there is
+ // an error uploading the stats or tracing data.
+ // If no custom hook is set, errors are logged.
+ // Optional.
+ OnError func(err error)
+
+ // MonitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ MonitoringClientOptions []option.ClientOption
+
+ // TraceClientOptions are additional options to be passed
+ // to the underlying Stackdriver Trace API client.
+ // Optional.
+ TraceClientOptions []option.ClientOption
+
+ // BundleDelayThreshold determines the max amount of time
+ // the exporter can wait before uploading view data or trace spans to
+ // the backend.
+ // Optional.
+ BundleDelayThreshold time.Duration
+
+ // BundleCountThreshold determines how many view data events or trace spans
+ // can be buffered before batch uploading them to the backend.
+ // Optional.
+ BundleCountThreshold int
+
+ // TraceSpansBufferMaxBytes is the maximum size (in bytes) of spans that
+ // will be buffered in memory before being dropped.
+ //
+ // If unset, a default of 8MB will be used.
+ TraceSpansBufferMaxBytes int
+
+ // Resource sets the MonitoredResource against which all views will be
+ // recorded by this exporter.
+ //
+ // All Stackdriver metrics created by this exporter are custom metrics,
+ // so only a limited number of MonitoredResource types are supported, see:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+ //
+ // An important consideration when setting the Resource here is that
+ // Stackdriver Monitoring only allows a single writer per
+ // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+ // A TimeSeries is uniquely defined by the metric type name
+ // (constructed from the view name and the MetricPrefix), the Resource field,
+ // and the set of label key/value pairs (in OpenCensus terminology: tag).
+ //
+ // If no custom Resource is set, a default MonitoredResource
+ // with type global and no resource labels will be used. If you explicitly
+ // set this field, you may also want to set custom DefaultMonitoringLabels.
+ //
+ // Deprecated: Use MonitoredResource instead.
+ Resource *monitoredrespb.MonitoredResource
+
+ // MonitoredResource sets the MonitoredResource against which all views will be
+ // recorded by this exporter.
+ //
+ // All Stackdriver metrics created by this exporter are custom metrics,
+ // so only a limited number of MonitoredResource types are supported, see:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#which-resource
+ //
+ // An important consideration when setting the MonitoredResource here is that
+ // Stackdriver Monitoring only allows a single writer per
+ // TimeSeries, see: https://cloud.google.com/monitoring/api/v3/metrics-details#intro-time-series
+ // A TimeSeries is uniquely defined by the metric type name
+ // (constructed from the view name and the MetricPrefix), the MonitoredResource field,
+ // and the set of label key/value pairs (in OpenCensus terminology: tag).
+ //
+ // If no custom MonitoredResource is set AND if Resource is also not set then
+ // a default MonitoredResource with type global and no resource labels will be used.
+ // If you explicitly set this field, you may also want to set custom DefaultMonitoringLabels.
+ //
+ // This field replaces Resource field. If this is set then it will override the
+ // Resource field.
+ // Optional, but encouraged.
+ MonitoredResource monitoredresource.Interface
+
+ // MetricPrefix overrides the prefix of a Stackdriver metric display names.
+ // Optional. If unset defaults to "OpenCensus/".
+ // Deprecated: Provide GetMetricDisplayName to change the display name of
+ // the metric.
+ // If GetMetricDisplayName is non-nil, this option is ignored.
+ MetricPrefix string
+
+ // GetMetricDisplayName allows customizing the display name for the metric
+ // associated with the given view. By default it will be:
+ // MetricPrefix + view.Name
+ GetMetricDisplayName func(view *view.View) string
+
+ // GetMetricType allows customizing the metric type for the given view.
+ // By default, it will be:
+ // "custom.googleapis.com/opencensus/" + view.Name
+ //
+ // See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
+ GetMetricType func(view *view.View) string
+
+ // DefaultTraceAttributes will be appended to every span that is exported to
+ // Stackdriver Trace.
+ DefaultTraceAttributes map[string]interface{}
+
+ // DefaultMonitoringLabels are labels added to every metric created by this
+ // exporter in Stackdriver Monitoring.
+ //
+ // If unset, this defaults to a single label with key "opencensus_task" and
+ // value "go-<pid>@<hostname>". This default ensures that the set of labels
+ // together with the default Resource (global) are unique to this
+ // process, as required by Stackdriver Monitoring.
+ //
+ // If you set DefaultMonitoringLabels, make sure that the Resource field
+ // together with these labels is unique to the
+ // current process. This is to ensure that there is only a single writer to
+ // each TimeSeries in Stackdriver.
+ //
+ // Set this to &Labels{} (a pointer to an empty Labels) to avoid getting the
+ // default "opencensus_task" label. You should only do this if you know that
+ // the Resource you set uniquely identifies this Go process.
+ DefaultMonitoringLabels *Labels
+
+ // Context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Stackdriver
+ // trace and metric clients, and then every time a new batch of traces or
+ // stats needs to be uploaded.
+ //
+ // Do not set a timeout on this context. Instead, set the Timeout option.
+ //
+ // If unset, context.Background() will be used.
+ Context context.Context
+
+ // Timeout for all API calls. If not set, defaults to 5 seconds.
+ Timeout time.Duration
+
+ // GetMonitoredResource may be provided to supply the details of the
+ // monitored resource dynamically based on the tags associated with each
+ // data point. Most users will not need to set this, but should instead
+ // set the MonitoredResource field.
+ //
+ // GetMonitoredResource may add or remove tags by returning a new set of
+ // tags. It is safe for the function to mutate its argument and return it.
+ //
+ // See the documentation on the MonitoredResource field for guidance on the
+ // interaction between monitored resources and labels.
+ //
+ // The MonitoredResource field is ignored if this field is set to a non-nil
+ // value.
+ GetMonitoredResource func(*view.View, []tag.Tag) ([]tag.Tag, monitoredresource.Interface)
+}
+
+// defaultTimeout is used for API calls when Options.Timeout is unset.
+const defaultTimeout = 5 * time.Second
+
+// Exporter is a stats and trace exporter that uploads data to Stackdriver.
+//
+// You can create a single Exporter and register it as both a trace exporter
+// (to export to Stackdriver Trace) and a stats exporter (to integrate with
+// Stackdriver Monitoring).
+type Exporter struct {
+ traceExporter *traceExporter
+ statsExporter *statsExporter
+}
+
+// NewExporter creates a new Exporter that implements both stats.Exporter and
+// trace.Exporter.
+func NewExporter(o Options) (*Exporter, error) {
+ if o.ProjectID == "" {
+ // Fall back to the project associated with the Application Default
+ // Credentials when no ProjectID was supplied.
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ creds, err := google.FindDefaultCredentials(ctx, traceapi.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("stackdriver: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("stackdriver: no project found with application default credentials")
+ }
+ o.ProjectID = creds.ProjectID
+ }
+
+ // MonitoredResource, when set, takes precedence over the deprecated
+ // Resource field.
+ if o.MonitoredResource != nil {
+ o.Resource = convertMonitoredResourceToPB(o.MonitoredResource)
+ }
+
+ se, err := newStatsExporter(o)
+ if err != nil {
+ return nil, err
+ }
+ te, err := newTraceExporter(o)
+ if err != nil {
+ return nil, err
+ }
+ return &Exporter{
+ statsExporter: se,
+ traceExporter: te,
+ }, nil
+}
+
+// ExportView exports to the Stackdriver Monitoring if view data
+// has one or more rows. It delegates to the embedded stats exporter.
+func (e *Exporter) ExportView(vd *view.Data) {
+ e.statsExporter.ExportView(vd)
+}
+
+// ExportMetric exports OpenCensus Metrics to Stackdriver Monitoring.
+// It delegates to the embedded stats exporter.
+func (e *Exporter) ExportMetric(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric) error {
+ return e.statsExporter.ExportMetric(ctx, node, rsc, metric)
+}
+
+// ExportSpan exports a SpanData to Stackdriver Trace. When
+// DefaultTraceAttributes are configured they are merged into the span first.
+func (e *Exporter) ExportSpan(sd *trace.SpanData) {
+ if len(e.traceExporter.o.DefaultTraceAttributes) > 0 {
+ sd = e.sdWithDefaultTraceAttributes(sd)
+ }
+ e.traceExporter.ExportSpan(sd)
+}
+
+// sdWithDefaultTraceAttributes returns a shallow copy of sd whose Attributes
+// map contains the configured defaults merged with the span's own attributes.
+// The span's own attributes are written last, so they win on key collisions.
+// The original SpanData is not mutated.
+func (e *Exporter) sdWithDefaultTraceAttributes(sd *trace.SpanData) *trace.SpanData {
+ newSD := *sd
+ newSD.Attributes = make(map[string]interface{})
+ for k, v := range e.traceExporter.o.DefaultTraceAttributes {
+ newSD.Attributes[k] = v
+ }
+ for k, v := range sd.Attributes {
+ newSD.Attributes[k] = v
+ }
+ return &newSD
+}
+
+// Flush waits for exported data to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose recent stats or spans.
+func (e *Exporter) Flush() {
+ e.statsExporter.Flush()
+ e.traceExporter.Flush()
+}
+
+// handleError routes an upload error to the user-supplied OnError hook, or
+// logs it when no hook is configured.
+func (o Options) handleError(err error) {
+ if o.OnError != nil {
+ o.OnError(err)
+ return
+ }
+ log.Printf("Failed to export to Stackdriver: %v", err)
+}
+
+// newContextWithTimeout derives a context for a single API call from
+// Options.Context (or context.Background()) with Options.Timeout applied
+// (defaultTimeout when unset). The caller must invoke the returned cancel
+// function.
+func (o Options) newContextWithTimeout() (context.Context, func()) {
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ timeout := o.Timeout
+ if timeout <= 0 {
+ timeout = defaultTimeout
+ }
+ return context.WithTimeout(ctx, timeout)
+}
+
+// convertMonitoredResourceToPB converts MonitoredResource data in to
+// protocol buffer. The label map is copied so the proto does not alias the
+// map returned by the interface implementation.
+func convertMonitoredResourceToPB(mr monitoredresource.Interface) *monitoredrespb.MonitoredResource {
+ mrpb := new(monitoredrespb.MonitoredResource)
+ var labels map[string]string
+ mrpb.Type, labels = mr.MonitoredResource()
+ mrpb.Labels = make(map[string]string)
+ for k, v := range labels {
+ mrpb.Labels[k] = v
+ }
+ return mrpb
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
new file mode 100644
index 00000000000..ca82ca71b40
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
@@ -0,0 +1,571 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go.opencensus.io"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.opencensus.io/trace"
+
+ "cloud.google.com/go/monitoring/apiv3"
+ "github.com/golang/protobuf/ptypes/timestamp"
+ "google.golang.org/api/option"
+ "google.golang.org/api/support/bundler"
+ distributionpb "google.golang.org/genproto/googleapis/api/distribution"
+ labelpb "google.golang.org/genproto/googleapis/api/label"
+ "google.golang.org/genproto/googleapis/api/metric"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
+)
+
+const (
+ // maxTimeSeriesPerUpload caps how many TimeSeries go into one
+ // CreateTimeSeriesRequest (see makeReq).
+ maxTimeSeriesPerUpload = 200
+ opencensusTaskKey = "opencensus_task"
+ opencensusTaskDescription = "Opencensus task identifier"
+ defaultDisplayNamePrefix = "OpenCensus"
+ // version is this exporter's own release version, reported in userAgent.
+ version = "0.10.0"
+)
+
+// userAgent identifies the OpenCensus library and exporter versions on API calls.
+var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencensus.Version(), version)
+
+// statsExporter exports stats to the Stackdriver Monitoring.
+type statsExporter struct {
+ o Options
+
+ // Bundlers batch view data and proto metrics before upload.
+ viewDataBundler *bundler.Bundler
+ protoMetricsBundler *bundler.Bundler
+
+ createdViewsMu sync.Mutex
+ createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely
+
+ protoMu sync.Mutex
+ protoMetricDescriptors map[string]*metricpb.MetricDescriptor // Saves the metric descriptors that were already created remotely
+
+ c *monitoring.MetricClient
+ defaultLabels map[string]labelValue
+}
+
+// errBlankProjectID is returned by newStatsExporter for an empty ProjectID.
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring.
+// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent
+// invocations of NewExporter with the same ProjectID will return an error.
+// NOTE(review): the per-ProjectID uniqueness claim above is not enforced in
+// this function — verify where (or whether) it is enforced upstream.
+func newStatsExporter(o Options) (*statsExporter, error) {
+ if strings.TrimSpace(o.ProjectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ opts := append(o.MonitoringClientOptions, option.WithUserAgent(userAgent))
+ ctx, cancel := o.newContextWithTimeout()
+ defer cancel()
+ client, err := monitoring.NewMetricClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ e := &statsExporter{
+ c: client,
+ o: o,
+ createdViews: make(map[string]*metricpb.MetricDescriptor),
+ protoMetricDescriptors: make(map[string]*metricpb.MetricDescriptor),
+ }
+
+ // Default to the "opencensus_task" label unless the user supplied labels.
+ if o.DefaultMonitoringLabels != nil {
+ e.defaultLabels = o.DefaultMonitoringLabels.m
+ } else {
+ e.defaultLabels = map[string]labelValue{
+ opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
+ }
+ }
+
+ e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
+ vds := bundle.([]*view.Data)
+ e.handleUpload(vds...)
+ })
+ e.protoMetricsBundler = bundler.NewBundler((*metricPayload)(nil), func(bundle interface{}) {
+ payloads := bundle.([]*metricPayload)
+ e.handleMetricsUpload(payloads)
+ })
+ if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 {
+ e.viewDataBundler.DelayThreshold = delayThreshold
+ e.protoMetricsBundler.DelayThreshold = delayThreshold
+ }
+ if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 {
+ e.viewDataBundler.BundleCountThreshold = countThreshold
+ e.protoMetricsBundler.BundleCountThreshold = countThreshold
+ }
+ return e, nil
+}
+
+// getMonitoredResource resolves the monitored resource for one row of view
+// data: the GetMonitoredResource hook wins when set (and may rewrite the
+// tags); otherwise Options.Resource is used, falling back to the "global"
+// resource type.
+func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) {
+ if get := e.o.GetMonitoredResource; get != nil {
+ newTags, mr := get(v, tags)
+ return newTags, convertMonitoredResourceToPB(mr)
+ } else {
+ resource := e.o.Resource
+ if resource == nil {
+ resource = &monitoredrespb.MonitoredResource{
+ Type: "global",
+ }
+ }
+ return tags, resource
+ }
+}
+
+// ExportView exports to the Stackdriver Monitoring if view data
+// has one or more rows. Data is queued on the bundler; a full bundler buffer
+// is surfaced through the error handler rather than blocking.
+func (e *statsExporter) ExportView(vd *view.Data) {
+ if len(vd.Rows) == 0 {
+ return
+ }
+ err := e.viewDataBundler.Add(vd, 1)
+ switch err {
+ case nil:
+ return
+ case bundler.ErrOverflow:
+ e.o.handleError(errors.New("failed to upload: buffer full"))
+ default:
+ e.o.handleError(err)
+ }
+}
+
+// getTaskValue returns a task label value in the format of
+// "go-<pid>@<hostname>". Falls back to "localhost" when the hostname
+// cannot be determined.
+func getTaskValue() string {
+ hostname, err := os.Hostname()
+ if err != nil {
+ hostname = "localhost"
+ }
+ return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname
+}
+
+// handleUpload handles uploading a slice
+// of Data, as well as error handling.
+func (e *statsExporter) handleUpload(vds ...*view.Data) {
+ if err := e.uploadStats(vds); err != nil {
+ e.o.handleError(err)
+ }
+}
+
+// Flush waits for exported view data and metrics to be uploaded.
+//
+// This is useful if your program is ending and you do not
+// want to lose data that hasn't yet been exported.
+func (e *statsExporter) Flush() {
+ e.viewDataBundler.Flush()
+ e.protoMetricsBundler.Flush()
+}
+
+// uploadStats ensures a metric descriptor exists for every view, then uploads
+// the batched time series. It returns on the first failure.
+func (e *statsExporter) uploadStats(vds []*view.Data) error {
+ ctx, cancel := e.o.newContextWithTimeout()
+ defer cancel()
+ // Never sample this internal span to avoid exporting the exporter's own traces.
+ ctx, span := trace.StartSpan(
+ ctx,
+ "contrib.go.opencensus.io/exporter/stackdriver.uploadStats",
+ trace.WithSampler(trace.NeverSample()),
+ )
+ defer span.End()
+
+ for _, vd := range vds {
+ if err := e.createMeasure(ctx, vd.View); err != nil {
+ // Status code 2 is the gRPC "UNKNOWN" code.
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ return err
+ }
+ }
+ for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) {
+ if err := createTimeSeries(ctx, e.c, req); err != nil {
+ span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+ // TODO(jbd): Don't fail fast here, batch errors?
+ return err
+ }
+ }
+ return nil
+}
+
+// makeReq converts view data rows into CreateTimeSeriesRequests, emitting at
+// most `limit` TimeSeries per request batch. Each row becomes one TimeSeries
+// whose labels merge the exporter defaults with the row's tags and whose
+// monitored resource comes from getMonitoredResource.
+// Receiver renamed se -> e for consistency with the other statsExporter methods.
+func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest {
+ var reqs []*monitoringpb.CreateTimeSeriesRequest
+
+ var allTimeSeries []*monitoringpb.TimeSeries
+ for _, vd := range vds {
+ for _, row := range vd.Rows {
+ // Copy the tags so the GetMonitoredResource hook may mutate them safely.
+ tags, resource := e.getMonitoredResource(vd.View, append([]tag.Tag(nil), row.Tags...))
+ ts := &monitoringpb.TimeSeries{
+ Metric: &metricpb.Metric{
+ Type: e.metricType(vd.View),
+ Labels: newLabels(e.defaultLabels, tags),
+ },
+ Resource: resource,
+ Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)},
+ }
+ allTimeSeries = append(allTimeSeries, ts)
+ }
+ }
+
+ // Batch the accumulated series into requests of at most `limit` entries.
+ var timeSeries []*monitoringpb.TimeSeries
+ for _, ts := range allTimeSeries {
+ timeSeries = append(timeSeries, ts)
+ if len(timeSeries) == limit {
+ ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries)
+ reqs = append(reqs, ctsreql...)
+ timeSeries = timeSeries[:0]
+ }
+ }
+
+ if len(timeSeries) > 0 {
+ ctsreql := e.combineTimeSeriesToCreateTimeSeriesRequest(timeSeries)
+ reqs = append(reqs, ctsreql...)
+ }
+ return reqs
+}
+
+// viewToMetricDescriptor builds a Stackdriver MetricDescriptor for a view.
+// LastValue aggregations map to GAUGE metrics; all others map to CUMULATIVE.
+// An error is returned for unsupported aggregation types.
+// Note: ctx is currently unused here.
+func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View) (*metricpb.MetricDescriptor, error) {
+ m := v.Measure
+ agg := v.Aggregation
+ viewName := v.Name
+
+ metricType := e.metricType(v)
+ var valueType metricpb.MetricDescriptor_ValueType
+ unit := m.Unit()
+ // Default metric Kind
+ metricKind := metricpb.MetricDescriptor_CUMULATIVE
+
+ switch agg.Type {
+ case view.AggTypeCount:
+ valueType = metricpb.MetricDescriptor_INT64
+ // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
+ // because this view does not apply to the recorded values.
+ unit = stats.UnitDimensionless
+ case view.AggTypeSum:
+ switch m.(type) {
+ case *stats.Int64Measure:
+ valueType = metricpb.MetricDescriptor_INT64
+ case *stats.Float64Measure:
+ valueType = metricpb.MetricDescriptor_DOUBLE
+ }
+ case view.AggTypeDistribution:
+ valueType = metricpb.MetricDescriptor_DISTRIBUTION
+ case view.AggTypeLastValue:
+ metricKind = metricpb.MetricDescriptor_GAUGE
+ switch m.(type) {
+ case *stats.Int64Measure:
+ valueType = metricpb.MetricDescriptor_INT64
+ case *stats.Float64Measure:
+ valueType = metricpb.MetricDescriptor_DOUBLE
+ }
+ default:
+ return nil, fmt.Errorf("unsupported aggregation type: %s", agg.Type.String())
+ }
+
+ var displayName string
+ if e.o.GetMetricDisplayName == nil {
+ displayName = e.displayName(viewName)
+ } else {
+ displayName = e.o.GetMetricDisplayName(v)
+ }
+
+ res := &metricpb.MetricDescriptor{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType),
+ DisplayName: displayName,
+ Description: v.Description,
+ Unit: unit,
+ Type: metricType,
+ MetricKind: metricKind,
+ ValueType: valueType,
+ Labels: newLabelDescriptors(e.defaultLabels, v.TagKeys),
+ }
+ return res, nil
+}
+
+// viewToCreateMetricDescriptorRequest wraps viewToMetricDescriptor's output
+// into a CreateMetricDescriptorRequest addressed to this exporter's project.
+func (e *statsExporter) viewToCreateMetricDescriptorRequest(ctx context.Context, v *view.View) (*monitoringpb.CreateMetricDescriptorRequest, error) {
+ inMD, err := e.viewToMetricDescriptor(ctx, v)
+ if err != nil {
+ return nil, err
+ }
+
+ cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
+ MetricDescriptor: inMD,
+ }
+ return cmrdesc, nil
+}
+
+// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
+// An error will be returned if there is already a metric descriptor created with the same name
+// but it has a different aggregation or keys. Successful results are cached in
+// e.createdViews so each view is created remotely at most once; built-in
+// Stackdriver metrics are looked up instead of created.
+func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
+ e.createdViewsMu.Lock()
+ defer e.createdViewsMu.Unlock()
+
+ viewName := v.Name
+
+ if md, ok := e.createdViews[viewName]; ok {
+ // [TODO:rghetia] Temporary fix for https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/76#issuecomment-459459091
+ if builtinMetric(md.Type) {
+ return nil
+ }
+ return e.equalMeasureAggTagKeys(md, v.Measure, v.Aggregation, v.TagKeys)
+ }
+
+ inMD, err := e.viewToMetricDescriptor(ctx, v)
+ if err != nil {
+ return err
+ }
+
+ var dmd *metric.MetricDescriptor
+ if builtinMetric(inMD.Type) {
+ gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
+ Name: inMD.Name,
+ }
+ dmd, err = getMetricDescriptor(ctx, e.c, gmrdesc)
+ } else {
+ cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
+ MetricDescriptor: inMD,
+ }
+ dmd, err = createMetricDescriptor(ctx, e.c, cmrdesc)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Now cache the metric descriptor
+ e.createdViews[viewName] = dmd
+ return err
+}
+
+// displayName joins the configured MetricPrefix (default "OpenCensus") with
+// the view-name suffix to form a metric display name.
+func (e *statsExporter) displayName(suffix string) string {
+ displayNamePrefix := defaultDisplayNamePrefix
+ if e.o.MetricPrefix != "" {
+ displayNamePrefix = e.o.MetricPrefix
+ }
+ return path.Join(displayNamePrefix, suffix)
+}
+
+// newPoint builds a monitoring Point for one row: a gauge point (end time
+// only) for LastValue aggregations, a cumulative point (start+end interval)
+// for everything else. This mirrors the kind mapping in viewToMetricDescriptor.
+func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
+ switch v.Aggregation.Type {
+ case view.AggTypeLastValue:
+ return newGaugePoint(v, row, end)
+ default:
+ return newCumulativePoint(v, row, start, end)
+ }
+}
+
+func newCumulativePoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
+ return &monitoringpb.Point{
+ Interval: &monitoringpb.TimeInterval{
+ StartTime: ×tamp.Timestamp{
+ Seconds: start.Unix(),
+ Nanos: int32(start.Nanosecond()),
+ },
+ EndTime: ×tamp.Timestamp{
+ Seconds: end.Unix(),
+ Nanos: int32(end.Nanosecond()),
+ },
+ },
+ Value: newTypedValue(v, row),
+ }
+}
+
+func newGaugePoint(v *view.View, row *view.Row, end time.Time) *monitoringpb.Point {
+ gaugeTime := ×tamp.Timestamp{
+ Seconds: end.Unix(),
+ Nanos: int32(end.Nanosecond()),
+ }
+ return &monitoringpb.Point{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: gaugeTime,
+ },
+ Value: newTypedValue(v, row),
+ }
+}
+
+// newTypedValue converts one row's aggregated data into a Stackdriver
+// TypedValue. Sum and LastValue pick Int64 or Double based on the measure
+// type. Returns nil for any measure/aggregation combination not handled below.
+func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
+ switch v := r.Data.(type) {
+ case *view.CountData:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v.Value,
+ }}
+ case *view.SumData:
+ switch vd.Measure.(type) {
+ case *stats.Int64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: int64(v.Value),
+ }}
+ case *stats.Float64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v.Value,
+ }}
+ }
+ case *view.DistributionData:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: &distributionpb.Distribution{
+ Count: v.Count,
+ Mean: v.Mean,
+ SumOfSquaredDeviation: v.SumOfSquaredDev,
+ // TODO(songya): uncomment this once Stackdriver supports min/max.
+ // Range: &distributionpb.Distribution_Range{
+ // Min: v.Min,
+ // Max: v.Max,
+ // },
+ BucketOptions: &distributionpb.Distribution_BucketOptions{
+ Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
+ Bounds: vd.Aggregation.Buckets,
+ },
+ },
+ },
+ BucketCounts: v.CountPerBucket,
+ },
+ }}
+ case *view.LastValueData:
+ switch vd.Measure.(type) {
+ case *stats.Int64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: int64(v.Value),
+ }}
+ case *stats.Float64Measure:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v.Value,
+ }}
+ }
+ }
+ return nil
+}
+
+// metricType returns the Stackdriver metric type for a view: the user's
+// GetMetricType hook when set, otherwise "custom.googleapis.com/opencensus/<view name>".
+func (e *statsExporter) metricType(v *view.View) string {
+ if formatter := e.o.GetMetricType; formatter != nil {
+ return formatter(v)
+ } else {
+ return path.Join("custom.googleapis.com", "opencensus", v.Name)
+ }
+}
+
+// newLabels merges the exporter's default labels with a row's tags into a
+// metric label map, sanitizing every key. Tags are written second, so a tag
+// whose sanitized key collides with a default overwrites it.
+func newLabels(defaults map[string]labelValue, tags []tag.Tag) map[string]string {
+ labels := make(map[string]string)
+ for k, lbl := range defaults {
+ labels[sanitize(k)] = lbl.val
+ }
+ for _, tag := range tags {
+ labels[sanitize(tag.Key.Name())] = tag.Value
+ }
+ return labels
+}
+
+// newLabelDescriptors builds LabelDescriptors (all STRING-typed) for the
+// exporter's default labels plus the view's tag keys, sanitizing every key.
+func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labelpb.LabelDescriptor {
+ labelDescriptors := make([]*labelpb.LabelDescriptor, 0, len(keys)+len(defaults))
+ for key, lbl := range defaults {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(key),
+ Description: lbl.desc,
+ ValueType: labelpb.LabelDescriptor_STRING,
+ })
+ }
+ for _, key := range keys {
+ labelDescriptors = append(labelDescriptors, &labelpb.LabelDescriptor{
+ Key: sanitize(key.Name()),
+ ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags
+ })
+ }
+ return labelDescriptors
+}
+
+// equalMeasureAggTagKeys verifies that an existing remote MetricDescriptor is
+// compatible with the given measure, aggregation, and tag keys: the value type
+// must match the measure/aggregation combination, and the descriptor's label
+// set must equal the sanitized tag keys plus default labels exactly.
+// NOTE(review): the "%T" verb below formats the *type* of agg.Type (always
+// view.AggType), and "%q" formats the whole descriptor k, not just its key —
+// both look like formatting slips in the error messages; verify upstream.
+func (e *statsExporter) equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
+ var aggTypeMatch bool
+ switch md.ValueType {
+ case metricpb.MetricDescriptor_INT64:
+ if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
+ return fmt.Errorf("stackdriver metric descriptor was not created as int64")
+ }
+ aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
+ case metricpb.MetricDescriptor_DOUBLE:
+ if _, ok := m.(*stats.Float64Measure); !ok {
+ return fmt.Errorf("stackdriver metric descriptor was not created as double")
+ }
+ aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
+ case metricpb.MetricDescriptor_DISTRIBUTION:
+ aggTypeMatch = agg.Type == view.AggTypeDistribution
+ }
+
+ if !aggTypeMatch {
+ return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type)
+ }
+
+ // Build the expected label set, then check it against the descriptor's
+ // labels in both directions (no missing and no extra labels).
+ labels := make(map[string]struct{}, len(keys)+len(e.defaultLabels))
+ for _, k := range keys {
+ labels[sanitize(k.Name())] = struct{}{}
+ }
+ for k := range e.defaultLabels {
+ labels[sanitize(k)] = struct{}{}
+ }
+
+ for _, k := range md.Labels {
+ if _, ok := labels[k.Key]; !ok {
+ return fmt.Errorf("stackdriver metric descriptor %q was not created with label %q", md.Type, k)
+ }
+ delete(labels, k.Key)
+ }
+
+ if len(labels) > 0 {
+ extra := make([]string, 0, len(labels))
+ for k := range labels {
+ extra = append(extra, k)
+ }
+ return fmt.Errorf("stackdriver metric descriptor %q contains unexpected labels: %s", md.Type, strings.Join(extra, ", "))
+ }
+
+ return nil
+}
+
+// The three RPC wrappers below are package-level vars rather than plain
+// functions — presumably so tests can stub out the Stackdriver calls; verify
+// against the package's tests.
+var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return c.CreateMetricDescriptor(ctx, mdr)
+}
+
+var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return c.GetMetricDescriptor(ctx, mdr)
+}
+
+var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error {
+ return c.CreateTimeSeries(ctx, ts)
+}
+
+// knownExternalMetricPrefixes lists the metric-type prefixes that identify
+// user-defined (non-built-in) Stackdriver metrics.
+var knownExternalMetricPrefixes = []string{
+ "custom.googleapis.com/",
+ "external.googleapis.com/",
+}
+
+// builtinMetric returns true if a MetricType is a heuristically known
+// built-in Stackdriver metric. Note the inverted logic: anything NOT starting
+// with a known external prefix is assumed built-in.
+func builtinMetric(metricType string) bool {
+ for _, knownExternalMetric := range knownExternalMetricPrefixes {
+ if strings.HasPrefix(metricType, knownExternalMetric) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go
new file mode 100644
index 00000000000..71e7f36d22a
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace.go
@@ -0,0 +1,178 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ tracingclient "cloud.google.com/go/trace/apiv2"
+ "github.com/golang/protobuf/proto"
+ "go.opencensus.io/trace"
+ "google.golang.org/api/support/bundler"
+ tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+)
+
+// traceExporter is an implementation of trace.Exporter that uploads spans to
+// Stackdriver. Spans are batched through a bundler and uploaded
+// asynchronously via uploadFn.
+type traceExporter struct {
+	o         Options
+	projectID string
+	bundler   *bundler.Bundler
+	// uploadFn defaults to uploadSpans; it can be replaced for tests.
+	uploadFn func(spans []*tracepb.Span)
+	// overflowLogger rate-limits "buffer full" log messages (see its log/delay).
+	overflowLogger
+	client *tracingclient.Client
+}
+
+// Compile-time check that traceExporter satisfies trace.Exporter.
+var _ trace.Exporter = (*traceExporter)(nil)
+
+// newTraceExporter creates a Stackdriver trace client from o and wraps it in
+// a traceExporter. o.Context is used for client creation when set; otherwise
+// context.Background() is used.
+func newTraceExporter(o Options) (*traceExporter, error) {
+	ctx := o.Context
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	client, err := tracingclient.NewClient(ctx, o.TraceClientOptions...)
+	if err != nil {
+		return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err)
+	}
+	return newTraceExporterWithClient(o, client), nil
+}
+
+// defaultBufferedByteLimit caps the bundler's buffered "bytes" (see the note
+// below on how bytes are measured) when Options.TraceSpansBufferMaxBytes is
+// not set.
+const defaultBufferedByteLimit = 8 * 1024 * 1024
+
+// newTraceExporterWithClient builds a traceExporter around an existing trace
+// client, configuring the span bundler from o with fallback defaults
+// (2-second delay threshold, 50 spans per bundle).
+func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter {
+	e := &traceExporter{
+		projectID: o.ProjectID,
+		client:    c,
+		o:         o,
+	}
+	// Each bundle handed back by the bundler is a []*tracepb.Span; forward it
+	// to uploadFn (uploadSpans, unless replaced for tests).
+	b := bundler.NewBundler((*tracepb.Span)(nil), func(bundle interface{}) {
+		e.uploadFn(bundle.([]*tracepb.Span))
+	})
+	if o.BundleDelayThreshold > 0 {
+		b.DelayThreshold = o.BundleDelayThreshold
+	} else {
+		b.DelayThreshold = 2 * time.Second
+	}
+	if o.BundleCountThreshold > 0 {
+		b.BundleCountThreshold = o.BundleCountThreshold
+	} else {
+		b.BundleCountThreshold = 50
+	}
+	// The measured "bytes" are not really bytes, see exportReceiver.
+	b.BundleByteThreshold = b.BundleCountThreshold * 200
+	b.BundleByteLimit = b.BundleCountThreshold * 1000
+	if o.TraceSpansBufferMaxBytes > 0 {
+		b.BufferedByteLimit = o.TraceSpansBufferMaxBytes
+	} else {
+		b.BufferedByteLimit = defaultBufferedByteLimit
+	}
+
+	e.bundler = b
+	e.uploadFn = e.uploadSpans
+	return e
+}
+
+// ExportSpan exports a SpanData to Stackdriver Trace. The span is converted
+// to proto form and handed to the bundler for asynchronous upload.
+func (e *traceExporter) ExportSpan(s *trace.SpanData) {
+	protoSpan := protoFromSpanData(s, e.projectID, e.o.Resource)
+	protoSize := proto.Size(protoSpan)
+	err := e.bundler.Add(protoSpan, protoSize)
+	switch err {
+	case nil:
+		return
+	case bundler.ErrOversizedItem:
+		// Oversized spans are dropped silently (Go cases do not fall through).
+	case bundler.ErrOverflow:
+		// Buffer full: record the drop via the rate-limited overflow logger.
+		e.overflowLogger.log()
+	default:
+		e.o.handleError(err)
+	}
+}
+
+// Flush waits for exported trace spans to be uploaded by flushing the
+// underlying bundler.
+//
+// This is useful if your program is ending and you do not want to lose recent
+// spans.
+func (e *traceExporter) Flush() {
+	e.bundler.Flush()
+}
+
+// uploadSpans uploads a set of spans to Stackdriver via BatchWriteSpans.
+// Upload failures are recorded on the wrapping span's status and passed to
+// the configured error handler.
+func (e *traceExporter) uploadSpans(spans []*tracepb.Span) {
+	req := tracepb.BatchWriteSpansRequest{
+		Name:  "projects/" + e.projectID,
+		Spans: spans,
+	}
+	// Create a never-sampled span to prevent traces associated with exporter.
+	ctx, cancel := e.o.newContextWithTimeout()
+	defer cancel()
+	ctx, span := trace.StartSpan(
+		ctx,
+		"contrib.go.opencensus.io/exporter/stackdriver.uploadSpans",
+		trace.WithSampler(trace.NeverSample()),
+	)
+	defer span.End()
+	span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans))))
+
+	err := e.client.BatchWriteSpans(ctx, &req)
+	if err != nil {
+		// Status code 2 — presumably the gRPC "Unknown" code; confirm upstream.
+		span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
+		e.o.handleError(err)
+	}
+}
+
+// overflowLogger ensures that at most one overflow error log message is
+// written every 5 seconds.
+type overflowLogger struct {
+	mu sync.Mutex
+	// pause is true while inside a 5-second quiet period started by delay.
+	pause bool
+	// accum counts overflows observed during the current quiet period.
+	accum int
+}
+
+// delay starts a 5-second quiet period. It must be called with o.mu held.
+// When the timer fires, overflows accumulated during the quiet period are
+// reported in a single summary line and, if there were any, a new quiet
+// period is started.
+func (o *overflowLogger) delay() {
+	o.pause = true
+	time.AfterFunc(5*time.Second, func() {
+		o.mu.Lock()
+		defer o.mu.Unlock()
+		switch {
+		case o.accum == 0:
+			// Nothing accumulated: end the quiet period.
+			o.pause = false
+		case o.accum == 1:
+			log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
+			o.accum = 0
+			o.delay()
+		default:
+			log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum)
+			o.accum = 0
+			o.delay()
+		}
+	})
+}
+
+// log reports one span-upload overflow. Outside a quiet period it logs
+// immediately and starts one; during a quiet period it only increments the
+// counter that delay's timer will later summarize.
+func (o *overflowLogger) log() {
+	o.mu.Lock()
+	defer o.mu.Unlock()
+	if !o.pause {
+		log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full")
+		o.delay()
+	} else {
+		o.accum++
+	}
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
new file mode 100644
index 00000000000..2d259cf3c39
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
@@ -0,0 +1,277 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver
+
+import (
+ "fmt"
+ "math"
+ "time"
+ "unicode/utf8"
+
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+ wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
+ "go.opencensus.io/plugin/ochttp"
+ "go.opencensus.io/trace"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2"
+ statuspb "google.golang.org/genproto/googleapis/rpc/status"
+)
+
+const (
+	// Per-span caps: annotations/message events beyond these counts are not
+	// copied into the proto and are tallied as dropped instead.
+	maxAnnotationEventsPerSpan = 32
+	maxMessageEventsPerSpan    = 128
+	// Strings longer than this are truncated (see trunc).
+	maxAttributeStringValue = 256
+	// agentLabel keys the exporter user-agent attribute added to every span.
+	agentLabel = "g.co/agent"
+
+	// Attribute keys under which the well-known ochttp attributes are
+	// exported (see copyAttributes).
+	labelHTTPHost       = `/http/host`
+	labelHTTPMethod     = `/http/method`
+	labelHTTPStatusCode = `/http/status_code`
+	labelHTTPPath       = `/http/path`
+	labelHTTPUserAgent  = `/http/user_agent`
+)
+
+// protoFromSpanData returns a protocol buffer representation of a SpanData,
+// or nil if s is nil.
+//
+// The display name is prefixed by span kind, span and monitored-resource
+// attributes are merged into the proto attribute map, and annotations /
+// message events beyond the per-span caps are counted as dropped rather than
+// copied.
+func protoFromSpanData(s *trace.SpanData, projectID string, mr *monitoredrespb.MonitoredResource) *tracepb.Span {
+	if s == nil {
+		return nil
+	}
+
+	traceIDString := s.SpanContext.TraceID.String()
+	spanIDString := s.SpanContext.SpanID.String()
+
+	// Prefix the display name by span kind ("Sent." for client, "Recv." for
+	// server); internal spans keep their name unchanged.
+	name := s.Name
+	switch s.SpanKind {
+	case trace.SpanKindClient:
+		name = "Sent." + name
+	case trace.SpanKindServer:
+		name = "Recv." + name
+	}
+
+	sp := &tracepb.Span{
+		Name:                    "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString,
+		SpanId:                  spanIDString,
+		DisplayName:             trunc(name, 128),
+		StartTime:               timestampProto(s.StartTime),
+		EndTime:                 timestampProto(s.EndTime),
+		SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent},
+	}
+	if p := s.ParentSpanID; p != (trace.SpanID{}) {
+		sp.ParentSpanId = p.String()
+	}
+	// Only emit a status when it carries information (non-zero code or a
+	// message).
+	if s.Status.Code != 0 || s.Status.Message != "" {
+		sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message}
+	}
+
+	var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int
+	copyAttributes(&sp.Attributes, s.Attributes)
+
+	// Copy MonitoredResources as span Attributes
+	sp.Attributes = copyMonitoredResourceAttributes(sp.Attributes, mr)
+
+	// Copy annotations up to the per-span cap; the remainder is only counted.
+	as := s.Annotations
+	for i, a := range as {
+		if annotations >= maxAnnotationEventsPerSpan {
+			droppedAnnotationsCount = len(as) - i
+			break
+		}
+		annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)}
+		copyAttributes(&annotation.Attributes, a.Attributes)
+		event := &tracepb.Span_TimeEvent{
+			Time:  timestampProto(a.Time),
+			Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation},
+		}
+		annotations++
+		if sp.TimeEvents == nil {
+			sp.TimeEvents = &tracepb.Span_TimeEvents{}
+		}
+		sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event)
+	}
+
+	// Tag the span with the exporter's user agent under agentLabel.
+	if sp.Attributes == nil {
+		sp.Attributes = &tracepb.Span_Attributes{
+			AttributeMap: make(map[string]*tracepb.AttributeValue),
+		}
+	}
+	sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{
+		Value: &tracepb.AttributeValue_StringValue{
+			StringValue: trunc(userAgent, maxAttributeStringValue),
+		},
+	}
+
+	// Copy message events up to the per-span cap; the remainder is counted.
+	es := s.MessageEvents
+	for i, e := range es {
+		if messageEvents >= maxMessageEventsPerSpan {
+			droppedMessageEventsCount = len(es) - i
+			break
+		}
+		messageEvents++
+		if sp.TimeEvents == nil {
+			sp.TimeEvents = &tracepb.Span_TimeEvents{}
+		}
+		sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{
+			Time: timestampProto(e.Time),
+			Value: &tracepb.Span_TimeEvent_MessageEvent_{
+				MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+					Type:                  tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+					Id:                    e.MessageID,
+					UncompressedSizeBytes: e.UncompressedByteSize,
+					CompressedSizeBytes:   e.CompressedByteSize,
+				},
+			},
+		})
+	}
+
+	// Record how many time events were dropped by the caps above.
+	if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 {
+		if sp.TimeEvents == nil {
+			sp.TimeEvents = &tracepb.Span_TimeEvents{}
+		}
+		sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+		sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+	}
+
+	if len(s.Links) > 0 {
+		sp.Links = &tracepb.Span_Links{}
+		sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
+		for _, l := range s.Links {
+			link := &tracepb.Span_Link{
+				TraceId: l.TraceID.String(),
+				SpanId:  l.SpanID.String(),
+				Type:    tracepb.Span_Link_Type(l.Type),
+			}
+			copyAttributes(&link.Attributes, l.Attributes)
+			sp.Links.Link = append(sp.Links.Link, link)
+		}
+	}
+	return sp
+}
+
+// timestampProto creates a timestamp proto for a time.Time.
+func timestampProto(t time.Time) *timestamppb.Timestamp {
+ return ×tamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+}
+
+// copyMonitoredResourceAttributes copies monitored-resource labels into the
+// span attribute map under "g.co/r/<resource type>/<label key>" keys,
+// creating the Span_Attributes and its map if nil. A nil mr is a no-op and
+// out is returned unchanged.
+func copyMonitoredResourceAttributes(out *tracepb.Span_Attributes, mr *monitoredrespb.MonitoredResource) *tracepb.Span_Attributes {
+	if mr == nil {
+		return out
+	}
+	if out == nil {
+		out = &tracepb.Span_Attributes{}
+	}
+	if out.AttributeMap == nil {
+		out.AttributeMap = make(map[string]*tracepb.AttributeValue)
+	}
+	for k, v := range mr.Labels {
+		av := attributeValue(v)
+		out.AttributeMap[fmt.Sprintf("g.co/r/%s/%s", mr.Type, k)] = av
+	}
+	return out
+}
+
+// copyAttributes copies a map of attributes to a proto map field.
+// It creates the map if it is nil.
+//
+// Well-known ochttp attribute keys are renamed to their Stackdriver /http/*
+// label equivalents. Other keys longer than 128 bytes are dropped and
+// counted in DroppedAttributesCount; values of unsupported types (see
+// attributeValue) are skipped without being counted as dropped.
+func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) {
+	if len(in) == 0 {
+		return
+	}
+	if *out == nil {
+		*out = &tracepb.Span_Attributes{}
+	}
+	if (*out).AttributeMap == nil {
+		(*out).AttributeMap = make(map[string]*tracepb.AttributeValue)
+	}
+	var dropped int32
+	for key, value := range in {
+		av := attributeValue(value)
+		if av == nil {
+			continue
+		}
+		switch key {
+		case ochttp.PathAttribute:
+			(*out).AttributeMap[labelHTTPPath] = av
+		case ochttp.HostAttribute:
+			(*out).AttributeMap[labelHTTPHost] = av
+		case ochttp.MethodAttribute:
+			(*out).AttributeMap[labelHTTPMethod] = av
+		case ochttp.UserAgentAttribute:
+			(*out).AttributeMap[labelHTTPUserAgent] = av
+		case ochttp.StatusCodeAttribute:
+			(*out).AttributeMap[labelHTTPStatusCode] = av
+		default:
+			if len(key) > 128 {
+				dropped++
+				continue
+			}
+			(*out).AttributeMap[key] = av
+		}
+	}
+	(*out).DroppedAttributesCount = dropped
+}
+
+// attributeValue converts a span attribute value to its proto form. Only
+// bool, int64, and string are supported; strings are truncated to
+// maxAttributeStringValue. Any other type yields nil.
+func attributeValue(v interface{}) *tracepb.AttributeValue {
+	switch value := v.(type) {
+	case bool:
+		return &tracepb.AttributeValue{
+			Value: &tracepb.AttributeValue_BoolValue{BoolValue: value},
+		}
+	case int64:
+		return &tracepb.AttributeValue{
+			Value: &tracepb.AttributeValue_IntValue{IntValue: value},
+		}
+	case string:
+		return &tracepb.AttributeValue{
+			Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)},
+		}
+	}
+	return nil
+}
+
+// trunc returns a TruncatableString truncated to the given byte limit.
+// The cut never splits a UTF-8 sequence: trailing bytes that form an
+// incomplete rune are removed and counted toward TruncatedByteCount.
+func trunc(s string, limit int) *tracepb.TruncatableString {
+	if len(s) > limit {
+		b := []byte(s[:limit])
+		// Back off until the tail is not a partial rune (DecodeLastRune
+		// reports RuneError with size 1 for a broken encoding).
+		for {
+			r, size := utf8.DecodeLastRune(b)
+			if r == utf8.RuneError && size == 1 {
+				b = b[:len(b)-1]
+			} else {
+				break
+			}
+		}
+		return &tracepb.TruncatableString{
+			Value:              string(b),
+			TruncatedByteCount: clip32(len(s) - len(b)),
+		}
+	}
+	return &tracepb.TruncatableString{
+		Value:              s,
+		TruncatedByteCount: 0,
+	}
+}
+
+// clip32 clips an int to the range of an int32, saturating at the int32
+// bounds instead of wrapping on overflow.
+func clip32(x int) int32 {
+	if x < math.MinInt32 {
+		return math.MinInt32
+	}
+	if x > math.MaxInt32 {
+		return math.MaxInt32
+	}
+	return int32(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 00000000000..899129ecc46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 00000000000..56fdfc2bfc7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//     output, err := s3manage.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Get error details
+//             log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//             // Prints out full error message, including original error if there was one.
+//             log.Println("Error:", awsErr.Error())
+//
+//             // Get original error
+//             if origErr := awsErr.OrigErr(); origErr != nil {
+//                 // operate on original error.
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Note: unlike BatchedErrors, this interface does not embed Error.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original errors if any were set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original errors if any were set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// origErr, if non-nil, becomes the sole entry of the new error's underlying
+// error list. (It is always wrapped; a previous version of this comment
+// claimed origErr would be returned unwrapped when it already satisfied
+// Error, which does not match this implementation.)
+func New(code, message string, origErr error) Error {
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
+	}
+	return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+//     output, err := s3manage.Upload(svc, input, opts)
+//     if err != nil {
+//         if reqerr, ok := err.(RequestFailure); ok {
+//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//         } else {
+//             log.Println("Error:", err.Error())
+//         }
+//     }
+//
+// Combined with awserr.Error:
+//
+//    output, err := s3manage.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Generic AWS Error with Code, Message, and original error (if any)
+//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//            if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                // A service error occurred
+//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type RequestFailure interface {
+	Error
+
+	// StatusCode returns the status code of the HTTP response.
+	StatusCode() int
+
+	// RequestID returns the request ID returned by the service for a request
+	// failure. This will be empty if no request ID is available such as the
+	// request failed due to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+	return newRequestError(err, statusCode, reqID)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 00000000000..0202a008f5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+	// Classification of error
+	code string
+
+	// Detailed information about error
+	message string
+
+	// Optional original error this error is based off of. Allows building
+	// chained errors.
+	errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short no whitespace phrase depicting the classification of
+// the error that is being created.
+//
+// message is the free flow string containing detailed information about the
+// error.
+//
+// origErrs is the error objects which will be nested under the new errors to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+	b := &baseError{
+		code:    code,
+		message: message,
+		errs:    origErrs,
+	}
+
+	return b
+}
+
+// Error returns the string representation of the error.
+//
+// See ErrorWithExtra for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+	size := len(b.errs)
+	if size > 0 {
+		// Wrap the nested errors in errorList so they render one message per
+		// line after the "caused by:" prefix.
+		return SprintError(b.code, b.message, "", errorList(b.errs))
+	}
+
+	return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+	return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+	return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+	return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		// Multiple wrapped errors: surface the first one's code/message when
+		// it is an awserr.Error, re-batching the remaining errors under it;
+		// otherwise batch everything under a generic code.
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occurred", b.errs)
+	}
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all request which involve service requests. Even if
+// the request failed without a service response, but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	// When the wrapped error carries a batch, expose the whole list;
+	// otherwise fall back to the single OrigErr value.
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // How do we want to handle the array size being zero
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += fmt.Sprintf("%s", e[i].Error())
+ // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this, because unit tests
+ // could be broken with the additional '\n'
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 00000000000..1a3d106d5c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+	dsti := reflect.New(reflect.TypeOf(src).Elem())
+	dst = dsti.Interface()
+	rcopy(dsti, reflect.ValueOf(src), true)
+	return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of a object.
+func rcopy(dst, src reflect.Value, root bool) {
+	if !src.IsValid() {
+		return
+	}
+
+	switch src.Kind() {
+	case reflect.Ptr:
+		if _, ok := src.Interface().(io.Reader); ok {
+			// io.Reader values are shared, not deep-copied: the same reader
+			// is assigned to dst rather than duplicating its contents.
+			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+				dst.Elem().Set(src)
+			} else if dst.CanSet() {
+				dst.Set(src)
+			}
+		} else {
+			e := src.Type().Elem()
+			if dst.CanSet() && !src.IsNil() {
+				if _, ok := src.Interface().(*time.Time); !ok {
+					dst.Set(reflect.New(e))
+				} else {
+					tempValue := reflect.New(e)
+					tempValue.Elem().Set(src.Elem())
+					// Sets time.Time's unexported values
+					dst.Set(tempValue)
+				}
+			}
+			if src.Elem().IsValid() {
+				// Keep the current root state since the depth hasn't changed
+				rcopy(dst.Elem(), src.Elem(), root)
+			}
+		}
+	case reflect.Struct:
+		// Iterate the destination's fields so only fields that exist in both
+		// structs (and are settable) are copied.
+		t := dst.Type()
+		for i := 0; i < t.NumField(); i++ {
+			name := t.Field(i).Name
+			srcVal := src.FieldByName(name)
+			dstVal := dst.FieldByName(name)
+			if srcVal.IsValid() && dstVal.CanSet() {
+				rcopy(dstVal, srcVal, false)
+			}
+		}
+	case reflect.Slice:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+		dst.Set(s)
+		for i := 0; i < src.Len(); i++ {
+			rcopy(dst.Index(i), src.Index(i), false)
+		}
+	case reflect.Map:
+		if src.IsNil() {
+			break
+		}
+
+		s := reflect.MakeMap(src.Type())
+		dst.Set(s)
+		for _, k := range src.MapKeys() {
+			v := src.MapIndex(k)
+			v2 := reflect.New(v.Type()).Elem()
+			rcopy(v2, v, false)
+			dst.SetMapIndex(k, v2)
+		}
+	default:
+		// Assign the value if possible. If its not assignable, the value would
+		// need to be converted and the impact of that may be unexpected, or is
+		// not compatible with the dst type.
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 00000000000..142a7a01c52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
// DeepEqual reports whether the two values are deeply equal, like
// reflect.DeepEqual. In addition, it dereferences either input when it is a
// pointer, so the comparison does not fail merely because one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input
// parameters — only the top-level values are dereferenced.
func DeepEqual(a, b interface{}) bool {
	ra := reflect.Indirect(reflect.ValueOf(a))
	rb := reflect.Indirect(reflect.ValueOf(b))

	switch aValid, bValid := ra.IsValid(), rb.IsValid(); {
	case !aValid && !bValid:
		// Both sides are nil (or nil pointers): equal only when their static
		// types match.
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	case aValid != bValid:
		// Exactly one side is nil; the values cannot be equal.
		return false
	}

	return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 00000000000..11c52c38968
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+//
+// createPath allocates nil pointers encountered along the path so a terminal
+// value can be set. caseSensitive requires exact field-name matches and
+// rejects lower-cased (unexported) components. nilTerm stops at a terminal
+// pointer component and nils it out so a nil value can be "assigned" (see
+// SetValueAtPath).
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+	// "||" separates alternative paths; the first alternative that yields
+	// any values wins.
+	pathparts := strings.Split(path, "||")
+	if len(pathparts) > 1 {
+		for _, pathpart := range pathparts {
+			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+			if len(vals) > 0 {
+				return vals
+			}
+		}
+		return nil
+	}
+
+	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+	components := strings.Split(path, ".")
+	for len(values) > 0 && len(components) > 0 {
+		var index *int64
+		var indexStar bool
+		c := strings.TrimSpace(components[0])
+		if c == "" { // no actual component, illegal syntax
+			return nil
+		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+			// TODO normalize case for user
+			return nil // don't support unexported fields
+		}
+
+		// parse this component
+		if m := indexRe.FindStringSubmatch(c); m != nil {
+			c = m[1]
+			if m[2] == "" {
+				// "name[]" form: select every index of the slice.
+				index = nil
+				indexStar = true
+			} else {
+				i, _ := strconv.ParseInt(m[2], 10, 32)
+				index = &i
+				indexStar = false
+			}
+		}
+
+		nextvals := []reflect.Value{}
+		for _, value := range values {
+			// pull component name out of struct member
+			if value.Kind() != reflect.Struct {
+				continue
+			}
+
+			if c == "*" { // pull all members
+				for i := 0; i < value.NumField(); i++ {
+					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+						nextvals = append(nextvals, f)
+					}
+				}
+				continue
+			}
+
+			value = value.FieldByNameFunc(func(name string) bool {
+				if c == name {
+					return true
+				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+					return true
+				}
+				return false
+			})
+
+			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+				// Terminal pointer with nilTerm set: nil it out and return it
+				// so the caller can observe the cleared value.
+				if !value.IsNil() {
+					value.Set(reflect.Zero(value.Type()))
+				}
+				return []reflect.Value{value}
+			}
+
+			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+				// TODO if the value is the terminus it should not be created
+				// if the value to be set to its position is nil.
+				value.Set(reflect.New(value.Type().Elem()))
+				value = value.Elem()
+			} else {
+				value = reflect.Indirect(value)
+			}
+
+			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+				if !createPath && value.IsNil() {
+					// Nil slices/maps terminate the path when not creating it.
+					value = reflect.ValueOf(nil)
+				}
+			}
+
+			if value.IsValid() {
+				nextvals = append(nextvals, value)
+			}
+		}
+		values = nextvals
+
+		if indexStar || index != nil {
+			nextvals = []reflect.Value{}
+			for _, valItem := range values {
+				value := reflect.Indirect(valItem)
+				if value.Kind() != reflect.Slice {
+					continue
+				}
+
+				if indexStar { // grab all indices
+					for i := 0; i < value.Len(); i++ {
+						idx := reflect.Indirect(value.Index(i))
+						if idx.IsValid() {
+							nextvals = append(nextvals, idx)
+						}
+					}
+					continue
+				}
+
+				// pull out index
+				i := int(*index)
+				if i >= value.Len() { // check out of bounds
+					if createPath {
+						// TODO resize slice
+					} else {
+						continue
+					}
+				} else if i < 0 { // support negative indexing
+					i = value.Len() + i
+				}
+				value = reflect.Indirect(value.Index(i))
+
+				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+					if !createPath && value.IsNil() {
+						value = reflect.ValueOf(nil)
+					}
+				}
+
+				if value.IsValid() {
+					nextvals = append(nextvals, value)
+				}
+			}
+			values = nextvals
+		}
+
+		components = components[1:]
+	}
+	return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+//
+// NOTE(review): this delegates to JMESPath search rather than rValuesAtPath,
+// so the path syntax here is JMESPath, not the dotted syntax above.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+	result, err := jmespath.Search(path, i)
+	if err != nil {
+		return nil, err
+	}
+
+	v := reflect.ValueOf(result)
+	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return nil, nil
+	}
+	if s, ok := result.([]interface{}); ok {
+		return s, err
+	}
+	if v.Kind() == reflect.Map && v.Len() == 0 {
+		return nil, nil
+	}
+	if v.Kind() == reflect.Slice {
+		out := make([]interface{}, v.Len())
+		for i := 0; i < v.Len(); i++ {
+			out[i] = v.Index(i).Interface()
+		}
+		return out, nil
+	}
+
+	return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+//
+// A nil v nils out the terminal pointer (rValuesAtPath's nilTerm behavior).
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+	if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+		for _, rval := range rvals {
+			if rval.Kind() == reflect.Ptr && rval.IsNil() {
+				continue
+			}
+			setValue(rval, v)
+		}
+	}
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 00000000000..710eb432f85
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+	var buf bytes.Buffer
+	prettify(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			// NOTE(review): writes an empty string for io.* struct values —
+			// upstream aws-sdk-go writes a "<buffer>" placeholder here;
+			// confirm the literal was not lost in transcription.
+			buf.WriteString("")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		// Collect only exported, set fields so separator placement is exact.
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			// Byte slices are summarized by length rather than dumped.
+			fmt.Fprintf(buf, " len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			// Longer slices are printed one element per line.
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			// NOTE(review): empty output for invalid values — upstream prints
+			// an "<invalid value>" placeholder; confirm the literal was not
+			// lost in transcription.
+			fmt.Fprint(buf, "")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 00000000000..645df2450fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// stringValue recursively walks v, writing an indented textual rendering of
+// the value to buf. Unlike prettify, it honors the `sensitive` struct tag and
+// has no special cases for time.Time or io types.
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				// NOTE(review): sensitive fields are redacted to an empty
+				// string — upstream writes a "<sensitive>" placeholder here;
+				// confirm the literal was not lost in transcription.
+				buf.WriteString("")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			// Longer slices are printed one element per line.
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 00000000000..70960538409
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata
+	// the service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	// Pick the retryer: use cfg.Retryer when it implements request.Retryer.
+	// A non-nil retryer that does not implement the interface logs a warning
+	// and then falls through to the DefaultRetryer path.
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = 3
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+//
+// The handlers are only installed when the configured LogLevel is at least
+// LogDebug.
+func (c *Client) AddDebugHandlers() {
+	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 00000000000..a397b0d044c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,116 @@
+package client
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//	type retryer struct {
+//		client.DefaultRetryer
+//	}
+//
+//	// This implementation always has 100 max retries
+//	func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		// Honor an explicit Retry-After header when the service sent one.
+		if delay, ok := getRetryDelay(r); ok {
+			return delay
+		}
+
+		minTime = 500
+	}
+
+	// Cap the exponent so the computed backoff stays bounded (the ~five
+	// minute ceiling mentioned above).
+	retryCount := r.RetryCount
+	if throttle && retryCount > 8 {
+		retryCount = 8
+	} else if retryCount > 13 {
+		retryCount = 13
+	}
+
+	// Exponential backoff with jitter: 2^retryCount * random value in
+	// [minTime, 2*minTime) milliseconds.
+	delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+	// If one of the other handlers already set the retry state
+	// we don't want to override it based on the service's state
+	if r.Retryable != nil {
+		return *r.Retryable
+	}
+
+	// All 5xx responses are retryable except 501 Not Implemented.
+	if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
+		return true
+	}
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// ShouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+	// The empty case bodies are deliberate: Go switch cases do not fall
+	// through, so status codes 429/502/503/504 all match and reach the
+	// final `return true` below.
+	switch r.HTTPResponse.StatusCode {
+	case 429:
+	case 502:
+	case 503:
+	case 504:
+	default:
+		return r.IsErrorThrottle()
+	}
+
+	return true
+}
+
+// This will look in the Retry-After header, RFC 7231, for how long
+// it will wait before attempting another request
+func getRetryDelay(r *request.Request) (time.Duration, bool) {
+	if !canUseRetryAfterHeader(r) {
+		return 0, false
+	}
+
+	delayStr := r.HTTPResponse.Header.Get("Retry-After")
+	if len(delayStr) == 0 {
+		return 0, false
+	}
+
+	// Only the delay-seconds form of Retry-After is supported; an HTTP-date
+	// value fails to parse here and is ignored.
+	delay, err := strconv.Atoi(delayStr)
+	if err != nil {
+		return 0, false
+	}
+
+	return time.Duration(delay) * time.Second, true
+}
+
+// Will look at the status code to see if the retry header pertains to
+// the status code. Retry-After is only honored for 429 and 503 responses.
+func canUseRetryAfterHeader(r *request.Request) bool {
+	switch r.HTTPResponse.StatusCode {
+	case 429:
+	case 503:
+	default:
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
new file mode 100644
index 00000000000..7b5e1276acf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -0,0 +1,190 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+// logWriter accumulates everything written to it so a response body can be
+// logged after it has actually been read by the SDK.
+type logWriter struct {
+	// Logger is what we will use to log the payload of a response.
+	Logger aws.Logger
+	// buf stores the contents of what has been read
+	buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+	return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+	// io.Reader will be a tee reader that is used during logging.
+	// This structure will read from a body and write the contents to a logger.
+	io.Reader
+	// Source is used just to close when we are done reading.
+	Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+	return reader.Source.Close()
+}
+
+// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
+// to a service. Will include the HTTP request body if the LogLevel of the
+// request matches LogDebugWithHTTPBody.
+var LogHTTPRequestHandler = request.NamedHandler{
+	Name: "awssdk.client.LogRequest",
+	Fn:   logRequest,
+}
+
+// logRequest dumps the outgoing HTTP request (optionally with body) to the
+// configured logger.
+//
+// NOTE(review): r.Config.Logger is used without a nil check here (contrast
+// logResponseHeader below, which checks) — confirm callers guarantee a
+// logger whenever these handlers are installed.
+func logRequest(r *request.Request) {
+	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+	bodySeekable := aws.IsReaderSeekable(r.Body)
+
+	b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+	if err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
+
+	if logBody {
+		if !bodySeekable {
+			r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
+		}
+		// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+		// Body as a NoOpCloser and will not be reset after read by the HTTP
+		// client reader.
+		r.ResetBody()
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
+// to a service. Will only log the HTTP request's headers. The request payload
+// will not be read.
+var LogHTTPRequestHeaderHandler = request.NamedHandler{
+	Name: "awssdk.client.LogRequestHeader",
+	Fn:   logRequestHeader,
+}
+
+// logRequestHeader dumps only the outgoing request's headers to the logger.
+func logRequestHeader(r *request.Request) {
+	b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
+	if err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
+// received from a service. Will include the HTTP response body if the LogLevel
+// of the request matches LogDebugWithHTTPBody.
+var LogHTTPResponseHandler = request.NamedHandler{
+	Name: "awssdk.client.LogResponse",
+	Fn:   logResponse,
+}
+
+// logResponse tees the response body (when body logging is enabled) into a
+// buffer, and registers a handler on the Unmarshal/UnmarshalError chains that
+// logs the response headers plus the buffered body — so the body is only
+// logged after the SDK has actually read it.
+func logResponse(r *request.Request) {
+	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+
+	if r.HTTPResponse == nil {
+		lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
+		return
+	}
+
+	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+	if logBody {
+		// Replace the body so every read is mirrored into lw's buffer.
+		r.HTTPResponse.Body = &teeReaderCloser{
+			Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+			Source: r.HTTPResponse.Body,
+		}
+	}
+
+	handlerFn := func(req *request.Request) {
+		b, err := httputil.DumpResponse(req.HTTPResponse, false)
+		if err != nil {
+			lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+				req.ClientInfo.ServiceName, req.Operation.Name, err))
+			return
+		}
+
+		lw.Logger.Log(fmt.Sprintf(logRespMsg,
+			req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
+
+		if logBody {
+			b, err := ioutil.ReadAll(lw.buf)
+			if err != nil {
+				lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+					req.ClientInfo.ServiceName, req.Operation.Name, err))
+				return
+			}
+
+			lw.Logger.Log(string(b))
+		}
+	}
+
+	// NOTE(review): "awsdk" (missing an 's') appears to match the upstream
+	// SDK constant; handler names act as identifiers, so confirm before
+	// "fixing" the spelling.
+	const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+	r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+		Name: handlerName, Fn: handlerFn,
+	})
+	r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+		Name: handlerName, Fn: handlerFn,
+	})
+}
+
+// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
+// response received from a service. Will only log the HTTP response's headers.
+// The response payload will not be read.
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+	Name: "awssdk.client.LogResponseHeader",
+	Fn:   logResponseHeader,
+}
+
+// logResponseHeader dumps only the response headers; it is a no-op when no
+// logger is configured.
+func logResponseHeader(r *request.Request) {
+	if r.Config.Logger == nil {
+		return
+	}
+
+	b, err := httputil.DumpResponse(r.HTTPResponse, false)
+	if err != nil {
+		r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+			r.ClientInfo.ServiceName, r.Operation.Name, err))
+		return
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+		r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 00000000000..920e9fddf87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,13 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ ServiceID string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 00000000000..10634d173d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,536 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // Note: You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether or not if request.Retryable is set.
+ // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+ // Regions and Endpoints.
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // Note: This configuration option is specific to the Amazon S3 service.
+ //
+ // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // for Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+ // until after the request is authenticated, and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+	// You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+ // compatible with S3 Accelerate will use the accelerate endpoint for
+ // requests. Requests not compatible will fall back to normal S3 requests.
+ //
+	// The bucket must be enabled for accelerate to be used with the S3 client with
+ // accelerate enabled. If the bucket is not enabled for accelerate an error
+ // will be returned. The bucket name must be DNS compatible to also work
+ // with accelerate.
+ S3UseAccelerate *bool
+
+ // S3DisableContentMD5Validation config option is temporarily disabled,
+ // For S3 GetObject API calls, #1837.
+ //
+ // Set this to `true` to disable the S3 service client from automatically
+ // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+ // will also disable the SDK from performing object ContentMD5 validation
+ // on GetObject API calls.
+ S3DisableContentMD5Validation *bool
+
+ // Set this to `true` to disable the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+ // EC2Metadata client to create a new http.Client. This options is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to the session.NewSession() in order to disable
+ // the EC2Metadata overriding the timeout for default credentials chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+	//    .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // Instructs the endpoint to be generated for a service client to
+ // be the dual stack endpoint. The dual stack endpoint will support
+ // both IPv4 and IPv6 addressing.
+ //
+ // Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+ // as it will apply to all service clients created with the session. Even
+ // services which don't support dual stack endpoints.
+ //
+ // If the Endpoint config value is also provided the UseDualStack flag
+ // will be ignored.
+ //
+	// Only supported when used with a Session. For example:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ UseDualStack *bool
+
+ // SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
+ SleepDelay func(time.Duration)
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+ // Will default to false. This would only be used for empty directory names in s3 requests.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+
+ // EnableEndpointDiscovery will allow for endpoint discovery on operations that
+ // have the definition in its model. By default, endpoint discovery is off.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // EnableEndpointDiscovery: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("/foo/bar/moo"),
+ // })
+ EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
+// a Config pointer.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = ®ion
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+ c.S3DisableContentMD5Validation = &enable
+ return c
+
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+ c.EnableEndpointDiscovery = &t
+ return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+ c.DisableEndpointHostPrefix = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.S3DisableContentMD5Validation != nil {
+ dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+
+ if other.EnableEndpointDiscovery != nil {
+ dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+ }
+
+ if other.DisableEndpointHostPrefix != nil {
+ dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 00000000000..2866f9a7fb9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as a SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 00000000000..3718b26e101
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 00000000000..66c5945db15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,56 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 00000000000..9c29f29af17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 00000000000..304fd156120
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+ "time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or the context
+// is canceled, whichever happens first. If the context is canceled the Context's
+// error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 00000000000..ff5d58e0683
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,387 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix((*v / 1000), 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// Which includes calling TimeUnixMilli on a zero Time is undefined.
+//
+// This utility is useful for service API's such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 00000000000..f8853d78af2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,228 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ if r.Body != nil {
+ var err error
+ length, err = aws.SeekerLen(r.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+ return
+ }
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent. Or significant delays
+// occur when retrying requests that would cause the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+ // Unsigned requests are not signed
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 5 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(5 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if request.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+ // place holder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+ // Use a shallow copy of the http.Request to ensure the race condition
+ // of transport on Body will not trigger
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+ // comeback in a similar method.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+	// Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 00000000000..7d50b1557cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 00000000000..ab69c7a6f38
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *request.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000000..3ad1e798df8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will returned the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 00000000000..894bbc7f82c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,292 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection retrieval of the credential
+// values is handled by a object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what to
+// be expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+	// Error is returned if the value was not obtainable, or empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+ // The time at which the credentials are no longer valid
+ ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error
+// this is used by the SDK when construction of a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+ // The provider name to set on the Retrieved returned Value
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// A Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ curTime := e.CurrentTime
+ if curTime == nil {
+ curTime = time.Now
+ }
+ return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+ return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+
+ m sync.RWMutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ // Check the cached credentials first with just the read lock.
+ c.m.RLock()
+ if !c.isExpired() {
+ creds := c.creds
+ c.m.RUnlock()
+ return creds, nil
+ }
+ c.m.RUnlock()
+
+ // Credentials are expired need to retrieve the credentials taking the full
+ // lock.
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ expirer, ok := c.provider.(Expirer)
+ if !ok {
+ return time.Time{}, awserr.New("ProviderNotExpirer",
+ fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
+ nil)
+ }
+ if c.forceRefresh {
+ // set expiration time to the distant past
+ return time.Time{}, nil
+ }
+ return expirer.ExpiresAt(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 00000000000..0ed791be641
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+//
+// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
+// or ExpiryWindow
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+// // Do not use early expiry of credentials. If a non zero value is
+// // specified the credentials will be expired early
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// An error is returned if there are no credentials, or the request fails.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests a specific set of credentials from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 00000000000..ace51313820
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,198 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The Endpoint the request will be made to is provided by the aws.Config's
+	// Endpoint value.
+ Client *client.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // Optional authorization token value if set will be used as the value of
+ // the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the credentials from the endpoint.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+ if authToken := p.AuthorizationToken; len(authToken) != 0 {
+ req.HTTPRequest.Header.Set("Authorization", authToken)
+ }
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 00000000000..54c5cf7333f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// A EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 00000000000..1980c8c140a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,425 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to setup your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+ [default]
+ credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+ // Initialize a session to load credentials.
+ sess, _ := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1")},
+ )
+
+ // Create S3 service client to use the credentials.
+ svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`credentials.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentials("/path/to/command")
+
+ // Create service client value configured for credentials.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentialsTimeout(
+ "/path/to/command",
+ time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+ creds := processcreds.NewCredentials(
+ "/path/to/command",
+ func(opt *ProcessProvider) {
+ opt.Timeout = time.Duration(2) * time.Minute
+ opt.Duration = time.Duration(60) * time.Minute
+ opt.MaxBufSize = 2048
+ })
+
+You can also use your own `exec.Cmd`:
+
+ // Create an exec.Cmd
+ myCommand := exec.Command("/path/to/command")
+
+ // Create credentials using your exec.Cmd and custom timeout
+ creds := processcreds.NewCredentialsCommand(
+ myCommand,
+ func(opt *processcreds.ProcessProvider) {
+ opt.Timeout = time.Duration(1) * time.Second
+ })
+*/
+package processcreds
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // ErrCodeProcessProviderParse error parsing process output
+ ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+ // ErrCodeProcessProviderVersion version error in output
+ ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+ // ErrCodeProcessProviderRequired required attribute missing in output
+ ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+ // ErrCodeProcessProviderExecution execution of command failed
+ ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+ // errMsgProcessProviderTimeout process took longer than allowed
+ errMsgProcessProviderTimeout = "credential process timed out"
+
+ // errMsgProcessProviderProcess process error
+ errMsgProcessProviderProcess = "error in credential_process"
+
+ // errMsgProcessProviderParse problem parsing output
+ errMsgProcessProviderParse = "parse failed of credential_process output"
+
+ // errMsgProcessProviderVersion version error in output
+ errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+ // errMsgProcessProviderMissKey missing access key id in output
+ errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+ errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+ // errMsgProcessProviderPrepareCmd prepare of command failed
+ errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+ // errMsgProcessProviderEmptyCmd command must not be empty
+ errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+ // errMsgProcessProviderPipe failed to initialize pipe
+ errMsgProcessProviderPipe = "failed to initialize pipe"
+
+ // DefaultDuration is the default amount of time in minutes that the
+ // credentials will be valid for.
+ DefaultDuration = time.Duration(15) * time.Minute
+
+ // DefaultBufSize limits buffer size from growing to an enormous
+ // amount due to a faulty process.
+ DefaultBufSize = 1024
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+ staticCreds bool
+ credentials.Expiry
+ originalCommand []string
+
+ // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // A string representing an os command that should return a JSON with
+ // credential information.
+ command *exec.Cmd
+
+ // MaxBufSize limits memory usage from growing to an enormous
+ // amount due to a faulty process.
+ MaxBufSize int
+
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: exec.Command(command),
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+ p := NewCredentials(command, func(opt *ProcessProvider) {
+ opt.Timeout = timeout
+ })
+
+ return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: command,
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+ out, err := p.executeCredentialProcess()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderParse,
+ fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+ err)
+ }
+
+ if resp.Version != 1 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderVersion,
+ errMsgProcessProviderVersion,
+ nil)
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissKey,
+ nil)
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissSecret,
+ nil)
+ }
+
+ // Handle expiration
+ p.staticCreds = resp.Expiration == nil
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ }
+
+ return credentials.Value{
+ ProviderName: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(p.originalCommand) == 0 {
+ p.originalCommand = make([]string, len(p.command.Args))
+ copy(p.originalCommand, p.command.Args)
+
+ // check for empty command because it succeeds
+ if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+ return awserr.New(
+ ErrCodeProcessProviderExecution,
+ fmt.Sprintf(
+ "%s: %s",
+ errMsgProcessProviderPrepareCmd,
+ errMsgProcessProviderEmptyCmd),
+ nil)
+ }
+ }
+
+ cmdArgs = append(cmdArgs, p.originalCommand...)
+ p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ p.command.Env = os.Environ()
+
+ return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+ if err := p.prepareCommand(); err != nil {
+ return nil, err
+ }
+
+ // Setup the pipes
+ outReadPipe, outWritePipe, err := os.Pipe()
+ if err != nil {
+ return nil, awserr.New(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderPipe,
+ err)
+ }
+
+ p.command.Stderr = os.Stderr // display stderr on console for MFA
+ p.command.Stdout = outWritePipe // get creds json on process's stdout
+ p.command.Stdin = os.Stdin // enable stdin for MFA
+
+ output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+ stdoutCh := make(chan error, 1)
+ go readInput(
+ io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+ output,
+ stdoutCh)
+
+ execCh := make(chan error, 1)
+ go executeCommand(*p.command, execCh)
+
+ finished := false
+ var errors []error
+ for !finished {
+ select {
+ case readError := <-stdoutCh:
+ errors = appendError(errors, readError)
+ finished = true
+ case execError := <-execCh:
+ err := outWritePipe.Close()
+ errors = appendError(errors, err)
+ errors = appendError(errors, execError)
+ if errors != nil {
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderProcess,
+ errors)
+ }
+ case <-time.After(p.Timeout):
+ finished = true
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderTimeout,
+ errors) // errors can be nil
+ }
+ }
+
+ out := output.Bytes()
+
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+ }
+
+ return out, nil
+}
+
+// appendError conveniently checks for nil before appending slice
+func appendError(errors []error, err error) []error {
+ if err != nil {
+ return append(errors, err)
+ }
+ return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+ tee := io.TeeReader(r, w)
+
+ _, err := ioutil.ReadAll(tee)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 00000000000..e1551495812
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,150 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/ini"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfiles loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned or error. Error will be
+// returned if it fails to read from the file, or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.OpenFile(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+
+ iniProfile, ok := config.GetSection(profile)
+ if !ok {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+ }
+
+ id := iniProfile.String("aws_access_key_id")
+ if len(id) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ nil)
+ }
+
+ secret := iniProfile.String("aws_secret_access_key")
+ if len(secret) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.String("aws_session_token")
+
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of home directory not found error being returned.
+		// This error is too verbose, failure when opening the file would have been
+		// a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 00000000000..531139e3971
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,55 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expired.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000000..0d1b5504700
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,299 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDKs's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want direct control over when the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
// StdinTokenProvider prompts on stderr and reads a single string value
// from stdin. An error is returned if reading from stdin fails.
//
// Use this function to read MFA tokens from stdin. It makes no attempt
// to make atomic prompts from stdin across multiple goroutines, so using
// it with multiple AssumeRoleProviders or Credentials will have
// undesirable results; a single Credentials with an AssumeRoleProvider
// can be shared safely.
//
// Blocks forever until something is provided on stdin.
func StdinTokenProvider() (string, error) {
	fmt.Fprint(os.Stderr, "Assume Role MFA token code: ")

	var code string
	_, err := fmt.Scanln(&code)
	return code, err
}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called when ever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider is set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
// Retrieve generates a new set of temporary credentials using STS.
//
// It fills in defaults on the receiver (RoleSessionName, Duration) on
// first use, builds the AssumeRole request including MFA details when
// SerialNumber is set, and records the returned expiration so the SDK
// refreshes proactively (see ExpiryWindow).
//
// NOTE(review): the TokenProvider field docs state TokenProvider wins
// when both it and TokenCode are set, but the code below checks
// TokenCode first — confirm the intended precedence.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {

	// Apply defaults where parameters are not set.
	if p.RoleSessionName == "" {
		// Try to work out a role name that will hopefully end up unique.
		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
	}
	if p.Duration == 0 {
		// Expire as often as AWS permits.
		p.Duration = DefaultDuration
	}
	input := &sts.AssumeRoleInput{
		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
		RoleArn:         aws.String(p.RoleARN),
		RoleSessionName: aws.String(p.RoleSessionName),
		ExternalId:      p.ExternalID,
	}
	if p.Policy != nil {
		input.Policy = p.Policy
	}
	if p.SerialNumber != nil {
		// MFA required: use an explicit TokenCode, else prompt via
		// TokenProvider, else fail with a descriptive error.
		if p.TokenCode != nil {
			input.SerialNumber = p.SerialNumber
			input.TokenCode = p.TokenCode
		} else if p.TokenProvider != nil {
			input.SerialNumber = p.SerialNumber
			code, err := p.TokenProvider()
			if err != nil {
				return credentials.Value{ProviderName: ProviderName}, err
			}
			input.TokenCode = aws.String(code)
		} else {
			return credentials.Value{ProviderName: ProviderName},
				awserr.New("AssumeRoleTokenNotAvailable",
					"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
		}
	}

	roleOutput, err := p.Client.AssumeRole(input)
	if err != nil {
		return credentials.Value{ProviderName: ProviderName}, err
	}

	// We will proactively generate new credentials before they expire.
	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)

	return credentials.Value{
		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
		SessionToken:    *roleOutput.Credentials.SessionToken,
		ProviderName:    ProviderName,
	}, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 00000000000..152d785b362
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called, with different parameters, again,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op meaning no metrics will be published.
+//
+// Example:
+// r, err := csm.Start("clientID", ":31000")
+// if err != nil {
+// panic(fmt.Errorf("failed starting CSM: %v", err))
+// }
+//
+// sess, err := session.NewSession(&aws.Config{})
+// if err != nil {
+// panic(fmt.Errorf("failed loading session: %v", err))
+// }
+//
+// r.InjectHandlers(&sess.Handlers)
+//
+// client := s3.New(sess)
+// resp, err := client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Will pause monitoring
+// r.Pause()
+// resp, err = client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Resume monitoring
+// r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+// r := csm.Get()
+// r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 00000000000..2f0c6eac9a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+ "fmt"
+ "sync"
+)
+
+var (
+ lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+ APICallMetricHandlerName = "awscsm.SendAPICallMetric"
+ APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
// Start will start a long running goroutine to capture
// client side metrics. Calling Start multiple times will only
// start the metric listener once and will panic if a different
// client ID or port is passed in.
//
// Example:
//	r, err := csm.Start("clientID", "127.0.0.1:8094")
//	if err != nil {
//		panic(fmt.Errorf("expected no error, but received %v", err))
//	}
//	sess := session.NewSession()
//	r.InjectHandlers(sess.Handlers)
//
//	svc := s3.New(sess)
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("bucket"),
//		Key: aws.String("key"),
//	})
func Start(clientID string, url string) (*Reporter, error) {
	lock.Lock()
	defer lock.Unlock()

	// First call creates the process-wide reporter; later calls must
	// supply the same client ID and URL.
	if sender == nil {
		sender = newReporter(clientID, url)
	} else {
		if sender.clientID != clientID {
			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
		}

		if sender.url != url {
			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
		}
	}

	// A failed dial discards the reporter so a subsequent Start can retry.
	if err := connect(url); err != nil {
		sender = nil
		return nil, err
	}

	return sender, nil
}
+
// Get returns the process-wide Reporter created by Start, or nil if
// Start has not been called successfully.
func Get() *Reporter {
	lock.Lock()
	defer lock.Unlock()

	return sender
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 00000000000..5bacc791a1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,109 @@
+package csm
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+type metricTime time.Time
+
+func (t metricTime) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(time.Time(t).UnixNano())
+ return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
+
+type metric struct {
+ ClientID *string `json:"ClientId,omitempty"`
+ API *string `json:"Api,omitempty"`
+ Service *string `json:"Service,omitempty"`
+ Timestamp *metricTime `json:"Timestamp,omitempty"`
+ Type *string `json:"Type,omitempty"`
+ Version *int `json:"Version,omitempty"`
+
+ AttemptCount *int `json:"AttemptCount,omitempty"`
+ Latency *int `json:"Latency,omitempty"`
+
+ Fqdn *string `json:"Fqdn,omitempty"`
+ UserAgent *string `json:"UserAgent,omitempty"`
+ AttemptLatency *int `json:"AttemptLatency,omitempty"`
+
+ SessionToken *string `json:"SessionToken,omitempty"`
+ Region *string `json:"Region,omitempty"`
+ AccessKey *string `json:"AccessKey,omitempty"`
+ HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
+ XAmzID2 *string `json:"XAmzId2,omitempty"`
+ XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
+
+ AWSException *string `json:"AwsException,omitempty"`
+ AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+ SDKException *string `json:"SdkException,omitempty"`
+ SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+ FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
+ FinalAWSException *string `json:"FinalAwsException,omitempty"`
+ FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
+ FinalSDKException *string `json:"FinalSdkException,omitempty"`
+ FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
+
+ DestinationIP *string `json:"DestinationIp,omitempty"`
+ ConnectionReused *int `json:"ConnectionReused,omitempty"`
+
+ AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+ ConnectLatency *int `json:"ConnectLatency,omitempty"`
+ RequestLatency *int `json:"RequestLatency,omitempty"`
+ DNSLatency *int `json:"DnsLatency,omitempty"`
+ TCPLatency *int `json:"TcpLatency,omitempty"`
+ SSLLatency *int `json:"SslLatency,omitempty"`
+
+ MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
+
// TruncateFields caps free-form string fields at fixed byte limits
// (presumably the limits the CSM backend enforces — confirm against the
// CSM spec) so one oversized value cannot invalidate the whole payload.
func (m *metric) TruncateFields() {
	m.ClientID = truncateString(m.ClientID, 255)
	m.UserAgent = truncateString(m.UserAgent, 256)

	m.AWSException = truncateString(m.AWSException, 128)
	m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)

	m.SDKException = truncateString(m.SDKException, 128)
	m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)

	m.FinalAWSException = truncateString(m.FinalAWSException, 128)
	m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)

	m.FinalSDKException = truncateString(m.FinalSDKException, 128)
	m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
}
+
// truncateString returns a pointer to a copy of *v truncated to at most
// l bytes. When v is nil or already within the limit, v itself is
// returned unchanged. The cut is by bytes, so a multi-byte rune at the
// boundary may be split.
func truncateString(v *string, l int) *string {
	if v == nil || len(*v) <= l {
		return v
	}

	truncated := (*v)[:l]
	return &truncated
}
+
// SetException records e in the attempt-level exception fields,
// dispatching on whether the error is attributed to AWS or to the SDK.
// Any other metricException implementation is silently ignored.
func (m *metric) SetException(e metricException) {
	switch te := e.(type) {
	case awsException:
		m.AWSException = aws.String(te.exception)
		m.AWSExceptionMessage = aws.String(te.message)
	case sdkException:
		m.SDKException = aws.String(te.exception)
		m.SDKExceptionMessage = aws.String(te.message)
	}
}
+
// SetFinalException records e in the final (ApiCall-level) exception
// fields, mirroring SetException. Other metricException implementations
// are silently ignored.
func (m *metric) SetFinalException(e metricException) {
	switch te := e.(type) {
	case awsException:
		m.FinalAWSException = aws.String(te.exception)
		m.FinalAWSExceptionMessage = aws.String(te.message)
	case sdkException:
		m.FinalSDKException = aws.String(te.exception)
		m.FinalSDKExceptionMessage = aws.String(te.message)
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 00000000000..514fc3739a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,54 @@
+package csm
+
+import (
+ "sync/atomic"
+)
+
+const (
+ runningEnum = iota
+ pausedEnum
+)
+
+var (
+ // MetricsChannelSize of metrics to hold in the channel
+ MetricsChannelSize = 100
+)
+
// metricChan is a bounded, pausable queue of metrics. paused is read
// and written atomically so Push may run concurrently with
// Pause/Continue.
type metricChan struct {
	ch     chan metric
	paused int64
}

// newMetricChan returns a metricChan whose buffer holds up to size
// metrics before Push starts dropping them.
func newMetricChan(size int) metricChan {
	return metricChan{
		ch: make(chan metric, size),
	}
}
+
// Pause stops Push from accepting new metrics until Continue is called.
func (ch *metricChan) Pause() {
	atomic.StoreInt64(&ch.paused, pausedEnum)
}

// Continue re-enables Push after a Pause.
func (ch *metricChan) Continue() {
	atomic.StoreInt64(&ch.paused, runningEnum)
}

// IsPaused reports whether the channel is currently paused.
func (ch *metricChan) IsPaused() bool {
	v := atomic.LoadInt64(&ch.paused)
	return v == pausedEnum
}
+
// Push will push metrics to the metric channel if the channel
// is not paused. It never blocks: the metric is dropped and false is
// returned when the channel is paused or the buffer is full.
func (ch *metricChan) Push(m metric) bool {
	if ch.IsPaused() {
		return false
	}

	// Non-blocking send; a full buffer drops the metric.
	select {
	case ch.ch <- m:
		return true
	default:
		return false
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 00000000000..54a99280ce9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
// metricException describes an error to attach to a CSM metric: an
// exception code plus a human-readable message.
type metricException interface {
	Exception() string
	Message() string
}

// requestException is the shared implementation backing both exception
// categories below.
type requestException struct {
	exception string
	message   string
}

// Exception returns the exception code.
func (e requestException) Exception() string {
	return e.exception
}
// Message returns the human-readable message.
func (e requestException) Message() string {
	return e.message
}

// awsException marks an error attributed to the AWS service.
type awsException struct {
	requestException
}

// sdkException marks an error generated inside the SDK.
type sdkException struct {
	requestException
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 00000000000..0b5571acfbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,260 @@
+package csm
+
+import (
+ "encoding/json"
+ "net"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+ // DefaultPort is used when no port is specified
+ DefaultPort = "31000"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
+type Reporter struct {
+ clientID string
+ url string
+ conn net.Conn
+ metricsCh metricChan
+ done chan struct{}
+}
+
+var (
+ sender *Reporter
+)
+
// connect dials the UDP endpoint for the package-level sender and, on
// the first successful connect, starts its background publishing
// goroutine. Callers hold the package lock and have ensured sender is
// non-nil (see Start, the only caller visible in this file).
func connect(url string) error {
	const network = "udp"
	if err := sender.connect(network, url); err != nil {
		return err
	}

	// Only spin up the publishing goroutine once per reporter lifetime.
	if sender.done == nil {
		sender.done = make(chan struct{})
		go sender.start()
	}

	return nil
}
+
// newReporter constructs a Reporter with a buffered metric channel. The
// network connection and background goroutine are set up later by
// connect.
func newReporter(clientID, url string) *Reporter {
	return &Reporter{
		clientID:  clientID,
		url:       url,
		metricsCh: newMetricChan(MetricsChannelSize),
	}
}
+
+func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ creds, _ := r.Config.Credentials.Get()
+
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Region: r.Config.Region,
+ Type: aws.String("ApiCallAttempt"),
+ Version: aws.Int(1),
+
+ XAmzRequestID: aws.String(r.RequestID),
+
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
+ AccessKey: aws.String(creds.AccessKeyID),
+ }
+
+ if r.HTTPResponse != nil {
+ m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+ rep.metricsCh.Push(m)
+}
+
+func getMetricException(err awserr.Error) metricException {
+ msg := err.Error()
+ code := err.Code()
+
+ switch code {
+ case "RequestError",
+ "SerializationError",
+ request.CanceledErrorCode:
+ return sdkException{
+ requestException{exception: code, message: msg},
+ }
+ default:
+ return awsException{
+ requestException{exception: code, message: msg},
+ }
+ }
+}
+
+func (rep *Reporter) sendAPICallMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Type: aws.String("ApiCall"),
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ Region: r.Config.Region,
+ Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+ XAmzRequestID: aws.String(r.RequestID),
+ MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
+ }
+
+ if r.HTTPResponse != nil {
+ m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetFinalException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+
+ // TODO: Probably want to figure something out for logging dropped
+ // metrics
+ rep.metricsCh.Push(m)
+}
+
// connect replaces any existing connection with a freshly dialed one on
// the given network and address.
func (rep *Reporter) connect(network, url string) error {
	if rep.conn != nil {
		// Best effort: the old connection's Close error is ignored.
		rep.conn.Close()
	}

	conn, err := net.Dial(network, url)
	if err != nil {
		return awserr.New("UDPError", "Could not connect", err)
	}

	rep.conn = conn

	return nil
}
+
// close signals the background goroutine to stop and pauses the metric
// channel so no further metrics are queued.
//
// NOTE(review): rep.done is not cleared here and closing an
// already-closed channel panics — confirm close cannot be reached twice
// before the start goroutine nils rep.done.
func (rep *Reporter) close() {
	if rep.done != nil {
		close(rep.done)
	}

	rep.metricsCh.Pause()
}
+
// start is the long-running publishing loop: it drains metrics from the
// channel, JSON-encodes each one, and writes it as a datagram to the
// CSM endpoint. It exits when rep.done is closed, pausing the channel
// on the way out.
func (rep *Reporter) start() {
	defer func() {
		rep.metricsCh.Pause()
	}()

	for {
		select {
		case <-rep.done:
			// NOTE(review): writing rep.done from this goroutine while
			// other goroutines read it appears unsynchronized — verify
			// with the race detector.
			rep.done = nil
			return
		case m := <-rep.metricsCh.ch:
			// TODO: What to do with this error? Probably should just log
			b, err := json.Marshal(m)
			if err != nil {
				continue
			}

			// Write errors are ignored: metrics are fire-and-forget UDP.
			rep.conn.Write(b)
		}
	}
}
+
// Pause will pause the metric channel preventing any new metrics from
// being added. Safe to call on a nil Reporter (no-op). The package lock
// serializes Pause against Start, Get, and Continue.
func (rep *Reporter) Pause() {
	lock.Lock()
	defer lock.Unlock()

	if rep == nil {
		return
	}

	rep.close()
}
+
// Continue will reopen the metric channel and allow for monitoring
// to be resumed. Safe to call on a nil Reporter, and a no-op when the
// channel is not currently paused.
func (rep *Reporter) Continue() {
	lock.Lock()
	defer lock.Unlock()
	if rep == nil {
		return
	}

	if !rep.metricsCh.IsPaused() {
		return
	}

	rep.metricsCh.Continue()
}
+
// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent. Safe to call on a nil
// Reporter (nothing is installed).
//
// Example:
//	// Start must be called in order to inject the correct handlers
//	r, err := csm.Start("clientID", "127.0.0.1:8094")
//	if err != nil {
//		panic(fmt.Errorf("expected no error, but received %v", err))
//	}
//
//	sess := session.NewSession()
//	r.InjectHandlers(&sess.Handlers)
//
//	// create a new service client with our client side metric session
//	svc := s3.New(sess)
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
	if rep == nil {
		return
	}

	// PushFront so these run before other handlers in each phase.
	handlers.Complete.PushFrontNamed(request.NamedHandler{
		Name: APICallMetricHandlerName,
		Fn:   rep.sendAPICallMetric,
	})

	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
		Name: APICallAttemptMetricHandlerName,
		Fn:   rep.sendAPICallAttemptMetric,
	})
}
+
// boolIntValue converts a bool to its integer representation: 1 for
// true, 0 for false.
func boolIntValue(b bool) int {
	if !b {
		return 0
	}
	return 1
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000000..23bb639e018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
// Get returns the SDK's default values with Config and handlers pre-configured.
// The credential chain is built from the same config and handlers so
// the returned pieces stay consistent with each other.
func Get() Defaults {
	cfg := Config()
	handlers := Handlers()
	cfg.Credentials = CredChain(cfg, handlers)

	return Defaults{
		Config:   cfg,
		Handlers: handlers,
	}
}
+
// Config returns the default configuration without credentials.
// To retrieve a config with credentials also included use
// `defaults.Get().Config` instead.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the configuration of an
// existing service client or session.
func Config() *aws.Config {
	// Anonymous credentials by default; the region comes from the
	// AWS_REGION environment variable (empty string when unset).
	return aws.NewConfig().
		WithCredentials(credentials.AnonymousCredentials).
		WithRegion(os.Getenv("AWS_REGION")).
		WithHTTPClient(http.DefaultClient).
		WithMaxRetries(aws.UseServiceDefaultRetries).
		WithLogger(aws.NewDefaultLogger()).
		WithLogLevel(aws.LogOff).
		WithEndpointResolver(endpoints.DefaultResolver())
}
+
// Handlers returns the default request handlers.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the request handlers of an
// existing service client or session.
func Handlers() request.Handlers {
	var handlers request.Handlers

	// Registration order within each phase is significant; the Validate
	// and Build phases stop at the first handler error.
	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
	handlers.Build.AfterEachFn = request.HandlerListStopOnError
	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
	handlers.Send.PushBackNamed(corehandlers.SendHandler)
	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)

	return handlers
}
+
// CredChain returns the default credential chain.
//
// Generally you shouldn't need to use this method directly, but
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
	// Chain over the default provider list; error verbosity follows the
	// config's CredentialsChainVerboseErrors flag.
	return credentials.NewCredentials(&credentials.ChainProvider{
		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
		Providers:     CredProviders(cfg, handlers),
	})
}
+
// CredProviders returns the slice of providers used in
// the default credential chain.
//
// For applications that need to use some other provider (for example use
// different environment variables for legacy reasons) but still fall back
// on the default chain of providers. This allows the default chain to be
// automatically updated.
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
	// Order matters: environment variables, then the shared credentials
	// file (default location/profile), then remote (ECS/EC2) providers.
	return []credentials.Provider{
		&credentials.EnvProvider{},
		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
		RemoteCredProvider(*cfg, handlers),
	}
}
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
//
// Precedence: a full container-credentials URI (restricted to loopback
// hosts), then the ECS relative credentials URI, then the EC2 instance
// role via the metadata service.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
		return localHTTPCredProvider(cfg, handlers, u)
	}

	if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
		u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
		return httpCredProvider(cfg, handlers, u)
	}

	return ec2RoleProvider(cfg, handlers)
}
+
// lookupHostFn resolves a hostname to its addresses; declared as a
// package variable so tests can stub DNS resolution.
var lookupHostFn = net.LookupHost

// isLoopbackHost reports whether host is a loopback IP literal, or a
// name that resolves exclusively to loopback addresses. Resolution
// failures are returned as errors.
func isLoopbackHost(host string) (bool, error) {
	// An IP literal can be classified without a DNS lookup.
	if ip := net.ParseIP(host); ip != nil {
		return ip.IsLoopback(), nil
	}

	addrs, err := lookupHostFn(host)
	if err != nil {
		return false, err
	}

	// Every resolved address must be loopback.
	for _, a := range addrs {
		if !net.ParseIP(a).IsLoopback() {
			return false, nil
		}
	}

	return true, nil
}
+
// localHTTPCredProvider validates that u parses and points at a
// loopback host before delegating to httpCredProvider. Invalid or
// non-loopback URLs produce an ErrorProvider instead of a working one.
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	var errMsg string

	parsed, err := url.Parse(u)
	if err != nil {
		errMsg = fmt.Sprintf("invalid URL, %v", err)
	} else {
		host := aws.URLHostname(parsed)
		if len(host) == 0 {
			errMsg = "unable to parse host from local HTTP cred provider URL"
		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
			errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
		} else if !isLoopback {
			errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
		}
	}

	if len(errMsg) > 0 {
		// Log and return an error-carrying provider rather than failing
		// hard here; presumably the error surfaces when credentials are
		// first requested — confirm against credentials.ErrorProvider.
		if cfg.Logger != nil {
			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
		}
		return credentials.ErrorProvider{
			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
			ProviderName: endpointcreds.ProviderName,
		}
	}

	return httpCredProvider(cfg, handlers, u)
}
+
// httpCredProvider returns an endpoint credentials provider targeting
// u, configured with a 5 minute expiry window and an optional
// authorization token read from AWS_CONTAINER_AUTHORIZATION_TOKEN.
func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
	return endpointcreds.NewProviderClient(cfg, handlers, u,
		func(p *endpointcreds.Provider) {
			p.ExpiryWindow = 5 * time.Minute
			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
		},
	)
}
+
// ec2RoleProvider returns an EC2 instance-role credentials provider
// backed by the EC2 metadata service, with a 5 minute expiry window.
// The metadata endpoint is resolved via the config's resolver, falling
// back to the SDK default resolver.
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
	resolver := cfg.EndpointResolver
	if resolver == nil {
		resolver = endpoints.DefaultResolver()
	}

	// Resolution error is discarded; e is zero-valued on failure —
	// TODO(review) confirm this is intended.
	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
	return &ec2rolecreds.EC2RoleProvider{
		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
		ExpiryWindow: 5 * time.Minute,
	}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 00000000000..ca0ee1dcc78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 00000000000..4fcb6161848
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value use the "String" function. This makes it easy to
+// get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 00000000000..d57a1af5992
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,169 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metdata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetUserData returns the userdata that was configured for the service. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: "/user-data",
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ if len(resp) == 0 {
+ return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 00000000000..f4438eae9c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,152 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
+// true instructs the SDK to disable the EC2 Metadata client. The client cannot
+// be used while the environment variable is set to true, (case insensitive).
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// A EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client is provided from the stdlib default, or no client
+// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
+// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Disable the EC2 Metadata service if the environment variable is set.
+ // This short-circuits the service's functionality to always fail to send
+ // requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(request.NamedHandler{
+ Name: corehandlers.SendHandler.Name,
+ Fn: func(r *request.Request) {
+ r.HTTPResponse = &http.Response{
+ Header: http.Header{},
+ }
+ r.Error = awserr.New(
+ request.CanceledErrorCode,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 00000000000..87b9ff3ffec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,188 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions are the options for how the endpoints model definition
+// are decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// a endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ custFixAppAutoscalingUsGov(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ custAddDualstack(p, "s3")
+ custAddDualstack(p, "s3-control")
+}
+
+func custAddDualstack(p *partition, svcName string) {
+ s, ok := p.Services[svcName]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services[svcName] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+ if e, a := s.Defaults.Hostname, expectHostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+
+ s.Defaults.Hostname = expectHostname + ".cn"
+ p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+ if p.ID != "aws-us-gov" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ if a := s.Defaults.CredentialScope.Service; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+ return
+ }
+
+ if a := s.Defaults.Hostname; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+ return
+ }
+
+ s.Defaults.CredentialScope.Service = "application-autoscaling"
+ s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
+
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 00000000000..d020c66c28c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,4158 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // EU (Stockholm).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-north-1": region{
+ Description: "EU (Stockholm)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "eu-west-3": region{
+ Description: "EU (Paris)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "api.ecr.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "api.ecr.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "api.ecr.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "api.ecr.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "api.ecr.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "api.ecr.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "api.ecr.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "api.ecr.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.ecr.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "api.ecr.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "api.ecr.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "api.ecr.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.mediatailor": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appsync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "autoscaling-plans",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "chime": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datasync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "mediaconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mq": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "robomaker": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "s3-control.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "s3-control.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "s3-control.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "s3-control.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "s3-control.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "s3-control.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-control.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3-control.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "s3-control.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-east-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "s3-control.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-control.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-west-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ca-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-3": endpoint{
+ Protocols: []string{"https"},
+ },
+ "sa-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+ // AwsCnPartition returns the Partition for the AWS China (aws-cn)
+ // partition, built from the package-level awscnPartition table. Use the
+ // returned Partition to resolve regional service endpoints whose regions
+ // match ^cn-\w+-\d+$ (e.g. cn-north-1, cn-northwest-1).
+ func AwsCnPartition() Partition {
+ 	return awscnPartition.Partition()
+ }
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-east-1": region{
+ Description: "AWS GovCloud (US-East)",
+ },
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "es-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "s3-control.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-control.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
new file mode 100644
index 00000000000..000dd79eec4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
@@ -0,0 +1,141 @@
+package endpoints
+
+// Service identifiers
+//
+// Deprecated: Use client package's EndpointID value instead of these
+// ServiceIDs. These IDs are not maintained, and are out of date.
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AppsyncServiceID = "appsync" // Appsync.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ ChimeServiceID = "chime" // Chime.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ S3ControlServiceID = "s3-control" // S3Control.
+ SagemakerServiceID = "api.sagemaker" // Sagemaker.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TransferServiceID = "transfer" // Transfer.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 00000000000..84316b92c05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns a endpoint Resolver will search the metadata and build an associated
+// endpoint if one is found. The default resolver will search all partitions
+// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and
+// AWS GovCloud (US) (aws-us-gov).
+// .
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+// for id, _ := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+// for id, _ := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic how
+// endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointsResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 00000000000..f82babf6f95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,449 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+ // StrictMatching is not enabled a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+ // Enables strict matching of services and regions resolved endpoints.
+ // If the partition doesn't enumerate the exact service and region an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+
+ // Enables resolving a service endpoint based on the region provided if the
+ // service does not exist. The service endpoint ID will be used as the service
+ // domain name prefix. By default the endpoint resolver requires the service
+ // to be known when resolving endpoints.
+ //
+ // If resolving an endpoint on the partition list the provided region will
+ // be used to determine which partition's domain name pattern to combine
+ // with the service endpoint ID. If both the service and region are unknown
+ // and resolving the endpoint on partition list an UnknownEndpointError error
+ // will be returned.
+ //
+ // If resolving an endpoint on a partition specific resolver that partition's
+ // domain name pattern will be used with the service endpoint ID. If both
+ // region and service do not exist when resolving an endpoint on a specific
+ // partition the partition's domain pattern will be used to combine the
+ // endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together. Each option function
+// is applied to o in the order provided.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+// schemeRE matches a URL that already begins with a scheme (e.g. "https://").
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default
+// HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist false will be returned
+// as the second parameter, along with an empty map.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless if
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id, r := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a map, indexed by service ID, of all services that are
+// known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if r, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL, including scheme.
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 00000000000..ff6f76db6eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,307 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// partitions is the ordered list of partitions decoded from the endpoints model.
+type partitions []partition
+
+// EndpointFor resolves the endpoint using the first partition that can resolve
+// the service/region pair. If no partition matches and StrictMatching is
+// disabled, the first partition's patterns are used as a loose-match fallback.
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+ // If loose matching fallback to first partition format to use
+ // when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+// partition is the JSON model of a single partition (e.g. standard AWS,
+// China, GovCloud) from the endpoints model file.
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+// Partition returns the exported Partition view of p. Note p is a value
+// receiver, so the returned Partition points at a private copy of p.
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
+// canResolveEndpoint reports whether the partition has an exact service and
+// region endpoint match. When strictMatch is false a region matching the
+// partition's region pattern is also accepted.
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+// EndpointFor resolves the endpoint for the service and region within this
+// partition. See Partition.EndpointFor for option and error semantics.
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if !(hasService || opt.ResolveUnknownService) {
+ // Only return error if the resolver will not fallback to creating
+ // endpoint based on service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opt.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+// serviceList returns the IDs of the services in ss, in no particular order.
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+// endpointList returns the IDs of the endpoints in es, in no particular order.
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+// regionRegex wraps regexp.Regexp so the region pattern can be decoded
+// directly from the JSON endpoints model.
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+// UnmarshalJSON compiles the JSON string b into the wrapped regexp.
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+// regions maps a region ID to its model.
+type regions map[string]region
+
+// region is the JSON model of a single region.
+type region struct {
+ Description string `json:"description"`
+}
+
+// services maps a service ID to its model.
+type services map[string]service
+
+// service is the JSON model of a single service within a partition.
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+// endpointForRegion returns the endpoint to use for the region. For services
+// that are not regionalized the partition endpoint is returned, and the bool
+// reports whether region named that partition endpoint exactly.
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint, return
+ // blank that will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+// endpoints maps an endpoint (region) ID to its endpoint model.
+type endpoints map[string]endpoint
+
+// endpoint is the JSON model of a single endpoint. Zero-value fields are
+// filled in from the service and partition defaults at resolve time.
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // Signature Version not used
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+// Preference order used when an endpoint models multiple protocols or
+// signature versions.
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+// getByPriority returns the first element of p that is present in s. If none
+// of the prioritized values are present the first element of s is returned,
+// or def when s is empty.
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+// resolve merges e with the provided defaults (e takes precedence over defs,
+// and later defs over earlier ones), expands the {service}, {region}, and
+// {dnsSuffix} hostname variables, and returns the fully resolved endpoint.
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+ // Offset the hostname for dualstack if enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ // Fall back to the resolved region/service when the endpoint does not
+ // model an explicit credential scope.
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+// getEndpointScheme returns "http" when SSL is disabled, otherwise the
+// highest priority protocol supported by the endpoint.
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+// mergeIn copies the set (non-zero) fields of other into e, leaving unset
+// fields of other untouched in e.
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+// credentialScope is the JSON model overriding the signing region and
+// service name for an endpoint.
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+// boxedBool is a tri-state bool allowing "unset" to be distinguished from
+// false when decoding the JSON model.
+type boxedBool int
+
+// UnmarshalJSON decodes a JSON bool into boxedTrue or boxedFalse.
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 00000000000..0fdfcc56e05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,351 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// A CodeGenOptions are the options for code generating the endpoints into
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+
+ // Disables code generation of the service endpoint prefix IDs defined in
+ // the model.
+ DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together.
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel given an endpoints model file will decode it and attempt to
+// generate Go code from the model definition. Error will be returned if
+// the code is unable to be generated, or decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ // The template reads the decoded model via $.Resolver and the code
+ // generation flags via the embedded CodeGenOptions.
+ v := struct {
+ Resolver
+ CodeGenOptions
+ }{
+ Resolver: resolver,
+ CodeGenOptions: opts,
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+// toSymbol converts v into a Go identifier by title-casing it and stripping
+// any rune that is not a letter or a number.
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+// quoteString returns v rendered as a quoted Go string literal.
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+// regionConstName builds the region constant name from the partition and
+// region identifiers.
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+// partitionGetter returns the name of the exported getter function generated
+// for the partition ID.
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+// partitionVarName returns the name of the unexported package variable
+// generated to hold the partition's model.
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+// listPartitionNames returns the partition names joined as an English list,
+// e.g. "A", "A and B", or "A, B, and C".
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+// boxedBoolIfSet formats msg with the boxedBool constant's name, or returns
+// the empty string when v is unset.
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+// stringIfSet formats msg with v, or returns the empty string when v is empty.
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+// stringSliceIfSet formats msg with vs rendered as quoted, comma-separated
+// values, or returns the empty string when vs is empty.
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+// endpointIsSet reports whether v differs from the zero-value endpoint.
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+// serviceSet returns the set of service IDs found across all partitions.
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+// funcMap exposes the helper functions above to the v3Tmpl code generation
+// template.
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+// v3Tmpl is the text/template, driven by funcMap, that CodeGenModel executes
+// (entry point: the "defaults" template) to render the decoded endpoints
+// model as generated Go source.
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" $.Resolver }}
+
+ {{ range $_, $partition := $.Resolver }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ if not $.DisableGenerateServiceIDs -}}
+ {{ template "service consts" $.Resolver }}
+ {{- end }}
+
+ {{ template "endpoint resolvers" $.Resolver }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 00000000000..fa06f7a8f8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,13 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 00000000000..91a6f277a7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 00000000000..6ed15b2ecc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug state that debug output should be logged by the SDK. This should
+ // be used to inspect request made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests will
+ // be retried. This should be used to log when you want to log when service
+ // requests are being retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+
+ // LogDebugWithEventStreamBody states the SDK should log EventStream
+ // request and response bodies. This should be used to log the EventStream
+ // wire unmarshaled message content of requests and responses made while
+ // using the SDK. Will also enable LogDebug.
+ LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use same formatting runes as the stdlib log.Logger
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 00000000000..271da432ce1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine,!plan9
+
+package request
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
new file mode 100644
index 00000000000..daf9eca4373
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
@@ -0,0 +1,11 @@
+// +build appengine plan9
+
+package request
+
+import (
+ "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+ return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000000..8ef8548a96d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,277 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalStream HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ CompleteAttempt HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalStream: h.UnmarshalStream.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ CompleteAttempt: h.CompleteAttempt.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalStream.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.CompleteAttempt.Clear()
+ h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition such as error like, HandlerListStopOnError.
+ // Or for logging like HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift array preventing creating new arrays
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement list so next check to length is correct
+ i--
+ }
+ }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in. The matched
+// handlers will be swapped in. True is returned if the handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+ var swapped bool
+
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == name {
+ l.list[i] = replace
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 00000000000..79f79602b03
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 00000000000..b0c2ef4fe67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,60 @@
+package request
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, sdkio.SeekStart)
+
+ reader.buf = buf
+ return reader
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+ o.Close()
+ return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 00000000000..8f2eb3e43c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,673 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+ // presign is invalid
+ ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given an aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ AttemptTime time.Time
+ Time time.Time
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ // A value greater than 0 instructs the request to be signed as Presigned URL
+ // You should not set this field directly. Instead use Request's
+ // Presign or PresignRequest methods.
+ ExpireTime time.Duration
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wrap the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ SanitizeHostForHeader(httpReq)
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// A Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// a in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+ return false
+ }
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration 0 or less. An
+// error is returned if expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers not hoisted without this. Making the presigned URL
+ // useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
+
+// PresignRequest behaves just like presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration 0 or less. An
+// error is returned if expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API url.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ }
+
+ var body io.ReadCloser
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if a aws.ReaderSeekerCloser was used with
+ // a io.Reader that was not also an io.Seeker, or did not implement
+ // Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ if err := r.Error; err != nil {
+ return err
+ }
+
+ for {
+ r.Error = nil
+ r.AttemptTime = time.Now()
+
+ if err := r.Sign(); err != nil {
+ debugLogReqError(r, "Sign Request", false, err)
+ return err
+ }
+
+ if err := r.sendRequest(); err == nil {
+ return nil
+ } else if !shouldRetryCancel(r.Error) {
+ return err
+ } else {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+
+ if r.Error != nil || !aws.BoolValue(r.Retryable) {
+ return r.Error
+ }
+
+ r.prepareRetry()
+ continue
+ }
+ }
+}
+
+// prepareRetry readies the Request for the next attempt: it logs the retry
+// when debug retry logging is enabled, swaps in a fresh http.Request, rewinds
+// the body, and closes the previous response body so no connection state is
+// leaked between attempts.
+func (r *Request) prepareRetry() {
+	if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+		r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+			r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+	}
+
+	// The previous http.Request will have a reference to the r.Body
+	// and the HTTP Client's Transport may still be reading from
+	// the request's body even though the Client's Do returned.
+	r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+	r.ResetBody()
+
+	// Closing response body to ensure that no response body is leaked
+	// between retry attempts.
+	if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+		r.HTTPResponse.Body.Close()
+	}
+}
+
+// sendRequest performs a single attempt of the request: it runs the Send
+// handlers, then the UnmarshalMeta/ValidateResponse handlers, and finally the
+// Unmarshal handlers, returning the first error recorded on the Request. The
+// CompleteAttempt handlers always run, regardless of outcome.
+func (r *Request) sendRequest() (sendErr error) {
+	defer r.Handlers.CompleteAttempt.Run(r)
+
+	r.Retryable = nil
+	r.Handlers.Send.Run(r)
+	if r.Error != nil {
+		debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
+		return r.Error
+	}
+
+	r.Handlers.UnmarshalMeta.Run(r)
+	r.Handlers.ValidateResponse.Run(r)
+	if r.Error != nil {
+		r.Handlers.UnmarshalError.Run(r)
+		debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
+		return r.Error
+	}
+
+	r.Handlers.Unmarshal.Run(r)
+	if r.Error != nil {
+		debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
+		return r.Error
+	}
+
+	return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+// temporary matches errors that report whether they are transient, such as
+// net.Error implementations.
+type temporary interface {
+	Temporary() bool
+}
+
+// shouldRetryCancel reports whether err is retryable. Canceled requests
+// (CanceledErrorCode, or the known net/http cancellation messages) are not
+// retried. Refused connections and temporary errors are retried. Errors that
+// cannot be classified default to retryable.
+func shouldRetryCancel(err error) bool {
+	switch err := err.(type) {
+	case awserr.Error:
+		if err.Code() == CanceledErrorCode {
+			return false
+		}
+		return shouldRetryCancel(err.OrigErr())
+	case *url.Error:
+		if strings.Contains(err.Error(), "connection refused") {
+			// Refused connections should be retried as the service may not yet
+			// be running on the port. Go TCP dial considers refused
+			// connections as not temporary.
+			return true
+		}
+		// *url.Error only implements Temporary after golang 1.6 but since
+		// url.Error only wraps the error:
+		return shouldRetryCancel(err.Err)
+	case temporary:
+		// If the error is temporary, we want to allow continuation of the
+		// retry process
+		return err.Temporary()
+	case nil:
+		// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+		// because we don't know the cause, it is marked as retriable. See
+		// TestRequest4xxUnretryable for an example.
+		return true
+	default:
+		switch err.Error() {
+		case "net/http: request canceled",
+			"net/http: request canceled while waiting for connection":
+			// known Go 1.5 error case when an http request is canceled
+			return false
+		}
+		// here we don't know the error; so we allow a retry.
+		return true
+	}
+}
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// stripPort returns hostport without any port number.
+//
+// If hostport is an IPv6 literal with a port number, stripPort returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
+
+// portOnly returns the port part of hostport, without the leading colon.
+// If hostport doesn't contain a port, portOnly returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return ""
+	}
+	if i := strings.Index(hostport, "]:"); i != -1 {
+		return hostport[i+len("]:"):]
+	}
+	if strings.Contains(hostport, "]") {
+		return ""
+	}
+	return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 00000000000..e36e468b7c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not
+// include a body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 00000000000..7c6a8000f67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+)
+
+// NoBody is a http.NoBody reader instructing the Go HTTP client to not
+// include a body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 00000000000..a7365cd1e46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for
+// cancellation. Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 00000000000..307fa0705be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for
+// cancellation. Context will also be used for request retry delay.
+//
+// Wires the context's Done channel to the http.Request's Cancel field, since
+// http.Request.WithContext is not available prior to Go 1.7.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 00000000000..a633ed5acfa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,264 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides paginating of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+//	cont := true
+//	for p.Next() && cont {
+//		data := p.Page().(*s3.ListObjectsOutput)
+//		// process the page's data
+//	}
+//	return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+	// Function to return a Request value for each pagination request.
+	// Any configuration or handlers that need to be applied to the request
+	// prior to getting the next page should be done here before the request
+	// returned.
+	//
+	// NewRequest should always be built from the same API operations. It is
+	// undefined if different API operations are returned on subsequent calls.
+	NewRequest func() (*Request, error)
+	// EndPageOnSameToken, when enabled, will allow the paginator to stop on
+	// tokens that are the same as its previous tokens.
+	EndPageOnSameToken bool
+
+	started    bool
+	prevTokens []interface{}
+	nextTokens []interface{}
+
+	err     error
+	curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Page returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if v == false {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// logDeprecatedf logs msg to logger exactly once per flag, ensuring a
+// deprecated item is reported the first time it is used rather than on every
+// call.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+	if logger == nil {
+		return
+	}
+	if atomic.CompareAndSwapInt32(flag, 0, 1) {
+		logger.Log(msg)
+	}
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+		"Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) NextPage() *Request {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+		"Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//	func(page *T, lastPage bool) bool {
+//		return true // return false to stop iterating
+//	}
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+		"Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 00000000000..d0aa54c6d10
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,163 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+ "TransactionInProgressException": {},
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 00000000000..09a44eb987a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return a ErrReadTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that replaces the request's top
+// level error with its wrapped error when the wrapped error's code is
+// ErrCodeResponseTimeout.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurred, we will return the
+// ErrCodeResponseTimeout.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 00000000000..8630683f317
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil, Implemented for awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field Returns the field and context the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   fmt.Sprintf("missing required field"),
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+	errInvalidParam
+	max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+	return &ErrParamMaxLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMaxLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("maximum size of %v, %v", max, value),
+		},
+		max: max,
+	}
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+	return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+	errInvalidParam
+	format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+	return &ErrParamFormat{
+		errInvalidParam: errInvalidParam{
+			code:  ParamFormatErrCode,
+			field: field,
+			msg:   fmt.Sprintf("format %v, %v", format, value),
+		},
+		format: format,
+	}
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+	return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 00000000000..4601f883cc5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number
+// of times the waiter should attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+//
+// Attempt is the number of attempts the Waiter has made checking the resource
+// state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors,
+// or the max attempts expires.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
new file mode 100644
index 00000000000..ea9ebb6f6a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
new file mode 100644
index 00000000000..fec39dfc126
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.6,go1.5
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
new file mode 100644
index 00000000000..1c5a5391e65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
@@ -0,0 +1,23 @@
+// +build !go1.7,go1.6
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 00000000000..38a7b05a621
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment, and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using the method above that will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers. With credentials
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE to be set, or "default" is used.
+
+ // Create Session
+ sess := session.Must(session.NewSession())
+
+ // Create a Session with a custom region
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
+
+ // Create a S3 client instance from a session
+ sess := session.Must(session.NewSession())
+
+ svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
+
+ // Specify profile to load for the session's config
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Profile: "profile_name",
+ }))
+
+ // Specify profile for config and region for requests
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Region: aws.String("us-east-1")},
+ Profile: "profile_name",
+ }))
+
+ // Force enable Shared Config support
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Payload: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. When read from a configuration file, both
+aws_access_key_id and aws_secret_access_key must be provided together in the
+same file to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+ role_arn = arn:aws:iam:::role/
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial =
+ role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+ region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assumea role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration, but
+the AssumeRoleTokenProvider session option is not set, an error will
+be returned when creating the session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To setup assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+ AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it is
+not provided in the environment the region must be provided before a service
+client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and custom HTTP client, the HTTP client needs to be provided
+when creating the session. Not the service client.
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 00000000000..e3959b959ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,236 @@
+package session
+
+import (
+ "os"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+ // Environment configuration values. If set both Access Key ID and Secret Access
+ // Key must be provided. A Session Token can optionally also be provided, but is
+ // not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+ // Region value will instruct the SDK where to make service API requests to. If
+ // it is not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the session. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ csmEnabled string
+ CSMEnabled bool
+ CSMPort string
+ CSMClientID string
+
+ enableEndpointDiscovery string
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+}
+
+var (
+ csmEnabledEnvKey = []string{
+ "AWS_CSM_ENABLED",
+ }
+ csmPortEnvKey = []string{
+ "AWS_CSM_PORT",
+ }
+ csmClientIDEnvKey = []string{
+ "AWS_CSM_CLIENT_ID",
+ }
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ enableEndpointDiscoveryEnvKey = []string{
+ "AWS_ENABLE_ENDPOINT_DISCOVERY",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+ // CSM environment variables
+ setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+ setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+ setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+ cfg.CSMEnabled = len(cfg.csmEnabled) > 0
+
+ // Require logical grouping of credentials
+ if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+ cfg.Creds = credentials.Value{}
+ } else {
+ cfg.Creds.ProviderName = EnvProviderName
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+ // endpoint discovery is in reference to it being enabled.
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+ if len(cfg.enableEndpointDiscovery) > 0 {
+ cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+ }
+
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+ if len(cfg.SharedCredentialsFile) == 0 {
+ cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(cfg.SharedConfigFile) == 0 {
+ cfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+ return cfg
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 00000000000..be4b5f07772
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,719 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/csm"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+const (
+ // ErrCodeSharedConfig represents an error that occurs in the shared
+ // configuration logic
+ ErrCodeSharedConfig = "SharedConfigErr"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
+// method could now encounter an error when loading the configuration. When
+// The environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+ // load initial config from environment
+ envCfg := loadEnvConfig()
+
+ if envCfg.EnableSharedConfig {
+ var cfg aws.Config
+ cfg.MergeIn(cfgs...)
+ s, err := NewSessionWithOptions(Options{
+ Config: cfg,
+ SharedConfigState: SharedConfigEnable,
+ })
+ if err != nil {
+ // Old session.New expected all errors to be discovered when
+ // a request is made, and would report the errors then. This
+ // needs to be replicated if an error occurs while creating
+ // the session.
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+ "Use session.NewSession to handle errors occurring during session creation."
+
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s = &Session{Config: defaults.Config()}
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+ }
+
+ return s
+ }
+
+ s := deprecatedNewSession(cfgs...)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+//
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set in with this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+ // If not set, configuration values from SDK defaults, environment,
+ // config will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // Ordered list of files the session will load configuration from.
+ // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE.
+ SharedConfigFiles []string
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+ // token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+ // the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+ // returned. If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The Session option CustomCABundle is also available when creating sessions
+ // to also enable this feature. CustomCABundle session option field has priority
+ // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg = loadSharedEnvConfig()
+ } else {
+ envCfg = loadEnvConfig()
+ }
+
+ if len(opts.Profile) > 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ return s
+}
+
+func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
+ logger.Log("Enabling CSM")
+ if len(port) == 0 {
+ port = csm.DefaultPort
+ }
+
+ r, err := csm.Start(clientID, "127.0.0.1:"+port)
+ if err != nil {
+ return
+ }
+ r.InjectHandlers(handlers)
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Get a merged version of the user provided config to determine if
+ // credentials were provided.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+
+ // Ordered config files will be loaded in with later files overwriting
+ // previous config file values.
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+ // Nil transport implies `http.DefaultTransport` should be used. Since
+ // the SDK cannot modify, nor copy, the `DefaultTransport`, specifying
+ // the values is the next closest behavior.
+ t = getCABundleTransport()
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+ // Merge in user provided configuration
+ cfg.MergeIn(userCfg)
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ if cfg.EnableEndpointDiscovery == nil {
+ if envCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+ } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+ }
+ }
+
+ // Configure credentials if not already set
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+
+ // inspect the profile to see if a credential source has been specified.
+ if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
+
+ // if both credential_source and source_profile have been set, return an error
+ // as this is undefined behavior.
+ if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
+ return ErrSharedConfigSourceCollision
+ }
+
+ // valid credential source values
+ const (
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+ )
+
+ switch sharedCfg.AssumeRole.CredentialSource {
+ case credSourceEc2Metadata:
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ cfgCp.Credentials = credentials.NewCredentials(p)
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ case credSourceEnvironment:
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ case credSourceECSContainer:
+ if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+ return ErrSharedConfigECSContainerEnvVarEmpty
+ }
+
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ creds := credentials.NewCredentials(p)
+
+ cfg.Credentials = creds
+ default:
+ return ErrSharedConfigInvalidCredSource
+ }
+
+ return nil
+ }
+
+ if len(envCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+ cfgCp := *cfg
+ cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.AssumeRoleSource.Creds,
+ )
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+ } else if len(sharedCfg.CredentialProcess) > 0 {
+ cfg.Credentials = processcreds.NewCredentials(
+ sharedCfg.CredentialProcess,
+ )
+ } else {
+ // Fallback to default credentials provider, include mock errors
+ // for the credential chain so user can identify why credentials
+ // failed to be retrieved.
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+ &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+ defaults.RemoteCredProvider(*cfg, handlers),
+ },
+ })
+ }
+ }
+
+ return nil
+}
+
+func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
+ return stscreds.NewCredentials(
+ &Session{
+ Config: &cfg,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.AssumeRole.RoleARN,
+ func(opt *stscreds.AssumeRoleProvider) {
+ opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+ // Assume role with external ID
+ if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+ opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
+ },
+ )
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
+// MFAToken option is not set when shared config is configured to assume a
+// role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+ return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+ Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ // Backwards compatibility, the error will be eaten if user calls ClientConfig
+ // directly. All SDK services will use clientConfigWithErr.
+ cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+ return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+ var err error
+
+ region := aws.StringValue(s.Config.Region)
+
+ if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+ resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ } else {
+ resolved, err = s.Config.EndpointResolver.EndpointFor(
+ serviceName, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+
+ region := aws.StringValue(s.Config.Region)
+
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 00000000000..7cb44021b3f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,329 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+
+ "github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+ // External Credential Process
+ credentialProcessKey = `credential_process`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+ RoleARN string
+ SourceProfile string
+ CredentialSource string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+}
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ AssumeRole assumeRoleConfig
+ AssumeRoleSource *sharedConfig
+
+ // An external process to request credentials
+ CredentialProcess string
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region
+ Region string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromFile for information how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ if err = cfg.setFromIniFiles(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+
+ if len(cfg.AssumeRole.SourceProfile) > 0 {
+ if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ } else if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: sections,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+ var assumeRoleSrc sharedConfig
+
+ if len(cfg.AssumeRole.CredentialSource) > 0 {
+ // setAssumeRoleSource is only called when source_profile is found.
+ // If both source_profile and credential_source are set, then
+ // ErrSharedConfigSourceCollision will be returned
+ return ErrSharedConfigSourceCollision
+ }
+
+ // Multiple level assume role chains are not supported
+ if cfg.AssumeRole.SourceProfile == origProfile {
+ assumeRoleSrc = *cfg
+ assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+ } else {
+ err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+ return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+ }
+
+ cfg.AssumeRoleSource = &assumeRoleSrc
+
+ return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+ // Set config from each file in order, skipping profiles that don't exist.
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore missing profiles
+ continue
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For example
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
+		// Fallback to the alternate section name form: "profile <name>"
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+		}
+	}
+
+	// Shared Credentials: both access key id and secret must be present for
+	// either to be used.
+	akid := section.String(accessKeyIDKey)
+	secret := section.String(secretAccessKey)
+	if len(akid) > 0 && len(secret) > 0 {
+		cfg.Creds = credentials.Value{
+			AccessKeyID:     akid,
+			SecretAccessKey: secret,
+			SessionToken:    section.String(sessionTokenKey),
+			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+		}
+	}
+
+	// Assume Role: role_arn must be paired with either source_profile or
+	// credential_source to take effect.
+	roleArn := section.String(roleArnKey)
+	srcProfile := section.String(sourceProfileKey)
+	credentialSource := section.String(credentialSourceKey)
+	hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
+	if len(roleArn) > 0 && hasSource {
+		cfg.AssumeRole = assumeRoleConfig{
+			RoleARN:          roleArn,
+			SourceProfile:    srcProfile,
+			CredentialSource: credentialSource,
+			ExternalID:       section.String(externalIDKey),
+			MFASerial:        section.String(mfaSerialKey),
+			RoleSessionName:  section.String(roleSessionNameKey),
+		}
+	}
+
+	// `credential_process`
+	if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+		cfg.CredentialProcess = credProc
+	}
+
+	// Region
+	if v := section.String(regionKey); len(v) > 0 {
+		cfg.Region = v
+	}
+
+	// Endpoint discovery
+	if section.Has(enableEndpointDiscoveryKey) {
+		v := section.Bool(enableEndpointDiscoveryKey)
+		cfg.EnableEndpointDiscovery = &v
+	}
+
+	return nil
+}
+
+// SharedConfigLoadError is an error returned when a shared config file fails
+// to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error returned when the requested
+// profile was not found in the shared config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error returned when the shared config
+// profile contains assume role information that is invalid or incomplete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error.
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+		e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 00000000000..244c86da054
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// rules houses a set of rule values needed for validation of a
+// string value.
+type rules []rule
+
+// rule is an interface allowing for flexible rules; it simply
+// reports whether or not a value adheres to the rule.
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules, reporting whether any rule
+// (including nested rules) applies to the value.
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule is a generic rule for set membership via a map.
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map.
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting.
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist reports whether the value is within the whitelist.
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting.
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist reports whether the value is NOT matched by the
+// wrapped rule, i.e. not blacklisted.
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+// patterns is a list of header-name prefixes to match against.
+type patterns []string
+
+// IsValid for patterns checks each pattern and reports whether the
+// canonicalized value starts with any of them.
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules allows rules to depend on one another; all must match.
+type inclusiveRules []rule
+
+// IsValid returns true only if all rules are true for the value.
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 00000000000..6aa2ed241bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload is a Signer option that enables the signer's
+// UnsignedPayload field, preventing the request payload from being signed.
+func WithUnsignedPayload(v4 *Signer) {
+	v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 00000000000..bd082e9d1f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+// getURIPath returns the escaped URI path of u for use in the canonical
+// request. If u.Opaque is set it is expected in the "//hostname/path" form,
+// so the first three slash-separated segments (two empty, then the hostname)
+// are dropped. An empty path defaults to "/".
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..523db79f8d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,796 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for request that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you many need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "///"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
+// method and using the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fallback to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of these escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK that explicitly escaping the request prior to being signed is preferable,
+// and will help prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for build canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers that may be hoisted into a
+// presigned request's query string: any X-Amz- prefixed header that is not
+// in the requiredSignedHeaders whitelist.
+var allowedQueryHoisting = inclusiveRules{
+	blacklist{requiredSignedHeaders},
+	patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+
+	// Sets the log level the signer should use when reporting information to
+	// the logger. If the logger is nil nothing will be logged. See
+	// aws.LogLevelType for more information on available logging levels.
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need additional
+	// escaping then use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. Need to ensure that the underlying data of the Body
+	// values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter passed to Sign, the request's Body field will be
+// also set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning a S3 request will not compute the body's SHA256 hash by default.
+// This is done due to the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+// signWithBody is the shared implementation backing both Sign and Presign.
+// It builds the signing context, computes the signature, and either sets the
+// Authorization header (sign) or the query string values (presign).
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	// Canonical query strings require values sorted within each key.
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	// Re-signing an already-signed request requires the stale presign values
+	// to be removed and the signing time refreshed.
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.Get()
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+// sanitizeHostForHeader removes default port numbers from the request's Host
+// so the signed Host header matches what the HTTP client will send.
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+// handlePresignRemoval strips stale presign values from a previously
+// presigned request so it can be signed again cleanly.
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The previous presign values are now invalid and must be removed
+	// before the request is signed again.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in the case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+// assignAmzQueryValues places the algorithm and session token in the query
+// string for presigned requests, or the session token header otherwise.
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+ // If the request does not need to be signed ignore the signing of the
+ // request if the AnonymousCredentials object is used.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ // Prevents setting the HTTPRequest's Body. Since the Body could be
+ // wrapped in a custom io.Closer that we do not want to be stompped
+ // on top of by the signer.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ curTime := curTimeFn()
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ if err := ctx.buildBodyDigest(); err != nil {
+ return err
+ }
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+
+ return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+ ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = strings.Join([]string{
+ ctx.formattedShortTime,
+ ctx.Region,
+ ctx.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(ctx.SignedHeaderVals[k], ",")
+ }
+ }
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ ctx.formattedTime,
+ ctx.credentialString,
+ hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+ secret := ctx.credValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+ region := makeHmac(date, []byte(ctx.Region))
+ service := makeHmac(region, []byte(ctx.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ includeSHA256Header := ctx.unsignedPayload ||
+ ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "glacier"
+
+ s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+ if ctx.unsignedPayload || s3Presign {
+ hash = "UNSIGNED-PAYLOAD"
+ includeSHA256Header = !s3Presign
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ if !aws.IsReaderSeekable(ctx.Body) {
+ return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+ }
+ hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+ }
+
+ if includeSHA256Header {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+
+ return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned.
+func (ctx *signingCtx) isRequestSigned() bool {
+	if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+		return true
+	}
+	if ctx.Request.Header.Get("Authorization") != "" {
+		return true
+	}
+
+	return false
+}
+
+// removePresign deletes the presigned signing values from the request's query
+// string, unsigning a previously presigned request.
+func (ctx *signingCtx) removePresign() {
+	ctx.Query.Del("X-Amz-Algorithm")
+	ctx.Query.Del("X-Amz-Signature")
+	ctx.Query.Del("X-Amz-Security-Token")
+	ctx.Query.Del("X-Amz-Date")
+	ctx.Query.Del("X-Amz-Expires")
+	ctx.Query.Del("X-Amz-Credential")
+	ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+// makeHmac returns the HMAC-SHA256 of data keyed with key.
+func makeHmac(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// makeSha256 returns the SHA256 digest of data.
+func makeSha256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// makeSha256Reader returns the SHA256 digest of the reader's contents,
+// restoring the reader's original position afterwards.
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+	hash := sha256.New()
+	start, _ := reader.Seek(0, sdkio.SeekCurrent)
+	defer reader.Seek(start, sdkio.SeekStart)
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil)
+}
+
+const doubleSpace = " "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ vals[i] = string(buf[:m])
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 00000000000..8b6f23425a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,201 @@
+package aws
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
+// only be used with an io.Reader that is also an io.Seeker. Doing so may
+// cause request signature errors, or request body's not sent for GET, HEAD
+// and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+// Methods that the wrapped value does not support become no-ops rather than
+// errors (see Read, Seek, and Close below).
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns if the underlying reader type can be seeked. A
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+//
+// ReaderSeekerCloser satisfies io.Seeker even when its wrapped reader does
+// not, so it is unwrapped and inspected before the generic interface check.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+  return v.IsSeeker()
+ case *ReaderSeekerCloser:
+  return v.IsSeeker()
+ case io.ReadSeeker:
+  return true
+ default:
+  return false
+ }
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ // Type switch guards against a nil wrapped reader (r.r == nil fails the
+ // io.Reader case), in which case the no-op (0, nil) result is returned.
+ switch t := r.r.(type) {
+ case io.Reader:
+  return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+  return t.Seek(offset, whence)
+ }
+ // Non-seekable wrapped reader: report offset 0 with no error (no-op).
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+//
+// Matches types such as bytes.Reader and strings.Reader via the local
+// lenner interface without importing them.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+  Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+  return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+  return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+  return seekerLen(s)
+ }
+
+ // Neither Len() nor Seek available: length is unknowable, not an error.
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that a io.Readers might not actually be seekable.
+ switch v := s.(type) {
+ case ReaderSeekerCloser:
+  return v.GetLen()
+ case *ReaderSeekerCloser:
+  return v.GetLen()
+ }
+
+ return seekerLen(s)
+}
+
+// seekerLen computes the bytes remaining from the current position by
+// seeking to the end and back. The seeker's position is restored before
+// returning; any Seek failure aborts with that error.
+func seekerLen(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+  return 0, err
+ }
+
+ endOffset, err := s.Seek(0, sdkio.SeekEnd)
+ if err != nil {
+  return 0, err
+ }
+
+ _, err = s.Seek(curOffset, sdkio.SeekStart)
+ if err != nil {
+  return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+  return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ // m guards buf; WriteAt and Bytes may be called from multiple goroutines.
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided
+// The number of bytes written will be returned, or error. Can overwrite previous
+// written slices if the write ats overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+  if int64(cap(b.buf)) < expLen {
+   // Grow capacity by GrowthCoeff (clamped to >= 1 so capacity always
+   // reaches at least expLen) and copy existing contents over.
+   if b.GrowthCoeff < 1 {
+    b.GrowthCoeff = 1
+   }
+   newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+   copy(newBuf, b.buf)
+   b.buf = newBuf
+  }
+  // Extend length into existing capacity; gap bytes (if pos > old len)
+  // are zero-valued.
+  b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+//
+// NOTE(review): the returned slice aliases the internal buffer; concurrent
+// WriteAt calls after Bytes returns may mutate it — confirm callers expect that.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 00000000000..6192b2455b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+// Compiled only for go1.8+ (see build tag above); older toolchains use the
+// hand-rolled copy in url_1_7.go.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 00000000000..0210d2720e7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+
+}
+
+// stripPort is copy of Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+//
+// Handles both "host:port" and bracketed IPv6 "[host]:port" forms; a host
+// with no colon is returned unchanged.
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+  return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+  return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 00000000000..1d96f9c0b37
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.19.11"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100644
index 00000000000..e83a99886bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+// String returns a short debug name for the kind. Kinds without a case here
+// (ASTKindEqualExpr, ASTKindNestedSectionStatement,
+// ASTKindCompletedNestedSectionStatement) fall through to the empty string.
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+  return "none"
+ case ASTKindStart:
+  return "start"
+ case ASTKindExpr:
+  return "expr"
+ case ASTKindStatement:
+  return "stmt"
+ case ASTKindSectionStatement:
+  return "section_stmt"
+ case ASTKindExprStatement:
+  return "expr_stmt"
+ case ASTKindCommentStatement:
+  return "comment"
+ case ASTKindNestedSectionStatement:
+  return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+  return "completed_stmt"
+ case ASTKindSkipStatement:
+  return "skip"
+ default:
+  return ""
+ }
+}
+
+// AST interface allows us to determine what kind of node we
+// are on and casting may not need to be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ // RootToken reports whether Root holds the node's token; when false the
+ // root lives at Children[0] instead (see GetRoot/GetChildren).
+ RootToken bool
+ Children []AST
+}
+
+// newAST builds a node whose root is itself an AST, stored as the first
+// child per the convention documented on the AST type.
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+  Kind: kind,
+  Children: append([]AST{root}, children...),
+ }
+}
+
+// newASTWithRootToken builds a node whose root is a lexer Token; Children
+// then holds only real children (RootToken distinguishes the two layouts).
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+  Kind: kind,
+  Root: root,
+  RootToken: true,
+  Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+//
+// When RootToken is set the node itself carries the root; otherwise the
+// root is Children[0] (or a zero AST when there are no children).
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+  return *a
+ }
+
+ if len(a.Children) == 0 {
+  return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+//
+// For non-RootToken nodes the first child is the root and is excluded.
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+  return []AST{}
+ }
+
+ if a.RootToken {
+  return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+//
+// Mirrors GetChildren: for non-RootToken nodes the root at Children[0]
+// is preserved and the new children are appended after it.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+  a.Children = children
+ } else {
+  a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
new file mode 100644
index 00000000000..0895d53cbe6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+// commaRunes is the canonical raw value for every comma token.
+var commaRunes = []rune(",")
+
+// isComma reports whether the rune is the comma separator.
+func isComma(b rune) bool {
+ return b == ','
+}
+
+// newCommaToken returns a fresh TokenComma token (always one rune wide).
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
new file mode 100644
index 00000000000..0b76999ba1f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next byte(s) is a
+// comment.
+//
+// Comments begin with ';' or '#'.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+  return false
+ }
+
+ switch b[0] {
+ case ';':
+  return true
+ case '#':
+  return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many bytes were read.
+//
+// The token spans up to, but not including, the terminating '\n' or "\r\n".
+// NOTE(review): the "\r\n" guard requires len(b)-i > 2, so a comment ending
+// in "\r\n" at the very end of input keeps the '\r' and breaks on the '\n'
+// instead — confirm this is intentional before changing.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+  if b[i] == '\n' {
+   break
+  }
+
+  if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+   break
+  }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
new file mode 100644
index 00000000000..25ce0fe134d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -0,0 +1,29 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
new file mode 100644
index 00000000000..04345a54c20
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used to satisfy the Token interface
+// (the parser substitutes it as lookahead once all input tokens are consumed).
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
new file mode 100644
index 00000000000..91ba2a59dd5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+// newEqualExpr builds an assignment node: tok is the '=' / ':' operator
+// token and left (the key expression) becomes its first child.
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return a LHS value in the equal expr
+//
+// Returns "" when ast is not an equal expression or has no children.
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+  return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
new file mode 100644
index 00000000000..8d462f77e24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+// Fuzz is the go-fuzz entry point (built only under the gofuzz tag, see
+// build tag above). Returns 1 for inputs that parse — guiding the fuzzer
+// toward valid INI — and 0 for inputs that do not.
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+  return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
new file mode 100644
index 00000000000..3b0ca7afe3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+//
+// Open failures are wrapped in an awserr with code ErrCodeUnableToReadFile.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+  return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+//
+// Pipeline: lex+parse to an AST (ParseAST), then Walk with the default
+// visitor to accumulate Sections.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+  return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+  return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+//
+// Byte-slice twin of Parse; same AST + visitor pipeline.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+  return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+  return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 00000000000..582c024ad15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // ErrCodeUnableToReadFile is used when a file is failed to be
+ // opened or read from.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various different tokens types
+type TokenType int
+
+// String returns a short debug name for the token type; values outside the
+// enum yield the empty string.
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+  return "none"
+ case TokenLit:
+  return "literal"
+ case TokenSep:
+  return "sep"
+ case TokenOp:
+  return "op"
+ case TokenWS:
+  return "ws"
+ case TokenNL:
+  return "newline"
+ case TokenComment:
+  return "comment"
+ case TokenComma:
+  return "comma"
+ default:
+  return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+// iniLexer is a stateless lexer; all state lives in the tokenize call.
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+//
+// The reader is consumed fully into memory; read failures are wrapped with
+// ErrCodeUnableToReadFile.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+  return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+// tokenize scans b into a token slice. countTokens makes a first pass to
+// pre-size the slice (it may over-count by one; the count < tokenAmount
+// guard and the tokens[:count] return compensate). Dispatch order matters:
+// whitespace/comma/comment/newline/sep/op are tried before the literal
+// fallback.
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+  switch {
+  case isWhitespace(runes[0]):
+   tokens[count], n, err = newWSToken(runes)
+  case isComma(runes[0]):
+   tokens[count], n = newCommaToken(), 1
+  case isComment(runes):
+   tokens[count], n, err = newCommentToken(runes)
+  case isNewline(runes):
+   tokens[count], n, err = newNewlineToken(runes)
+  case isSep(runes):
+   tokens[count], n, err = newSepToken(runes)
+  case isOp(runes):
+   tokens[count], n, err = newOpToken(runes)
+  default:
+   tokens[count], n, err = newLitToken(runes)
+  }
+
+  if err != nil {
+   return nil, err
+  }
+
+  count++
+
+  runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+// countTokens runs the same dispatch as tokenize purely to size the token
+// slice. It returns count+1 (one slot of padding) on success and 0 if any
+// token constructor errors — tokenize then re-encounters and reports the
+// error itself.
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+  switch {
+  case isWhitespace(runes[0]):
+   _, n, err = newWSToken(runes)
+  case isComma(runes[0]):
+   _, n = newCommaToken(), 1
+  case isComment(runes):
+   _, n, err = newCommentToken(runes)
+  case isNewline(runes):
+   _, n, err = newNewlineToken(runes)
+  case isSep(runes):
+   _, n, err = newSepToken(runes)
+  case isOp(runes):
+   _, n, err = newOpToken(runes)
+  default:
+   _, n, err = newLitToken(runes)
+  }
+
+  if err != nil {
+   return 0
+  }
+
+  count++
+  runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token indicates a metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ // base is the numeric base recorded for number literals
+ // (set by newLitToken from getNumericalValue).
+ base int
+ raw []rune
+}
+
+// emptyValue is the zero Value, used as a convenient default.
+var emptyValue = Value{}
+
+// newToken constructs a Token; base is left at its zero value and is only
+// filled in for numeric literals.
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+  t: t,
+  raw: raw,
+  ValueType: v,
+ }
+}
+
+// Raw return the raw runes that were consumed
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
new file mode 100644
index 00000000000..f99703372c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,347 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: map[TokenType]int{
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: map[TokenType]int{
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+  return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+//
+// Identical to ParseAST but skips the io.Reader drain.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+  return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// parse drives the LL(1) table (parseTable) over the token stream. The top
+// of the parse stack is the current ASTKind; together with the lookahead
+// token's type it selects the next state. Completed statements are moved
+// aside via stack.MarkComplete and collected by stack.List at the end.
+// When tokens run out, emptyToken (TokenNone) is used as lookahead so the
+// remaining stack can be reduced.
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+  k := stack.Pop()
+
+  var tok Token
+  if len(tokens) == 0 {
+   // this occurs when all the tokens have been processed
+   // but reduction of what's left on the stack needs to
+   // occur.
+   tok = emptyToken
+  } else {
+   tok = tokens[0]
+  }
+
+  step := parseTable[k.Kind][tok.Type()]
+  if s.ShouldSkip(tok) {
+   // being in a skip state with no tokens will break out of
+   // the parse loop since there is nothing left to process.
+   if len(tokens) == 0 {
+    break loop
+   }
+
+   step = SkipTokenState
+  }
+
+  switch step {
+  case TerminalState:
+   // Finished parsing. Push what should be the last
+   // statement to the stack. If there is anything left
+   // on the stack, an error in parsing has occurred.
+   if k.Kind != ASTKindStart {
+    stack.MarkComplete(k)
+   }
+   break loop
+  case SkipTokenState:
+   // When skipping a token, the previous state was popped off the stack.
+   // To maintain the correct state, the previous state will be pushed
+   // onto the stack.
+   stack.Push(k)
+  case StatementState:
+   if k.Kind != ASTKindStart {
+    stack.MarkComplete(k)
+   }
+   expr := newExpression(tok)
+   stack.Push(expr)
+  case StatementPrimeState:
+   if tok.Type() != TokenOp {
+    stack.MarkComplete(k)
+    continue
+   }
+
+   if k.Kind != ASTKindExpr {
+    return nil, NewParseError(
+     fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+    )
+   }
+
+   k = trimSpaces(k)
+   expr := newEqualExpr(k, tok)
+   stack.Push(expr)
+  case ValueState:
+   // ValueState requires the previous state to either be an equal expression
+   // or an expression statement.
+   //
+   // This grammar occurs when the RHS is a number, word, or quoted string.
+   // equal_expr -> lit op equal_expr'
+   // equal_expr' -> number | string | quoted_string
+   // quoted_string -> " quoted_string'
+   // quoted_string' -> string quoted_string_end
+   // quoted_string_end -> "
+   //
+   // otherwise
+   // expr_stmt -> equal_expr (expr_stmt')*
+   // expr_stmt' -> ws S | op S | MarkComplete
+   // S -> equal_expr' expr_stmt'
+   switch k.Kind {
+   case ASTKindEqualExpr:
+    // assigning a value to some key
+    k.AppendChild(newExpression(tok))
+    stack.Push(newExprStatement(k))
+   case ASTKindExpr:
+    // Multi-token literal: fold this token's runes into the expr.
+    k.Root.raw = append(k.Root.raw, tok.Raw()...)
+    stack.Push(k)
+   case ASTKindExprStatement:
+    root := k.GetRoot()
+    children := root.GetChildren()
+    if len(children) == 0 {
+     return nil, NewParseError(
+      fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+     )
+    }
+
+    rhs := children[len(children)-1]
+
+    // Append to the RHS unless it is a quoted string, which is closed.
+    if rhs.Root.ValueType != QuotedStringType {
+     rhs.Root.ValueType = StringType
+     rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+    }
+
+    children[len(children)-1] = rhs
+    k.SetChildren(children)
+
+    stack.Push(k)
+   }
+  case OpenScopeState:
+   if !runeCompare(tok.Raw(), openBrace) {
+    return nil, NewParseError("expected '['")
+   }
+
+   stmt := newStatement()
+   stack.Push(stmt)
+  case CloseScopeState:
+   if !runeCompare(tok.Raw(), closeBrace) {
+    return nil, NewParseError("expected ']'")
+   }
+
+   k = trimSpaces(k)
+   stack.Push(newCompletedSectionStatement(k))
+  case SectionState:
+   var stmt AST
+
+   switch k.Kind {
+   case ASTKindStatement:
+    // If there are multiple literals inside of a scope declaration,
+    // then the current token's raw value will be appended to the Name.
+    //
+    // This handles cases like [ profile default ]
+    //
+    // k will represent a SectionStatement with the children representing
+    // the label of the section
+    stmt = newSectionStatement(tok)
+   case ASTKindSectionStatement:
+    k.Root.raw = append(k.Root.raw, tok.Raw()...)
+    stmt = k
+   default:
+    return nil, NewParseError(
+     fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+    )
+   }
+
+   stack.Push(stmt)
+  case MarkCompleteState:
+   if k.Kind != ASTKindStart {
+    stack.MarkComplete(k)
+   }
+
+   // Re-seed the stack with the start symbol so parsing can continue.
+   if stack.Len() == 0 {
+    stack.Push(start)
+   }
+  case SkipState:
+   stack.Push(newSkipStatement(k))
+   s.Skip()
+  case CommentState:
+   if k.Kind == ASTKindStart {
+    stack.Push(k)
+   } else {
+    stack.MarkComplete(k)
+   }
+
+   stmt := newCommentStatement(tok)
+   stack.Push(stmt)
+  default:
+   return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
+  }
+
+  if len(tokens) > 0 {
+   tokens = tokens[1:]
+  }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+  return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+//
+// k is received and returned by value; only the Root.raw slice header is
+// adjusted. In the left-trim loop the i-- / i++ pair pins i at 0, so the
+// loop effectively re-tests the new first rune after each slice.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+  if !isWhitespace(k.Root.raw[i]) {
+   break
+  }
+
+  k.Root.raw = k.Root.raw[1:]
+  i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+  if !isWhitespace(k.Root.raw[i]) {
+   break
+  }
+
+  k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 00000000000..24df543d38c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,324 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+// literalValues holds the recognized keyword literals (currently only the
+// booleans) checked by isBoolValue.
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+// isBoolValue reports whether b begins with one of the boolean keyword
+// literals ("true"/"false").
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+  if isLitValue(lv, b) {
+   return true
+  }
+ }
+ return false
+}
+
+// isLitValue reports whether have begins with the runes of want.
+// Note this is a prefix match, not full equality.
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+  return false
+ }
+
+ for i := 0; i < len(want); i++ {
+  if want[i] != have[i] {
+   return false
+  }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether not the leading characters in
+// a byte slice is a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+//
+// numberHelper tracks which single format marker ('b','o','x','e','.','-')
+// has been seen; needDigit forces at least one digit after any marker.
+// negativeIndex counts runes since the last exponent so '-' is only legal
+// immediately after 'e'/'E' or at the very start.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+  negativeIndex++
+
+  switch b[i] {
+  case '-':
+   if helper.IsNegative() || negativeIndex != 1 {
+    return false
+   }
+   helper.Determine(b[i])
+   needDigit = true
+   continue
+  case 'e', 'E':
+   if err := helper.Determine(b[i]); err != nil {
+    return false
+   }
+   negativeIndex = 0
+   needDigit = true
+   continue
+  case 'b':
+   // 'b' is a hex digit when already in hex format; otherwise it is
+   // the binary-format marker handled by the fallthrough below.
+   if helper.numberFormat == hex {
+    break
+   }
+   fallthrough
+  case 'o', 'x':
+   needDigit = true
+   if i == 0 {
+    return false
+   }
+
+   fallthrough
+  case '.':
+   if err := helper.Determine(b[i]); err != nil {
+    return false
+   }
+   needDigit = true
+   continue
+  }
+
+  // Whitespace/newline delimits the number; valid only if no digit is owed.
+  if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+   return !needDigit
+  }
+
+  if !helper.CorrectByte(b[i]) {
+   return false
+  }
+  needDigit = false
+ }
+
+ return !needDigit
+}
+
+// isValid reports whether the first rune of b can appear in a bare value,
+// consuming one rune. Empty input yields (false, 0, nil).
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+  // TODO: should probably return an error
+  return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+// isValidRune excludes the structural INI characters: separators, braces,
+// space, and newline.
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+// String returns a debug name for the type. Note QuotedStringType has no
+// case here and falls through to the empty string.
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+  return "NONE"
+ case DecimalType:
+  return "FLOAT"
+ case IntegerType:
+  return "INT"
+ case StringType:
+  return "STRING"
+ case BoolType:
+  return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+//
+// Type selects which of the typed fields below is meaningful; raw always
+// holds the original runes.
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+// newValue parses raw into a typed Value. For IntegerType with a non-10
+// base the two-rune format prefix (e.g. "0x") is stripped before ParseInt.
+// QuotedStringType drops the surrounding quote runes.
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+  Type: t,
+  raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+  v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+  if base != 10 {
+   raw = raw[2:]
+  }
+
+  v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+  v.str = string(raw)
+ case QuotedStringType:
+  v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+  v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+  v.Type = StringType
+  v.str = string(raw)
+  err = nil
+ }
+
+ return v, err
+}
+
+// Append will append values and change the type to a string
+// type.
+//
+// NOTE(review): when the current type is not QuotedStringType, r is set to
+// tok.raw with its first and last runes stripped as if it were quoted, and
+// that stripped form is only used for TokenLit tokens — confirm against the
+// upstream callers before relying on this for non-quoted literals.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+  v.Type = StringType
+  r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+  v.raw = append(v.raw, tok.Raw()...)
+ } else {
+  v.raw = append(v.raw, r...)
+ }
+}
+
+// String renders the value for debugging, prefixed with its type name.
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+  return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+  return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+  return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+  return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+  return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+  return "union not set"
+ }
+}
+
+// newLitToken lexes a literal token from b, classifying it as a quoted
+// string, number (integer or decimal), boolean, or bare string, in that
+// priority order. It returns the token, the rune count consumed, and any
+// error from the underlying get*Value helper (defined elsewhere in this
+// package).
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+  n, err = getStringValue(b)
+  if err != nil {
+   return token, n, err
+  }
+
+  token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+  var base int
+  base, n, err = getNumericalValue(b)
+  if err != nil {
+   return token, 0, err
+  }
+
+  // A '.' or exponent marker promotes the literal to a decimal.
+  value := b[:n]
+  vType := IntegerType
+  if contains(value, '.') || hasExponent(value) {
+   vType = DecimalType
+  }
+  token = newToken(TokenLit, value, vType)
+  token.base = base
+ } else if isBoolValue(b) {
+  n, err = getBoolValue(b)
+
+  token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+  n, err = getValue(b)
+  token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+// isTrimmable reports whether r is stripped from the ends of unquoted
+// string values (newline or space).
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+  return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+//
+// Unquoted values are trimmed of surrounding spaces/newlines; quoted values
+// keep their interior verbatim (minus the quotes and escape sequences).
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+  return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+  // preserve all characters in the quotes
+  return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+  return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+// contains reports whether c occurs anywhere in runes.
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+  if runes[i] == c {
+   return true
+  }
+ }
+
+ return false
+}
+
+// runeCompare reports whether the two rune slices are element-wise equal.
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+  return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+  if v1[i] != v2[i] {
+   return false
+  }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
new file mode 100644
index 00000000000..e52ac399f17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
new file mode 100644
index 00000000000..a45c0bc5662
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot just simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
new file mode 100644
index 00000000000..8a84c7cbe08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 00000000000..45728701931
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+ // ErrCodeParseError is returned when a parsing error
+ // has occurred.
+ ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+ return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+ return err.msg
+}
+
+// OrigError return nothing since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+ return nil
+}
+
+func (err *ParseError) Error() string {
+ return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 00000000000..7f01cf7c703
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list which is the list of ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
new file mode 100644
index 00000000000..f82095ba259
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
new file mode 100644
index 00000000000..6bb6964475e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+
+ s.Continue()
+ return false
+ }
+ s.prevTok = tok
+
+ return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+ s.prevTok = emptyToken
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+ s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
new file mode 100644
index 00000000000..18f3fe89317
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
new file mode 100644
index 00000000000..305999d29be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue will return a quoted string and the amount
+// of bytes read
+//
+// an error will be returned if the string is not properly formatted
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue will return a boolean and the amount
+// of bytes read
+//
+// an error will be returned if the boolean is not of a correct
+// value
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue will return a numerical string, the amount
+// of bytes read, and the base of the number
+//
+// an error will be returned if the number is not of a correct
+// value
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not something is an integer
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber will return a negative number from a
+// byte slice. This will iterate through all characters until
+// a non-digit has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+ case 't': // tab
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 00000000000..94841c32443
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+ scope string
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor
+func NewDefaultVisitor() *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ }
+}
+
+// VisitExpr visits expressions...
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ if rhs.Root.Type() != TokenLit {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ t.values[key] = v
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits statements...
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represent
+// a sectioned entry in a configuration file.
+type Section struct {
+ Name string
+ values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 00000000000..99915f7f777
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using the v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 00000000000..7ffb4ae06ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as a space or tab.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 00000000000..5aa9137e0f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+ SeekStart = 0 // seek relative to the origin of the file
+ SeekCurrent = 1 // seek relative to the current offset
+ SeekEnd = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 00000000000..e5f005613b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+ SeekStart = io.SeekStart // seek relative to the origin of the file
+ SeekCurrent = io.SeekCurrent // seek relative to the current offset
+ SeekEnd = io.SeekEnd // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 00000000000..0c9802d8770
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 00000000000..38ea61afeaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+ "path"
+ "strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+ if len(elems) == 0 {
+ return ""
+ }
+
+ hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+ str := path.Join(elems...)
+ if hasTrailing && str != "/" {
+ str += "/"
+ }
+
+ return str
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 00000000000..7da8a49ce52
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+ // ECSCredsProviderEnvVar is an environmental variable key used to
+ // determine which path needs to be hit.
+ ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden to test to ensure the credential process
+// is behaving correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 00000000000..ebcbc2b40a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
new file mode 100644
index 00000000000..d7d42db0a6a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -0,0 +1,68 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate the
+// request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+ Name: "awssdk.protocol.ValidateEndpointHostHandler",
+ Fn: func(r *request.Request) {
+ err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+ if err != nil {
+ r.Error = err
+ }
+ },
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+ paramErrs := request.ErrInvalidParams{Context: opName}
+ labels := strings.Split(host, ".")
+
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ paramErrs.Add(request.NewErrParamFormat(
+ "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+ }
+ }
+
+ if len(host) > 255 {
+ paramErrs.Add(request.NewErrParamMaxLen(
+ "endpoint host", 255, host,
+ ))
+ }
+
+ if paramErrs.Len() > 0 {
+ return paramErrs
+ }
+ return nil
+}
+
+// ValidHostLabel returns if the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 00000000000..915b0fcafd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+ builder := HostPrefixBuilder{
+ Prefix: prefix,
+ LabelsFn: labelsFn,
+ }
+
+ return request.NamedHandler{
+ Name: HostPrefixHandlerName,
+ Fn: builder.Build,
+ }
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+ Prefix string
+ LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+ return
+ }
+
+ var labels map[string]string
+ if h.LabelsFn != nil {
+ labels = h.LabelsFn()
+ }
+
+ prefix := h.Prefix
+ for name, value := range labels {
+ prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+ }
+
+ r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+ if len(r.HTTPRequest.Host) > 0 {
+ r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000000..53831dff984
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000000..776d1101843
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be use for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally decoding base64 the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+ var b []byte
+ var err error
+
+ switch escape {
+ case NoEscape:
+ b = []byte(v)
+ case Base64Escape:
+ b, err = base64.StdEncoding.DecodeString(v)
+ case QuotedEscape:
+ var u string
+ u, err = strconv.Unquote(v)
+ b = []byte(u)
+ default:
+ panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000000..e21614a1250
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into a SDK shape.
+type PayloadUnmarshaler interface {
+ UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing a SDK request first.
+type HandlerPayloadUnmarshal struct {
+ Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unable
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+ req := &request.Request{
+ HTTPRequest: &http.Request{},
+ HTTPResponse: &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: ioutil.NopCloser(r),
+ },
+ Data: v,
+ }
+
+ h.Unmarshalers.Run(req)
+
+ return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling a SDK shape into and
+// io.Writer.
+type PayloadMarshaler interface {
+ MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling a SDK shape into an io.Writer without
+// needing a SDK request first.
+type HandlerPayloadMarshal struct {
+ Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if unable if marshal
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+ req := request.New(
+ aws.Config{},
+ metadata.ClientInfo{},
+ request.Handlers{},
+ nil,
+ &request.Operation{HTTPMethod: "GET"},
+ v,
+ nil,
+ )
+
+ h.Marshalers.Run(req)
+
+ if req.Error != nil {
+ return req.Error
+ }
+
+ io.Copy(w, req.GetBody())
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 00000000000..60e5b09d548
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests, and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if !r.IsPresigned() {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 00000000000..75866d01218
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,246 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ v.Set(name, protocol.FormatTime(format, value))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 00000000000..3495c73070b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,39 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed decoding Query response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000000..46d354e826f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,74 @@
+package query
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"ErrorResponse"`
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+ XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a name request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError", "failed to read from query HTTP response body", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // First check for specific error
+ resp := xmlErrorResponse{}
+ decodeErr := xml.Unmarshal(bodyBytes, &resp)
+ if decodeErr == nil {
+ reqID := resp.RequestID
+ if reqID == "" {
+ reqID = r.RequestID
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+ return
+ }
+
+ // Check for unhandled error
+ servUnavailResp := xmlServiceUnavailableResponse{}
+ unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+ if unavailErr == nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Failed to retrieve any error message from the response body
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("SerializationError",
+ "failed to decode query XML error response", decodeErr),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000000..b80f84fbb86
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,300 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Setup the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New("SerializationError",
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ name = strings.TrimSpace(name)
+ str = strings.TrimSpace(str)
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+
+ }
+ keyStr := strings.TrimSpace(key.String())
+ str = strings.TrimSpace(str)
+
+ header.Add(prefix+keyStr, str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ if tag.Get("location") == "querystring" {
+ format = protocol.ISO8601TimeFormatName
+ }
+ }
+ str = protocol.FormatTime(format, value)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 00000000000..4366de2e1e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 00000000000..33fd53b126a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,225 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New("SerializationError",
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ }
+ t, err := protocol.ParseTime(format, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 00000000000..b7ed6c6f810
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
+package protocol
+
+import (
+ "strconv"
+ "time"
+)
+
+// Names of time formats supported by the SDK
+const (
+ RFC822TimeFormatName = "rfc822"
+ ISO8601TimeFormatName = "iso8601"
+ UnixTimeFormatName = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+ // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+ // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
+ ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns if the timestamp format name
+// is know to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+ switch name {
+ case RFC822TimeFormatName:
+ fallthrough
+ case ISO8601TimeFormatName:
+ fallthrough
+ case UnixTimeFormatName:
+ return true
+ default:
+ return false
+ }
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+ t = t.UTC()
+
+ switch name {
+ case RFC822TimeFormatName:
+ return t.Format(RFC822TimeFormat)
+ case ISO8601TimeFormatName:
+ return t.Format(ISO8601TimeFormat)
+ case UnixTimeFormatName:
+ return strconv.FormatInt(t.Unix(), 10)
+ default:
+ panic("unknown timestamp format name, " + name)
+ }
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the time if it was able to be parsed, and fails otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+ switch formatName {
+ case RFC822TimeFormatName:
+ return time.Parse(RFC822TimeFormat, value)
+ case ISO8601TimeFormatName:
+ return time.Parse(ISO8601TimeFormat, value)
+ case UnixTimeFormatName:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Unix(int64(v), 0), nil
+ default:
+ panic("unknown timestamp format name, " + formatName)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 00000000000..da1a68111db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
// UnmarshalDiscardBodyHandler is a named request handler to empty and close a
// response's body; it wraps UnmarshalDiscardBody.
var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and closing it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 00000000000..cf981fe9513
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
// BuildXML will serialize params into an xml.Encoder. Error will be returned
// if the serialization of any of the params or nested values fails.
// Child elements are emitted unsorted (buildXML is called with sorted=false).
func BuildXML(params interface{}, e *xml.Encoder) error {
	return buildXML(params, e, false)
}
+
// buildXML serializes params into e; when sorted is true, child elements
// are emitted in lexical order for deterministic output.
func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
	root := NewXMLElement(xml.Name{})
	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
		return err
	}
	// Encode the first child found under the synthetic root; the early
	// return means any additional children are skipped.
	// NOTE(review): with more than one child, map iteration order would
	// make the choice nondeterministic — presumably buildValue produces
	// at most one top-level element; confirm.
	for _, c := range root.Children {
		for _, v := range c {
			return StructToXML(e, v, sorted)
		}
	}
	return nil
}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
// A xmlBuilder serializes values from Go code to XML
type xmlBuilder struct {
	// encoder receives the final XML tokens.
	encoder *xml.Encoder
	// namespaces records prefix -> URI pairs registered while building
	// structs that carry an xmlPrefix/xmlURI tag.
	namespaces map[string]string
}
+
// buildValue generic XMLNode builder for any type. Will build value for their specific type
// struct, list, map, scalar.
//
// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
// type is not provided reflect will be used to determine the value's type.
func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
	value = elemOf(value)
	if !value.IsValid() { // no need to handle zero values
		return nil
	} else if tag.Get("location") != "" { // don't handle non-body location values
		return nil
	}

	// Without an explicit "type" tag, infer the XML shape from the Go kind.
	t := tag.Get("type")
	if t == "" {
		switch value.Kind() {
		case reflect.Struct:
			t = "structure"
		case reflect.Slice:
			t = "list"
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		// Merge shape-level metadata carried on the struct's "_" field
		// into the tag before building.
		if field, ok := value.Type().FieldByName("_"); ok {
			tag = tag + reflect.StructTag(" ") + field.Tag
		}
		return b.buildStruct(value, current, tag)
	case "list":
		return b.buildList(value, current, tag)
	case "map":
		return b.buildMap(value, current, tag)
	default:
		// Everything else is rendered as a single text or attribute node.
		return b.buildScalar(value, current, tag)
	}
}
+
// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
// types are converted to XMLNodes also.
func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
	if !value.IsValid() {
		return nil
	}

	// unwrap payloads: when a "payload" tag names a field, serialize that
	// field in place of the wrapper struct.
	if payload := tag.Get("payload"); payload != "" {
		field, _ := value.Type().FieldByName(payload)
		tag = field.Tag
		value = elemOf(value.FieldByName(payload))

		if !value.IsValid() {
			return nil
		}
	}

	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})

	// there is an xmlNamespace associated with this struct
	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
		ns := xml.Attr{
			Name:  xml.Name{Local: "xmlns"},
			Value: uri,
		}
		if prefix != "" {
			b.namespaces[prefix] = uri // register the namespace
			ns.Name.Local = "xmlns:" + prefix
		}

		child.Attr = append(child.Attr, ns)
	}

	// Count body-bound versus location-bound (header/query) fields so a
	// struct containing only non-body fields can be omitted below.
	var payloadFields, nonPayloadFields int

	t := value.Type()
	for i := 0; i < value.NumField(); i++ {
		member := elemOf(value.Field(i))
		field := t.Field(i)

		if field.PkgPath != "" {
			continue // ignore unexported fields
		}
		if field.Tag.Get("ignore") != "" {
			continue
		}

		mTag := field.Tag
		if mTag.Get("location") != "" { // skip non-body members
			nonPayloadFields++
			continue
		}
		payloadFields++

		// Auto-fill idempotency token fields the caller left unset.
		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
			token := protocol.GetIdempotencyToken()
			member = reflect.ValueOf(token)
		}

		// Default the element name to the Go field name when no
		// locationName tag is present.
		memberName := mTag.Get("locationName")
		if memberName == "" {
			memberName = field.Name
			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
		}
		if err := b.buildValue(member, child, mTag); err != nil {
			return err
		}
	}

	// Only case where the child shape is not added is if the shape only contains
	// non-payload fields, e.g headers/query.
	if !(payloadFields == 0 && nonPayloadFields > 0) {
		current.AddChild(child)
	}

	return nil
}
+
// buildList adds the value's list items to the current XMLNode as children nodes. All
// nested values in the list are converted to XMLNodes also.
func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
	if value.IsNil() { // don't build omitted lists
		return nil
	}

	// check for unflattened list member
	flattened := tag.Get("flattened") != ""

	xname := xml.Name{Local: tag.Get("locationName")}
	if flattened {
		// Flattened lists repeat the member element directly under the
		// current node, one element per item.
		for i := 0; i < value.Len(); i++ {
			child := NewXMLElement(xname)
			current.AddChild(child)
			if err := b.buildValue(value.Index(i), child, ""); err != nil {
				return err
			}
		}
	} else {
		// Unflattened lists nest items inside a wrapper element; each
		// item is named by "locationNameList" (default "member").
		list := NewXMLElement(xname)
		current.AddChild(list)

		for i := 0; i < value.Len(); i++ {
			iname := tag.Get("locationNameList")
			if iname == "" {
				iname = "member"
			}

			child := NewXMLElement(xml.Name{Local: iname})
			list.AddChild(child)
			if err := b.buildValue(value.Index(i), child, ""); err != nil {
				return err
			}
		}
	}

	return nil
}
+
// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
// nested values in the map are converted to XMLNodes also.
//
// Error will be returned if it is unable to build the map's values into XMLNodes
func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
	if value.IsNil() { // don't build omitted maps
		return nil
	}

	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
	current.AddChild(maproot)
	current = maproot

	// Entry key/value element names default to "key"/"value" but can be
	// overridden by tags.
	kname, vname := "key", "value"
	if n := tag.Get("locationNameKey"); n != "" {
		kname = n
	}
	if n := tag.Get("locationNameValue"); n != "" {
		vname = n
	}

	// sorting is not required for compliance, but it makes testing easier
	keys := make([]string, value.Len())
	for i, k := range value.MapKeys() {
		keys[i] = k.String()
	}
	sort.Strings(keys)

	for _, k := range keys {
		v := value.MapIndex(reflect.ValueOf(k))

		mapcur := current
		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
			child := NewXMLElement(xml.Name{Local: "entry"})
			mapcur.AddChild(child)
			mapcur = child
		}

		kchild := NewXMLElement(xml.Name{Local: kname})
		kchild.Text = k
		vchild := NewXMLElement(xml.Name{Local: vname})
		mapcur.AddChild(kchild)
		mapcur.AddChild(vchild)

		if err := b.buildValue(v, vchild, ""); err != nil {
			return err
		}
	}

	return nil
}
+
// buildScalar will convert the value into a string and append it as a attribute or child
// of the current XMLNode.
//
// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
//
// Error will be returned if the value type is unsupported.
func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
	var str string
	switch converted := value.Interface().(type) {
	case string:
		str = converted
	case []byte:
		// Blobs are base64-encoded; a nil slice leaves str empty.
		if !value.IsNil() {
			str = base64.StdEncoding.EncodeToString(converted)
		}
	case bool:
		str = strconv.FormatBool(converted)
	case int64:
		str = strconv.FormatInt(converted, 10)
	case int:
		str = strconv.Itoa(converted)
	case float64:
		str = strconv.FormatFloat(converted, 'f', -1, 64)
	case float32:
		str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
	case time.Time:
		// XML bodies default to the ISO 8601 timestamp format unless
		// the field's tag overrides it.
		format := tag.Get("timestampFormat")
		if len(format) == 0 {
			format = protocol.ISO8601TimeFormatName
		}

		str = protocol.FormatTime(format, converted)
	default:
		return fmt.Errorf("unsupported value for param %s: %v (%s)",
			tag.Get("locationName"), value.Interface(), value.Type().Name())
	}

	xname := xml.Name{Local: tag.Get("locationName")}
	if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
		attr := xml.Attr{Name: xname, Value: str}
		current.Attr = append(current.Attr, attr)
	} else { // regular text node
		current.AddChild(&XMLNode{Name: xname, Text: str})
	}
	return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 00000000000..ff1ef6830b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,272 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
// UnmarshalXML deserializes an xml.Decoder into the container v. V
// needs to match the shape of the XML expected to be decoded.
// If the shape doesn't match unmarshaling will fail.
//
// When wrapper is non-empty and a matching child element exists, the
// first wrapped element is decoded in place of the outer element.
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
	// First materialize the document as an XMLNode tree, then walk the
	// tree into v via parse.
	n, err := XMLToStruct(d, nil)
	if err != nil {
		return err
	}
	if n.Children != nil {
		for _, root := range n.Children {
			for _, c := range root {
				if wrappedChild, ok := c.Children[wrapper]; ok {
					c = wrappedChild[0] // pull out wrapped element
				}

				err = parse(reflect.ValueOf(v), c, "")
				if err != nil {
					// io.EOF from parse is treated as a benign end of
					// input rather than a failure.
					if err == io.EOF {
						return nil
					}
					return err
				}
			}
		}
		return nil
	}
	return nil
}
+
// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
// will be used to determine the type from r.
func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	rtype := r.Type()
	if rtype.Kind() == reflect.Ptr {
		rtype = rtype.Elem() // check kind of actual element type
	}

	// Without an explicit "type" tag, infer the XML shape from the Go
	// kind, excluding the scalar special cases below.
	t := tag.Get("type")
	if t == "" {
		switch rtype.Kind() {
		case reflect.Struct:
			// also it can't be a time object
			if _, ok := r.Interface().(*time.Time); !ok {
				t = "structure"
			}
		case reflect.Slice:
			// also it can't be a byte slice
			if _, ok := r.Interface().([]byte); !ok {
				t = "list"
			}
		case reflect.Map:
			t = "map"
		}
	}

	switch t {
	case "structure":
		// Shape-level metadata lives on the struct's "_" field.
		if field, ok := rtype.FieldByName("_"); ok {
			tag = field.Tag
		}
		return parseStruct(r, node, tag)
	case "list":
		return parseList(r, node, tag)
	case "map":
		return parseMap(r, node, tag)
	default:
		// time.Time, []byte, and primitives fall through to the scalar
		// parser.
		return parseScalar(r, node, tag)
	}
}
+
// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
// types in the structure will also be deserialized.
func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()
	if r.Kind() == reflect.Ptr {
		if r.IsNil() { // create the structure if it's nil
			s := reflect.New(r.Type().Elem())
			r.Set(s)
			r = s
		}

		r = r.Elem()
		t = t.Elem()
	}

	// unwrap any payloads: a "payload" tag redirects decoding into the
	// named field.
	if payload := tag.Get("payload"); payload != "" {
		field, _ := t.FieldByName(payload)
		return parseStruct(r.FieldByName(payload), node, field.Tag)
	}

	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		if c := field.Name[0:1]; strings.ToLower(c) == c {
			continue // ignore unexported fields
		}

		// figure out what this field is called
		name := field.Name
		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
			name = field.Tag.Get("locationNameList")
		} else if locName := field.Tag.Get("locationName"); locName != "" {
			name = locName
		}

		// try to find the field by name in elements
		elems := node.Children[name]

		if elems == nil { // try to find the field in attributes
			if val, ok := node.findElem(name); ok {
				// Wrap the attribute value in a synthetic text node so
				// it parses like an element.
				elems = []*XMLNode{{Text: val}}
			}
		}

		member := r.FieldByName(field.Name)
		for _, elem := range elems {
			err := parse(member, elem, field.Tag)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
+
// parseList deserializes a list of values from an XML node. Each list entry
// will also be deserialized.
func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	t := r.Type()

	if tag.Get("flattened") == "" { // look at all item entries
		mname := "member"
		if name := tag.Get("locationNameList"); name != "" {
			mname = name
		}

		if Children, ok := node.Children[mname]; ok {
			if r.IsNil() {
				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
			}

			for i, c := range Children {
				err := parse(r.Index(i), c, "")
				if err != nil {
					return err
				}
			}
		}
	} else { // flattened list means this is a single element
		// Each call appends one zero element and parses node into it;
		// repeated sibling elements accumulate across calls.
		if r.IsNil() {
			r.Set(reflect.MakeSlice(t, 0, 0))
		}

		childR := reflect.Zero(t.Elem())
		r.Set(reflect.Append(r, childR))
		err := parse(r.Index(r.Len()-1), node, "")
		if err != nil {
			return err
		}
	}

	return nil
}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from a XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
// parseScalar deserializes an XMLNode value into a concrete type based on the
// interface type of r.
//
// Error is returned if the deserialization fails due to invalid type conversion,
// or unsupported interface type.
func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
	switch r.Interface().(type) {
	case *string:
		r.Set(reflect.ValueOf(&node.Text))
		return nil
	case []byte:
		// Blob scalars are transported as base64 text.
		b, err := base64.StdEncoding.DecodeString(node.Text)
		if err != nil {
			return err
		}
		r.Set(reflect.ValueOf(b))
	case *bool:
		v, err := strconv.ParseBool(node.Text)
		if err != nil {
			return err
		}
		r.Set(reflect.ValueOf(&v))
	case *int64:
		v, err := strconv.ParseInt(node.Text, 10, 64)
		if err != nil {
			return err
		}
		r.Set(reflect.ValueOf(&v))
	case *float64:
		v, err := strconv.ParseFloat(node.Text, 64)
		if err != nil {
			return err
		}
		r.Set(reflect.ValueOf(&v))
	case *time.Time:
		// XML bodies default to the ISO 8601 timestamp format unless
		// the field's tag overrides it.
		format := tag.Get("timestampFormat")
		if len(format) == 0 {
			format = protocol.ISO8601TimeFormatName
		}

		t, err := protocol.ParseTime(format, node.Text)
		if err != nil {
			return err
		}
		r.Set(reflect.ValueOf(&t))
	default:
		return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
	}
	return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 00000000000..515ce15215b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,148 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// A XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ child.parent = n
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
//
// The decoder is consumed until io.EOF (or, when s is non-nil, until the
// end element matching s); any other tokenization error is returned along
// with the partially built tree.
func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
	out := &XMLNode{}
	for {
		tok, err := d.Token()
		if err != nil {
			if err == io.EOF {
				break
			} else {
				return out, err
			}
		}

		if tok == nil {
			break
		}

		switch typed := tok.(type) {
		case xml.CharData:
			// Copy the character data; the token's backing buffer is
			// reused by the decoder.
			out.Text = string(typed.Copy())
		case xml.StartElement:
			el := typed.Copy()
			out.Attr = el.Attr
			if out.Children == nil {
				out.Children = map[string][]*XMLNode{}
			}

			name := typed.Name.Local
			slice := out.Children[name]
			if slice == nil {
				slice = []*XMLNode{}
			}
			// Recurse to build the subtree rooted at this element.
			node, e := XMLToStruct(d, &el)
			out.findNamespaces()
			if e != nil {
				return out, e
			}
			node.Name = typed.Name
			node.findNamespaces()
			tempOut := *out
			// Save into a temp variable, simply because out gets squashed during
			// loop iterations
			node.parent = &tempOut
			slice = append(slice, node)
			out.Children[name] = slice
		case xml.EndElement:
			if s != nil && s.Name.Local == typed.Name.Local { // matching end token
				return out, nil
			}
			// NOTE(review): an unmatched end element resets the
			// accumulator, discarding anything gathered at this level —
			// confirm this branch is only reachable for empty elements.
			out = &XMLNode{}
		}
	}
	return out, nil
}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
// findElem walks from this node up through its ancestors looking for an
// attribute whose namespace-qualified name ("prefix:local") matches name,
// returning the attribute's value if found.
//
// NOTE(review): despite the name, only attributes are searched — this
// matches its use in parseStruct as an attribute fallback.
func (n *XMLNode) findElem(name string) (string, bool) {
	for node := n; node != nil; node = node.parent {
		for _, a := range node.Attr {
			// Translate the raw namespace URI to its declared prefix
			// when one was recorded by findNamespaces.
			namespace := a.Name.Space
			if v, ok := node.namespaces[namespace]; ok {
				namespace = v
			}
			if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
				return a.Value, true
			}
		}
	}
	return "", false
}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 00000000000..81130896491
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2401 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
const opAssumeRole = "AssumeRole"

// AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See AssumeRole for more information on using the AssumeRole
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
//    // Example sending a request using the AssumeRoleRequest method.
//    req, resp := client.AssumeRoleRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
	// AssumeRole is a plain POST to the service root; the operation name
	// selects the API action.
	op := &request.Operation{
		Name:       opAssumeRole,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	// A nil input is allowed; substitute an empty input shape.
	if input == nil {
		input = &AssumeRoleInput{}
	}

	output = &AssumeRoleOutput{}
	req = c.newRequest(op, input, output)
	return
}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation. For a comparison of
+// AssumeRole with the other APIs that produce temporary credentials, see Requesting
+// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Important: You cannot call AssumeRole by using AWS root account credentials;
+// access is denied. You must use credentials for an IAM user or an IAM role
+// to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the IAM User Guide.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the IAM User Guide.
+//
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+// In this case, the trust policy acts as the only resource-based policy in
+// IAM, and users in the same account as the role do not need explicit permission
+// to assume the role. For more information about trust policies and resource-based
+// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	r, result := c.AssumeRoleRequest(input) // build the request, then execute it synchronously
+	return result, r.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+	r, result := c.AssumeRoleRequest(input)
+	r.SetContext(ctx) // panics if ctx is nil, as documented above
+	r.ApplyOptions(opts...)
+	return result, r.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{} // callers may pass nil; substitute an empty input
+	}
+
+	operation := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &AssumeRoleWithSAMLOutput{}
+	req = c.newRequest(operation, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials // SAML callers need no AWS credentials
+	return req, output
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by the intersection of both the access policy
+// of the role that is being assumed, and the policy that you pass. This means
+// that both policies must grant the permission for the action to be allowed.
+// This gives you a way to further restrict the permissions for the resulting
+// temporary security credentials. You cannot use the passed policy to grant
+// permissions that are in excess of those allowed by the access policy of the
+// role that is being assumed. For more information, see Permissions for AssumeRole,
+// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+	r, result := c.AssumeRoleWithSAMLRequest(input) // build the request, then execute it synchronously
+	return result, r.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+	r, result := c.AssumeRoleWithSAMLRequest(input)
+	r.SetContext(ctx) // panics if ctx is nil, as documented above
+	r.ApplyOptions(opts...)
+	return result, r.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{} // callers may pass nil; substitute an empty input
+	}
+
+	operation := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(operation, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials // web-identity callers need no AWS credentials
+	return req, output
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI operations but
+// does not apply when you use those operations to create a console URL. For
+// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security
+// credentials, and then using those credentials to make a request to AWS.
+//
+//
+// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+// apps that show how to invoke the identity providers, and then how to use
+// the information from these providers to get and use temporary security
+// credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the non-AWS identity provider
+// (IDP) that was asked to verify the incoming identity token could not be reached.
+// This is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the non-AWS identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	r, result := c.AssumeRoleWithWebIdentityRequest(input) // build the request, then execute it synchronously
+	return result, r.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+	r, result := c.AssumeRoleWithWebIdentityRequest(input)
+	r.SetContext(ctx) // panics if ctx is nil, as documented above
+	r.ApplyOptions(opts...)
+	return result, r.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{} // callers may pass nil; substitute an empty input
+	}
+
+	operation := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &DecodeAuthorizationMessageOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	r, result := c.DecodeAuthorizationMessageRequest(input) // build the request, then execute it synchronously
+	return result, r.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	r, result := c.DecodeAuthorizationMessageRequest(input)
+	r.SetContext(ctx) // panics if ctx is nil, as documented above
+	r.ApplyOptions(opts...)
+	return result, r.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	if input == nil {
+		input = &GetCallerIdentityInput{} // callers may pass nil; substitute an empty input
+	}
+
+	operation := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(operation, input, output)
+	return req, output
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	r, result := c.GetCallerIdentityRequest(input) // build the request, then execute it synchronously
+	return result, r.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	r, result := c.GetCallerIdentityRequest(input)
+	r.SetContext(ctx) // panics if ctx is nil, as documented above
+	r.ApplyOptions(opts...)
+	return result, r.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	// Substitute an empty input so newRequest always receives a non-nil value.
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// * The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken.
+//
+// * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	// Execute synchronously; out is only meaningful when err is nil.
+	err := req.Send()
+	return out, err
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	// Attach the caller's context and per-request options before sending.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	err := req.Send()
+	return out, err
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	// Substitute an empty input so newRequest always receives a non-nil value.
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+	req = c.newRequest(op, input, output)
+	return req, output
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	// Execute synchronously; out is only meaningful when err is nil.
+	err := req.Send()
+	return out, err
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	// Attach the caller's context and per-request options before sending.
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	err := req.Send()
+	return out, err
+}
+
+type AssumeRoleInput struct {
+	// Protocol marker for the SDK marshalers; the tag declares this type a structure.
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) up to the maximum session duration setting for the role.
+	// This setting can have a value from 1 hour to 12 hours. If you specify a value
+	// higher than this setting, the operation fails. For example, if you specify
+	// a session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails. To learn how to view the maximum
+	// value for your role, see View the Maximum Session Duration Setting for a
+	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+	// You cannot use the passed policy to grant permissions that are in excess
+	// of those allowed by the access policy of the role that is being assumed.
+	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session.
+	//
+	// Use the role session name to uniquely identify a session when the same role
+	// is assumed by different principals or for different reasons. In cross-account
+	// scenarios, the role session name is visible to, and can be logged by the
+	// account that owns the role. The role session name is also used in the ARN
+	// of the assumed role principal. This means that subsequent cross-account API
+	// requests using the temporary security credentials will expose the role session
+	// name to the external account in their CloudTrail logs.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-
+	//
+	// RoleSessionName is a required field
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@-
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns a human-readable representation of the value, produced by
+// awsutil.Prettify.
+func (s AssumeRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the value's representation for the %#v format verb; it
+// delegates to String.
+func (s AssumeRoleInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// The checks mirror the min/required constraints declared in the struct tags;
+// a non-nil return aggregates every violation found, in field-check order.
+func (s *AssumeRoleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.RoleSessionName == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+	}
+	if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+	}
+	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value and returns the
+// input to allow method chaining.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetExternalId sets the ExternalId field's value and returns the input to
+// allow method chaining.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+	s.ExternalId = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value and returns the input to allow
+// method chaining.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+	s.Policy = &v
+	return s
+}
+
+// SetRoleArn sets the RoleArn field's value and returns the input to allow
+// method chaining.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+	s.RoleArn = &v
+	return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value and returns the
+// input to allow method chaining.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+	s.RoleSessionName = &v
+	return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value and returns the input
+// to allow method chaining.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+	s.SerialNumber = &v
+	return s
+}
+
+// SetTokenCode sets the TokenCode field's value and returns the input to
+// allow method chaining.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+	s.TokenCode = &v
+	return s
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+	// Protocol marker for the SDK marshalers; the tag declares this type a structure.
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns a human-readable representation of the value, produced by
+// awsutil.Prettify.
+func (s AssumeRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the value's representation for the %#v format verb; it
+// delegates to String.
+func (s AssumeRoleOutput) GoString() string {
+	return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value and returns the
+// output to allow method chaining.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+	s.AssumedRoleUser = v
+	return s
+}
+
+// SetCredentials sets the Credentials field's value and returns the output
+// to allow method chaining.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+	s.Credentials = v
+	return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value and returns
+// the output to allow method chaining.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+	s.PackedPolicySize = &v
+	return s
+}
+
+type AssumeRoleWithSAMLInput struct {
+	// Protocol marker for the SDK marshalers; the tag declares this type a structure.
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. Your role session lasts for
+	// the duration that you specify for the DurationSeconds parameter, or until
+	// the time specified in the SAML authentication response's SessionNotOnOrAfter
+	// value, whichever is shorter. You can provide a DurationSeconds value from
+	// 900 seconds (15 minutes) up to the maximum session duration setting for the
+	// role. This setting can have a value from 1 hour to 12 hours. If you specify
+	// a value higher than this setting, the operation fails. For example, if you
+	// specify a session duration of 12 hours, but your administrator set the maximum
+	// session duration to 6 hours, your operation fails. To learn how to view the
+	// maximum value for your role, see View the Maximum Session Duration Setting
+	// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
+	//
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	//
+	// PrincipalArn is a required field
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	//
+	// SAMLAssertion is a required field
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns a human-readable representation of the value, produced by
+// awsutil.Prettify.
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the value's representation for the %#v format verb; it
+// delegates to String.
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+// The checks mirror the min/required constraints declared in the struct tags;
+// a non-nil return aggregates every violation found, in field-check order.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.PrincipalArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+	}
+	if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.SAMLAssertion == nil {
+		invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+	}
+	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value and returns the
+// input to allow method chaining.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value and returns the input to allow
+// method chaining.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+	s.Policy = &v
+	return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
+
// Contains the response to a successful AssumeRoleWithSAML request, including
// temporary AWS credentials that can be used to make AWS requests.
type AssumeRoleWithSAMLOutput struct {
	_ struct{} `type:"structure"`

	// The identifiers for the temporary security credentials that the operation
	// returns.
	AssumedRoleUser *AssumedRoleUser `type:"structure"`

	// The value of the Recipient attribute of the SubjectConfirmationData element
	// of the SAML assertion.
	Audience *string `type:"string"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security (or session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials *Credentials `type:"structure"`

	// The value of the Issuer element of the SAML assertion.
	Issuer *string `type:"string"`

	// A hash value based on the concatenation of the Issuer response value, the
	// AWS account ID, and the friendly name (the last part of the ARN) of the SAML
	// provider in IAM. The combination of NameQualifier and Subject can be used
	// to uniquely identify a federated user.
	//
	// The following pseudocode shows how the hash value is calculated:
	//
	// BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
	// ) )
	NameQualifier *string `type:"string"`

	// A percentage value that indicates the size of the policy in packed form.
	// The service rejects any policy with a packed size greater than 100 percent,
	// which means the policy exceeded the allowed space.
	PackedPolicySize *int64 `type:"integer"`

	// The value of the NameID element in the Subject element of the SAML assertion.
	Subject *string `type:"string"`

	// The format of the name ID, as defined by the Format attribute in the NameID
	// element of the SAML assertion. Typical examples of the format are transient
	// or persistent.
	//
	// If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
	// that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
	// is returned as transient. If the format includes any other prefix, the format
	// is returned with no modifications.
	SubjectType *string `type:"string"`
}

// String returns a human-readable representation of the output, rendered by
// awsutil.Prettify.
func (s AssumeRoleWithSAMLOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s AssumeRoleWithSAMLOutput) GoString() string {
	return s.String()
}

// SetAssumedRoleUser sets the AssumedRoleUser field's value.
func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
	s.AssumedRoleUser = v
	return s
}

// SetAudience sets the Audience field's value.
func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
	s.Audience = &v
	return s
}

// SetCredentials sets the Credentials field's value.
func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
	s.Credentials = v
	return s
}

// SetIssuer sets the Issuer field's value.
func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
	s.Issuer = &v
	return s
}

// SetNameQualifier sets the NameQualifier field's value.
func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
	s.NameQualifier = &v
	return s
}

// SetPackedPolicySize sets the PackedPolicySize field's value.
func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
	s.PackedPolicySize = &v
	return s
}

// SetSubject sets the Subject field's value.
func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
	s.Subject = &v
	return s
}

// SetSubjectType sets the SubjectType field's value.
func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
	s.SubjectType = &v
	return s
}
+
// AssumeRoleWithWebIdentityInput contains the input parameters for an
// AssumeRoleWithWebIdentity request.
type AssumeRoleWithWebIdentityInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, of the role session. The value can range from 900
	// seconds (15 minutes) up to the maximum session duration setting for the role.
	// This setting can have a value from 1 hour to 12 hours. If you specify a value
	// higher than this setting, the operation fails. For example, if you specify
	// a session duration of 12 hours, but your administrator set the maximum session
	// duration to 6 hours, your operation fails. To learn how to view the maximum
	// value for your role, see View the Maximum Session Duration Setting for a
	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
	// in the IAM User Guide.
	//
	// By default, the value is set to 3600 seconds.
	//
	// The DurationSeconds parameter is separate from the duration of a console
	// session that you might request using the returned credentials. The request
	// to the federation endpoint for a console sign-in token takes a SessionDuration
	// parameter that specifies the maximum length of the console session. For more
	// information, see Creating a URL that Enables Federated Users to Access the
	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
	// in the IAM User Guide.
	DurationSeconds *int64 `min:"900" type:"integer"`

	// An IAM policy in JSON format.
	//
	// The policy parameter is optional. If you pass a policy, the temporary security
	// credentials that are returned by the operation have the permissions that
	// are allowed by both the access policy of the role that is being assumed,
	// and the policy that you pass. This gives you a way to further restrict the
	// permissions for the resulting temporary security credentials. You cannot
	// use the passed policy to grant permissions that are in excess of those allowed
	// by the access policy of the role that is being assumed. For more information,
	// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
	// in the IAM User Guide.
	//
	// The format for this parameter, as described by its regex pattern, is a string
	// of characters up to 2048 characters in length. The characters can be any
	// ASCII character from the space character to the end of the valid character
	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
	// and carriage return (\u000D) characters.
	//
	// The policy plain text must be 2048 bytes or shorter. However, an internal
	// conversion compresses it into a packed binary format with a separate limit.
	// The PackedPolicySize response element indicates by percentage how close to
	// the upper size limit the policy is, with 100% equaling the maximum allowed
	// size.
	Policy *string `min:"1" type:"string"`

	// The fully qualified host component of the domain name of the identity provider.
	//
	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
	// and graph.facebook.com are the only supported identity providers for OAuth
	// 2.0 access tokens. Do not include URL schemes and port numbers.
	//
	// Do not specify this value for OpenID Connect ID tokens.
	ProviderId *string `min:"4" type:"string"`

	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
	//
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

	// An identifier for the assumed role session. Typically, you pass the name
	// or identifier that is associated with the user who is using your application.
	// That way, the temporary security credentials that your application will use
	// are associated with that user. This session name is included as part of the
	// ARN and assumed role ID in the AssumedRoleUser response element.
	//
	// The regex used to validate this parameter is a string of characters consisting
	// of upper- and lower-case alphanumeric characters with no spaces. You can
	// also include underscores or any of the following characters: =,.@-
	//
	// RoleSessionName is a required field
	RoleSessionName *string `min:"2" type:"string" required:"true"`

	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
	// the identity provider. Your application must get this token by authenticating
	// the user who is using your application with a web identity provider before
	// the application makes an AssumeRoleWithWebIdentity call.
	//
	// WebIdentityToken is a required field
	WebIdentityToken *string `min:"4" type:"string" required:"true"`
}

// String returns a human-readable representation of the input, rendered by
// awsutil.Prettify.
func (s AssumeRoleWithWebIdentityInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s AssumeRoleWithWebIdentityInput) GoString() string {
	return s.String()
}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetDurationSeconds sets the DurationSeconds field's value.
func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
	s.DurationSeconds = &v
	return s
}

// SetPolicy sets the Policy field's value.
func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
	s.Policy = &v
	return s
}

// SetProviderId sets the ProviderId field's value.
func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
	s.ProviderId = &v
	return s
}

// SetRoleArn sets the RoleArn field's value.
func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
	s.RoleArn = &v
	return s
}

// SetRoleSessionName sets the RoleSessionName field's value.
// The setters return the receiver so calls can be chained fluently.
func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
	s.RoleSessionName = &v
	return s
}

// SetWebIdentityToken sets the WebIdentityToken field's value.
func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
	s.WebIdentityToken = &v
	return s
}
+
// Contains the response to a successful AssumeRoleWithWebIdentity request,
// including temporary AWS credentials that can be used to make AWS requests.
type AssumeRoleWithWebIdentityOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
	// that you can use to refer to the resulting temporary security credentials.
	// For example, you can reference these credentials as a principal in a resource-based
	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
	// that you specified when you called AssumeRole.
	AssumedRoleUser *AssumedRoleUser `type:"structure"`

	// The intended audience (also known as client ID) of the web identity token.
	// This is traditionally the client identifier issued to the application that
	// requested the web identity token.
	Audience *string `type:"string"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials *Credentials `type:"structure"`

	// A percentage value that indicates the size of the policy in packed form.
	// The service rejects any policy with a packed size greater than 100 percent,
	// which means the policy exceeded the allowed space.
	PackedPolicySize *int64 `type:"integer"`

	// The issuing authority of the web identity token presented. For OpenID Connect
	// ID Tokens this contains the value of the iss field. For OAuth 2.0 access
	// tokens, this contains the value of the ProviderId parameter that was passed
	// in the AssumeRoleWithWebIdentity request.
	Provider *string `type:"string"`

	// The unique user identifier that is returned by the identity provider. This
	// identifier is associated with the WebIdentityToken that was submitted with
	// the AssumeRoleWithWebIdentity call. The identifier is typically unique to
	// the user and the application that acquired the WebIdentityToken (pairwise
	// identifier). For OpenID Connect ID tokens, this field contains the value
	// returned by the identity provider as the token's sub (Subject) claim.
	SubjectFromWebIdentityToken *string `min:"6" type:"string"`
}

// String returns a human-readable representation of the output, rendered by
// awsutil.Prettify.
func (s AssumeRoleWithWebIdentityOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s AssumeRoleWithWebIdentityOutput) GoString() string {
	return s.String()
}

// SetAssumedRoleUser sets the AssumedRoleUser field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
	s.AssumedRoleUser = v
	return s
}

// SetAudience sets the Audience field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
	s.Audience = &v
	return s
}

// SetCredentials sets the Credentials field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
	s.Credentials = v
	return s
}

// SetPackedPolicySize sets the PackedPolicySize field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
	s.PackedPolicySize = &v
	return s
}

// SetProvider sets the Provider field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
	s.Provider = &v
	return s
}

// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
	s.SubjectFromWebIdentityToken = &v
	return s
}
+
// The identifiers for the temporary security credentials that the operation
// returns.
type AssumedRoleUser struct {
	_ struct{} `type:"structure"`

	// The ARN of the temporary security credentials that are returned from the
	// AssumeRole action. For more information about ARNs and how to use them in
	// policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
	// in Using IAM.
	//
	// Arn is a required field
	Arn *string `min:"20" type:"string" required:"true"`

	// A unique identifier that contains the role ID and the role session name of
	// the role that is being assumed. The role ID is generated by AWS when the
	// role is created.
	//
	// AssumedRoleId is a required field
	AssumedRoleId *string `min:"2" type:"string" required:"true"`
}

// String returns a human-readable representation of the struct, rendered by
// awsutil.Prettify.
func (s AssumedRoleUser) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s AssumedRoleUser) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
	s.Arn = &v
	return s
}

// SetAssumedRoleId sets the AssumedRoleId field's value.
func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
	s.AssumedRoleId = &v
	return s
}
+
// AWS credentials for API authentication.
type Credentials struct {
	_ struct{} `type:"structure"`

	// The access key ID that identifies the temporary security credentials.
	//
	// AccessKeyId is a required field
	AccessKeyId *string `min:"16" type:"string" required:"true"`

	// The date on which the current credentials expire.
	//
	// Expiration is a required field
	Expiration *time.Time `type:"timestamp" required:"true"`

	// The secret access key that can be used to sign requests.
	//
	// SecretAccessKey is a required field
	SecretAccessKey *string `type:"string" required:"true"`

	// The token that users must pass to the service API to use the temporary credentials.
	//
	// SessionToken is a required field
	SessionToken *string `type:"string" required:"true"`
}

// String returns a human-readable representation of the credentials, rendered
// by awsutil.Prettify. NOTE(review): this includes the secret access key and
// session token; avoid logging it for real credentials.
func (s Credentials) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s Credentials) GoString() string {
	return s.String()
}

// SetAccessKeyId sets the AccessKeyId field's value.
func (s *Credentials) SetAccessKeyId(v string) *Credentials {
	s.AccessKeyId = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *Credentials) SetExpiration(v time.Time) *Credentials {
	s.Expiration = &v
	return s
}

// SetSecretAccessKey sets the SecretAccessKey field's value.
func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
	s.SecretAccessKey = &v
	return s
}

// SetSessionToken sets the SessionToken field's value.
func (s *Credentials) SetSessionToken(v string) *Credentials {
	s.SessionToken = &v
	return s
}
+
// DecodeAuthorizationMessageInput contains the input parameters for a
// DecodeAuthorizationMessage request.
type DecodeAuthorizationMessageInput struct {
	_ struct{} `type:"structure"`

	// The encoded message that was returned with the response.
	//
	// EncodedMessage is a required field
	EncodedMessage *string `min:"1" type:"string" required:"true"`
}

// String returns a human-readable representation of the input, rendered by
// awsutil.Prettify.
func (s DecodeAuthorizationMessageInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s DecodeAuthorizationMessageInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an error describing every violation, or nil when the input is
// valid.
func (s *DecodeAuthorizationMessageInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
	if s.EncodedMessage == nil {
		invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
	}
	if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEncodedMessage sets the EncodedMessage field's value.
func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
	s.EncodedMessage = &v
	return s
}
+
// A document that contains additional information about the authorization status
// of a request from an encoded message that is returned in response to an AWS
// request.
type DecodeAuthorizationMessageOutput struct {
	_ struct{} `type:"structure"`

	// An XML document that contains the decoded message.
	DecodedMessage *string `type:"string"`
}

// String returns a human-readable representation of the output, rendered by
// awsutil.Prettify.
func (s DecodeAuthorizationMessageOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s DecodeAuthorizationMessageOutput) GoString() string {
	return s.String()
}

// SetDecodedMessage sets the DecodedMessage field's value.
func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
	s.DecodedMessage = &v
	return s
}
+
// Identifiers for the federated user that is associated with the credentials.
type FederatedUser struct {
	_ struct{} `type:"structure"`

	// The ARN that specifies the federated user that is associated with the credentials.
	// For more information about ARNs and how to use them in policies, see IAM
	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
	// in Using IAM.
	//
	// Arn is a required field
	Arn *string `min:"20" type:"string" required:"true"`

	// The string that identifies the federated user associated with the credentials,
	// similar to the unique ID of an IAM user.
	//
	// FederatedUserId is a required field
	FederatedUserId *string `min:"2" type:"string" required:"true"`
}

// String returns a human-readable representation of the struct, rendered by
// awsutil.Prettify.
func (s FederatedUser) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s FederatedUser) GoString() string {
	return s.String()
}

// SetArn sets the Arn field's value.
func (s *FederatedUser) SetArn(v string) *FederatedUser {
	s.Arn = &v
	return s
}

// SetFederatedUserId sets the FederatedUserId field's value.
func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
	s.FederatedUserId = &v
	return s
}
+
// GetCallerIdentityInput is the (empty) input for a GetCallerIdentity request;
// the operation takes no parameters.
type GetCallerIdentityInput struct {
	_ struct{} `type:"structure"`
}

// String returns a human-readable representation of the input, rendered by
// awsutil.Prettify.
func (s GetCallerIdentityInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s GetCallerIdentityInput) GoString() string {
	return s.String()
}
+
// Contains the response to a successful GetCallerIdentity request, including
// information about the entity making the request.
type GetCallerIdentityOutput struct {
	_ struct{} `type:"structure"`

	// The AWS account ID number of the account that owns or contains the calling
	// entity.
	Account *string `type:"string"`

	// The AWS ARN associated with the calling entity.
	Arn *string `min:"20" type:"string"`

	// The unique identifier of the calling entity. The exact value depends on the
	// type of entity making the call. The values returned are those listed in the
	// aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
	// found on the Policy Variables reference page in the IAM User Guide.
	UserId *string `type:"string"`
}

// String returns a human-readable representation of the output, rendered by
// awsutil.Prettify.
func (s GetCallerIdentityOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s GetCallerIdentityOutput) GoString() string {
	return s.String()
}

// SetAccount sets the Account field's value.
func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
	s.Account = &v
	return s
}

// SetArn sets the Arn field's value.
func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
	s.Arn = &v
	return s
}

// SetUserId sets the UserId field's value.
func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
	s.UserId = &v
	return s
}
+
// GetFederationTokenInput contains the input parameters for a
// GetFederationToken request.
type GetFederationTokenInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, that the session should last. Acceptable durations
	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
	// using AWS account (root) credentials are restricted to a maximum of 3600
	// seconds (one hour). If the specified duration is longer than one hour, the
	// session obtained by using AWS account (root) credentials defaults to one
	// hour.
	DurationSeconds *int64 `min:"900" type:"integer"`

	// The name of the federated user. The name is used as an identifier for the
	// temporary security credentials (such as Bob). For example, you can reference
	// the federated user name in a resource-based policy, such as in an Amazon
	// S3 bucket policy.
	//
	// The regex used to validate this parameter is a string of characters consisting
	// of upper- and lower-case alphanumeric characters with no spaces. You can
	// also include underscores or any of the following characters: =,.@-
	//
	// Name is a required field
	Name *string `min:"2" type:"string" required:"true"`

	// An IAM policy in JSON format that is passed with the GetFederationToken call
	// and evaluated along with the policy or policies that are attached to the
	// IAM user whose credentials are used to call GetFederationToken. The passed
	// policy is used to scope down the permissions that are available to the IAM
	// user, by allowing only a subset of the permissions that are granted to the
	// IAM user. The passed policy cannot grant more permissions than those granted
	// to the IAM user. The final permissions for the federated user are the most
	// restrictive set based on the intersection of the passed policy and the IAM
	// user policy.
	//
	// If you do not pass a policy, the resulting temporary security credentials
	// have no effective permissions. The only exception is when the temporary security
	// credentials are used to access a resource that has a resource-based policy
	// that specifically allows the federated user to access the resource.
	//
	// The format for this parameter, as described by its regex pattern, is a string
	// of characters up to 2048 characters in length. The characters can be any
	// ASCII character from the space character to the end of the valid character
	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
	// and carriage return (\u000D) characters.
	//
	// The policy plain text must be 2048 bytes or shorter. However, an internal
	// conversion compresses it into a packed binary format with a separate limit.
	// The PackedPolicySize response element indicates by percentage how close to
	// the upper size limit the policy is, with 100% equaling the maximum allowed
	// size.
	//
	// For more information about how permissions work, see Permissions for GetFederationToken
	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
	Policy *string `min:"1" type:"string"`
}

// String returns a human-readable representation of the input, rendered by
// awsutil.Prettify.
func (s GetFederationTokenInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s GetFederationTokenInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an error describing every violation, or nil when the input is
// valid.
func (s *GetFederationTokenInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
	}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 2 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 2))
	}
	if s.Policy != nil && len(*s.Policy) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDurationSeconds sets the DurationSeconds field's value.
func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
	s.DurationSeconds = &v
	return s
}

// SetName sets the Name field's value.
func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
	s.Name = &v
	return s
}

// SetPolicy sets the Policy field's value.
func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
	s.Policy = &v
	return s
}
+
// Contains the response to a successful GetFederationToken request, including
// temporary AWS credentials that can be used to make AWS requests.
type GetFederationTokenOutput struct {
	_ struct{} `type:"structure"`

	// The temporary security credentials, which include an access key ID, a secret
	// access key, and a security (or session) token.
	//
	// Note: The size of the security token that STS APIs return is not fixed. We
	// strongly recommend that you make no assumptions about the maximum size. As
	// of this writing, the typical size is less than 4096 bytes, but that can vary.
	// Also, future updates to AWS might require larger sizes.
	Credentials *Credentials `type:"structure"`

	// Identifiers for the federated user associated with the credentials (such
	// as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
	// can use the federated user's ARN in your resource-based policies, such as
	// an Amazon S3 bucket policy.
	FederatedUser *FederatedUser `type:"structure"`

	// A percentage value indicating the size of the policy in packed form. The
	// service rejects policies for which the packed size is greater than 100 percent
	// of the allowed value.
	PackedPolicySize *int64 `type:"integer"`
}

// String returns a human-readable representation of the output, rendered by
// awsutil.Prettify.
func (s GetFederationTokenOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the same representation as String; it satisfies
// fmt.GoStringer.
func (s GetFederationTokenOutput) GoString() string {
	return s.String()
}

// SetCredentials sets the Credentials field's value.
func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
	s.Credentials = v
	return s
}

// SetFederatedUser sets the FederatedUser field's value.
func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
	s.FederatedUser = v
	return s
}

// SetPackedPolicySize sets the PackedPolicySize field's value.
func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
	s.PackedPolicySize = &v
	return s
}
+
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+ // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+ // If the duration is longer than one hour, the session for AWS account owners
+ // defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+	// The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, and the user does not provide a code when requesting a set of
+ // temporary security credentials, the user will receive an "access denied"
+ // response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // Note: The size of the security token that STS APIs return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size. As
+ // of this writing, the typical size is less than 4096 bytes, but that can vary.
+ // Also, future updates to AWS might require larger sizes.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 00000000000..ef681ab0c63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,72 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To contact AWS Security Token Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 00000000000..e24884ef371
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000000..185c914d1b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
new file mode 100644
index 00000000000..e068e731ea7
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
new file mode 100644
index 00000000000..2f12e428ede
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
@@ -0,0 +1,356 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/agent/common/v1/common.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type LibraryInfo_Language int32
+
+const (
+ LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
+ LibraryInfo_CPP LibraryInfo_Language = 1
+ LibraryInfo_C_SHARP LibraryInfo_Language = 2
+ LibraryInfo_ERLANG LibraryInfo_Language = 3
+ LibraryInfo_GO_LANG LibraryInfo_Language = 4
+ LibraryInfo_JAVA LibraryInfo_Language = 5
+ LibraryInfo_NODE_JS LibraryInfo_Language = 6
+ LibraryInfo_PHP LibraryInfo_Language = 7
+ LibraryInfo_PYTHON LibraryInfo_Language = 8
+ LibraryInfo_RUBY LibraryInfo_Language = 9
+)
+
+var LibraryInfo_Language_name = map[int32]string{
+ 0: "LANGUAGE_UNSPECIFIED",
+ 1: "CPP",
+ 2: "C_SHARP",
+ 3: "ERLANG",
+ 4: "GO_LANG",
+ 5: "JAVA",
+ 6: "NODE_JS",
+ 7: "PHP",
+ 8: "PYTHON",
+ 9: "RUBY",
+}
+
+var LibraryInfo_Language_value = map[string]int32{
+ "LANGUAGE_UNSPECIFIED": 0,
+ "CPP": 1,
+ "C_SHARP": 2,
+ "ERLANG": 3,
+ "GO_LANG": 4,
+ "JAVA": 5,
+ "NODE_JS": 6,
+ "PHP": 7,
+ "PYTHON": 8,
+ "RUBY": 9,
+}
+
+func (x LibraryInfo_Language) String() string {
+ return proto.EnumName(LibraryInfo_Language_name, int32(x))
+}
+
+func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{2, 0}
+}
+
+// Identifier metadata of the Node (Application instrumented with OpenCensus)
+// that connects to OpenCensus Agent.
+// In the future we plan to extend the identifier proto definition to support
+// additional information (e.g cloud id, etc.)
+type Node struct {
+ // Identifier that uniquely identifies a process within a VM/container.
+ Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ // Information on the OpenCensus Library that initiates the stream.
+ LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
+ // Additional information on service.
+ ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
+ // Additional attributes.
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Node) Reset() { *m = Node{} }
+func (m *Node) String() string { return proto.CompactTextString(m) }
+func (*Node) ProtoMessage() {}
+func (*Node) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{0}
+}
+
+func (m *Node) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Node.Unmarshal(m, b)
+}
+func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Node.Marshal(b, m, deterministic)
+}
+func (m *Node) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Node.Merge(m, src)
+}
+func (m *Node) XXX_Size() int {
+ return xxx_messageInfo_Node.Size(m)
+}
+func (m *Node) XXX_DiscardUnknown() {
+ xxx_messageInfo_Node.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Node proto.InternalMessageInfo
+
+func (m *Node) GetIdentifier() *ProcessIdentifier {
+ if m != nil {
+ return m.Identifier
+ }
+ return nil
+}
+
+func (m *Node) GetLibraryInfo() *LibraryInfo {
+ if m != nil {
+ return m.LibraryInfo
+ }
+ return nil
+}
+
+func (m *Node) GetServiceInfo() *ServiceInfo {
+ if m != nil {
+ return m.ServiceInfo
+ }
+ return nil
+}
+
+func (m *Node) GetAttributes() map[string]string {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// Identifier that uniquely identifies a process within a VM/container.
+type ProcessIdentifier struct {
+ // The host name. Usually refers to the machine/container name.
+ // For example: os.Hostname() in Go, socket.gethostname() in Python.
+ HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
+ // Process id.
+ Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ // Start time of this ProcessIdentifier. Represented in epoch time.
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
+func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
+func (*ProcessIdentifier) ProtoMessage() {}
+func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{1}
+}
+
+func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
+}
+func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
+}
+func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProcessIdentifier.Merge(m, src)
+}
+func (m *ProcessIdentifier) XXX_Size() int {
+ return xxx_messageInfo_ProcessIdentifier.Size(m)
+}
+func (m *ProcessIdentifier) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
+
+func (m *ProcessIdentifier) GetHostName() string {
+ if m != nil {
+ return m.HostName
+ }
+ return ""
+}
+
+func (m *ProcessIdentifier) GetPid() uint32 {
+ if m != nil {
+ return m.Pid
+ }
+ return 0
+}
+
+func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return nil
+}
+
+// Information on OpenCensus Library.
+type LibraryInfo struct {
+ // Language of OpenCensus Library.
+ Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
+ // Version of Agent exporter of Library.
+ ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
+ // Version of OpenCensus Library.
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
+func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
+func (*LibraryInfo) ProtoMessage() {}
+func (*LibraryInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{2}
+}
+
+func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
+}
+func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
+}
+func (m *LibraryInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LibraryInfo.Merge(m, src)
+}
+func (m *LibraryInfo) XXX_Size() int {
+ return xxx_messageInfo_LibraryInfo.Size(m)
+}
+func (m *LibraryInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
+
+func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
+ if m != nil {
+ return m.Language
+ }
+ return LibraryInfo_LANGUAGE_UNSPECIFIED
+}
+
+func (m *LibraryInfo) GetExporterVersion() string {
+ if m != nil {
+ return m.ExporterVersion
+ }
+ return ""
+}
+
+func (m *LibraryInfo) GetCoreLibraryVersion() string {
+ if m != nil {
+ return m.CoreLibraryVersion
+ }
+ return ""
+}
+
+// Additional service information.
+type ServiceInfo struct {
+ // Name of the service.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
+func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
+func (*ServiceInfo) ProtoMessage() {}
+func (*ServiceInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{3}
+}
+
+func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
+}
+func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
+}
+func (m *ServiceInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceInfo.Merge(m, src)
+}
+func (m *ServiceInfo) XXX_Size() int {
+ return xxx_messageInfo_ServiceInfo.Size(m)
+}
+func (m *ServiceInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
+
+func (m *ServiceInfo) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
+ proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
+ proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
+ proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
+ proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
+}
+
+var fileDescriptor_126c72ed8a252c84 = []byte{
+ // 590 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
+ 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
+ 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
+ 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
+ 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
+ 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
+ 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
+ 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
+ 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
+ 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
+ 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
+ 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
+ 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
+ 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
+ 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
+ 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
+ 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
+ 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
+ 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
+ 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
+ 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
+ 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
+ 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
+ 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
+ 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
+ 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
+ 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
+ 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
+ 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
+ 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
+ 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
+ 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
+ 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
+ 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
+ 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
+ 0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
+ 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
new file mode 100644
index 00000000000..6759ced888b
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
@@ -0,0 +1,1370 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/metrics/v1/metrics.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The kind of metric. It describes how the data is reported.
+//
+// A gauge is an instantaneous measurement of a value.
+//
+// A cumulative measurement is a value accumulated over a time interval. In
+// a time series, cumulative measurements should have the same start time,
+// increasing values and increasing end times, until an event resets the
+// cumulative value to zero and sets a new start time for the following
+// points.
+type MetricDescriptor_Type int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
+ // Integer gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
+ // Floating point gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
+ // Distribution gauge measurement. The count and sum can go both up and
+ // down. Recorded values are always >= 0.
+ // Used in scenarios like a snapshot of time the current items in a queue
+ // have spent there.
+ MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
+ // Integer cumulative measurement. The value cannot decrease, if resets
+ // then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
+ // Floating point cumulative measurement. The value cannot decrease, if
+ // resets then the start_time should also be reset. Recorded values are
+ // always >= 0.
+ MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
+ // Distribution cumulative measurement. The count and sum cannot decrease,
+ // if resets then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
+ // Some frameworks implemented Histograms as a summary of observations
+ // (usually things like request durations and response sizes). While it
+ // also provides a total count of observations and a sum of all observed
+ // values, it calculates configurable percentiles over a sliding time
+ // window. This is not recommended, since it cannot be aggregated.
+ MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
+)
+
+var MetricDescriptor_Type_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "GAUGE_INT64",
+ 2: "GAUGE_DOUBLE",
+ 3: "GAUGE_DISTRIBUTION",
+ 4: "CUMULATIVE_INT64",
+ 5: "CUMULATIVE_DOUBLE",
+ 6: "CUMULATIVE_DISTRIBUTION",
+ 7: "SUMMARY",
+}
+
+var MetricDescriptor_Type_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "GAUGE_INT64": 1,
+ "GAUGE_DOUBLE": 2,
+ "GAUGE_DISTRIBUTION": 3,
+ "CUMULATIVE_INT64": 4,
+ "CUMULATIVE_DOUBLE": 5,
+ "CUMULATIVE_DISTRIBUTION": 6,
+ "SUMMARY": 7,
+}
+
+func (x MetricDescriptor_Type) String() string {
+ return proto.EnumName(MetricDescriptor_Type_name, int32(x))
+}
+
+func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{1, 0}
+}
+
+// Defines a Metric which has one or more timeseries.
+type Metric struct {
+ // The descriptor of the Metric. This is an optimization for network wire
+ // size, from data-model perspective a Metric contains always a
+ // MetricDescriptor.
+ //
+ // Types that are valid to be assigned to Descriptor_:
+ // *Metric_MetricDescriptor
+ // *Metric_Name
+ Descriptor_ isMetric_Descriptor_ `protobuf_oneof:"descriptor"`
+ // One or more timeseries for a single metric, where each timeseries has
+ // one or more points.
+ Timeseries []*TimeSeries `protobuf:"bytes,3,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+ // The resource for the metric. If unset, it may be set to a default value
+ // provided for a sequence of messages in an RPC stream.
+ Resource *v1.Resource `protobuf:"bytes,4,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{0}
+}
+
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (m *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(m, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+type isMetric_Descriptor_ interface {
+ isMetric_Descriptor_()
+}
+
+type Metric_MetricDescriptor struct {
+ MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3,oneof"`
+}
+
+type Metric_Name struct {
+ Name string `protobuf:"bytes,2,opt,name=name,proto3,oneof"`
+}
+
+func (*Metric_MetricDescriptor) isMetric_Descriptor_() {}
+
+func (*Metric_Name) isMetric_Descriptor_() {}
+
+func (m *Metric) GetDescriptor_() isMetric_Descriptor_ {
+ if m != nil {
+ return m.Descriptor_
+ }
+ return nil
+}
+
+func (m *Metric) GetMetricDescriptor() *MetricDescriptor {
+ if x, ok := m.GetDescriptor_().(*Metric_MetricDescriptor); ok {
+ return x.MetricDescriptor
+ }
+ return nil
+}
+
+func (m *Metric) GetName() string {
+ if x, ok := m.GetDescriptor_().(*Metric_Name); ok {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Metric) GetTimeseries() []*TimeSeries {
+ if m != nil {
+ return m.Timeseries
+ }
+ return nil
+}
+
+func (m *Metric) GetResource() *v1.Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Metric) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Metric_OneofMarshaler, _Metric_OneofUnmarshaler, _Metric_OneofSizer, []interface{}{
+ (*Metric_MetricDescriptor)(nil),
+ (*Metric_Name)(nil),
+ }
+}
+
+func _Metric_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Metric)
+ // descriptor
+ switch x := m.Descriptor_.(type) {
+ case *Metric_MetricDescriptor:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MetricDescriptor); err != nil {
+ return err
+ }
+ case *Metric_Name:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.Name)
+ case nil:
+ default:
+ return fmt.Errorf("Metric.Descriptor_ has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Metric_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Metric)
+ switch tag {
+ case 1: // descriptor.metric_descriptor
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(MetricDescriptor)
+ err := b.DecodeMessage(msg)
+ m.Descriptor_ = &Metric_MetricDescriptor{msg}
+ return true, err
+ case 2: // descriptor.name
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Descriptor_ = &Metric_Name{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Metric_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Metric)
+ // descriptor
+ switch x := m.Descriptor_.(type) {
+ case *Metric_MetricDescriptor:
+ s := proto.Size(x.MetricDescriptor)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Metric_Name:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.Name)))
+ n += len(x.Name)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Defines a metric type and its schema.
+type MetricDescriptor struct {
+ // The metric type, including its DNS name prefix. It must be unique.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A detailed description of the metric, which can be used in documentation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The unit in which the metric value is reported. Follows the format
+ // described by http://unitsofmeasure.org/ucum.html.
+ Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+ Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"`
+ // The label keys associated with the metric descriptor.
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
+func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
+func (*MetricDescriptor) ProtoMessage() {}
+func (*MetricDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{1}
+}
+
+func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b)
+}
+func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic)
+}
+func (m *MetricDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricDescriptor.Merge(m, src)
+}
+func (m *MetricDescriptor) XXX_Size() int {
+ return xxx_messageInfo_MetricDescriptor.Size(m)
+}
+func (m *MetricDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo
+
+func (m *MetricDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetUnit() string {
+ if m != nil {
+ return m.Unit
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetType() MetricDescriptor_Type {
+ if m != nil {
+ return m.Type
+ }
+ return MetricDescriptor_UNSPECIFIED
+}
+
+func (m *MetricDescriptor) GetLabelKeys() []*LabelKey {
+ if m != nil {
+ return m.LabelKeys
+ }
+ return nil
+}
+
+// Defines a label key associated with a metric descriptor.
+type LabelKey struct {
+ // The key for the label.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // A human-readable description of what this label key represents.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelKey) Reset() { *m = LabelKey{} }
+func (m *LabelKey) String() string { return proto.CompactTextString(m) }
+func (*LabelKey) ProtoMessage() {}
+func (*LabelKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{2}
+}
+
+func (m *LabelKey) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelKey.Unmarshal(m, b)
+}
+func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic)
+}
+func (m *LabelKey) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelKey.Merge(m, src)
+}
+func (m *LabelKey) XXX_Size() int {
+ return xxx_messageInfo_LabelKey.Size(m)
+}
+func (m *LabelKey) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelKey proto.InternalMessageInfo
+
+func (m *LabelKey) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *LabelKey) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric.
+type TimeSeries struct {
+ // Must be present for cumulative metrics. The time when the cumulative value
+ // was reset to zero. Exclusive. The cumulative value is over the time interval
+ // (start_timestamp, timestamp]. If not specified, the backend can use the
+ // previous recorded value.
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ // The set of label values that uniquely identify this timeseries. Applies to
+ // all points. The order of label values must match that of label keys in the
+ // metric descriptor.
+ LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
+ // The data points of this timeseries. Point.value type MUST match the
+ // MetricDescriptor.type.
+ Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TimeSeries) Reset() { *m = TimeSeries{} }
+func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{3}
+}
+
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
+}
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+}
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
+}
+func (m *TimeSeries) XXX_Size() int {
+ return xxx_messageInfo_TimeSeries.Size(m)
+}
+func (m *TimeSeries) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+
+func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetLabelValues() []*LabelValue {
+ if m != nil {
+ return m.LabelValues
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetPoints() []*Point {
+ if m != nil {
+ return m.Points
+ }
+ return nil
+}
+
+type LabelValue struct {
+ // The value for the label.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // If false the value field is ignored and considered not set.
+ // This is used to differentiate a missing label from an empty string.
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelValue) Reset() { *m = LabelValue{} }
+func (m *LabelValue) String() string { return proto.CompactTextString(m) }
+func (*LabelValue) ProtoMessage() {}
+func (*LabelValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{4}
+}
+
+func (m *LabelValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelValue.Unmarshal(m, b)
+}
+func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic)
+}
+func (m *LabelValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelValue.Merge(m, src)
+}
+func (m *LabelValue) XXX_Size() int {
+ return xxx_messageInfo_LabelValue.Size(m)
+}
+func (m *LabelValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelValue proto.InternalMessageInfo
+
+func (m *LabelValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func (m *LabelValue) GetHasValue() bool {
+ if m != nil {
+ return m.HasValue
+ }
+ return false
+}
+
+// A timestamped measurement.
+type Point struct {
+ // The moment when this point was recorded. Inclusive.
+ // If not specified, the timestamp will be decided by the backend.
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // The actual point value.
+ //
+ // Types that are valid to be assigned to Value:
+ // *Point_Int64Value
+ // *Point_DoubleValue
+ // *Point_DistributionValue
+ // *Point_SummaryValue
+ Value isPoint_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Point) Reset() { *m = Point{} }
+func (m *Point) String() string { return proto.CompactTextString(m) }
+func (*Point) ProtoMessage() {}
+func (*Point) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{5}
+}
+
+func (m *Point) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Point.Unmarshal(m, b)
+}
+func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Point.Marshal(b, m, deterministic)
+}
+func (m *Point) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Point.Merge(m, src)
+}
+func (m *Point) XXX_Size() int {
+ return xxx_messageInfo_Point.Size(m)
+}
+func (m *Point) XXX_DiscardUnknown() {
+ xxx_messageInfo_Point.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Point proto.InternalMessageInfo
+
+func (m *Point) GetTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+type isPoint_Value interface {
+ isPoint_Value()
+}
+
+type Point_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Point_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Point_DistributionValue struct {
+ DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+type Point_SummaryValue struct {
+ SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"`
+}
+
+func (*Point_Int64Value) isPoint_Value() {}
+
+func (*Point_DoubleValue) isPoint_Value() {}
+
+func (*Point_DistributionValue) isPoint_Value() {}
+
+func (*Point_SummaryValue) isPoint_Value() {}
+
+func (m *Point) GetValue() isPoint_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Point) GetInt64Value() int64 {
+ if x, ok := m.GetValue().(*Point_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *Point) GetDoubleValue() float64 {
+ if x, ok := m.GetValue().(*Point_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *Point) GetDistributionValue() *DistributionValue {
+ if x, ok := m.GetValue().(*Point_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+func (m *Point) GetSummaryValue() *SummaryValue {
+ if x, ok := m.GetValue().(*Point_SummaryValue); ok {
+ return x.SummaryValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Point) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Point_OneofMarshaler, _Point_OneofUnmarshaler, _Point_OneofSizer, []interface{}{
+ (*Point_Int64Value)(nil),
+ (*Point_DoubleValue)(nil),
+ (*Point_DistributionValue)(nil),
+ (*Point_SummaryValue)(nil),
+ }
+}
+
+func _Point_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Point)
+ // value
+ switch x := m.Value.(type) {
+ case *Point_Int64Value:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Int64Value))
+ case *Point_DoubleValue:
+ b.EncodeVarint(3<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.DoubleValue))
+ case *Point_DistributionValue:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.DistributionValue); err != nil {
+ return err
+ }
+ case *Point_SummaryValue:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.SummaryValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Point.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Point_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Point)
+ switch tag {
+ case 2: // value.int64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &Point_Int64Value{int64(x)}
+ return true, err
+ case 3: // value.double_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Value = &Point_DoubleValue{math.Float64frombits(x)}
+ return true, err
+ case 4: // value.distribution_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DistributionValue)
+ err := b.DecodeMessage(msg)
+ m.Value = &Point_DistributionValue{msg}
+ return true, err
+ case 5: // value.summary_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(SummaryValue)
+ err := b.DecodeMessage(msg)
+ m.Value = &Point_SummaryValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Point_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Point)
+ // value
+ switch x := m.Value.(type) {
+ case *Point_Int64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Int64Value))
+ case *Point_DoubleValue:
+ n += 1 // tag and wire
+ n += 8
+ case *Point_DistributionValue:
+ s := proto.Size(x.DistributionValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Point_SummaryValue:
+ s := proto.Size(x.SummaryValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type DistributionValue struct {
+ // The number of values in the population. Must be non-negative. This value
+ // must equal the sum of the values in bucket_counts if a histogram is
+ // provided.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of the values in the population. If count is zero then this field
+ // must be zero.
+ Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // The sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If count is zero then this field must be zero.
+ SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
+ // Don't change bucket boundaries within a TimeSeries if your backend doesn't
+ // support this. To save network bandwidth this field can be sent only the
+ // first time a metric is sent when using a streaming RPC.
+ BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ // If there is a histogram, then the sum of the values in the Bucket counts
+ // must equal the value in the count field of the distribution.
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue) Reset() { *m = DistributionValue{} }
+func (m *DistributionValue) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue) ProtoMessage() {}
+func (*DistributionValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6}
+}
+
+func (m *DistributionValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue.Unmarshal(m, b)
+}
+func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue.Merge(m, src)
+}
+func (m *DistributionValue) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue.Size(m)
+}
+func (m *DistributionValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue proto.InternalMessageInfo
+
+func (m *DistributionValue) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetSum() float64 {
+ if m != nil {
+ return m.Sum
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetSumOfSquaredDeviation() float64 {
+ if m != nil {
+ return m.SumOfSquaredDeviation
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions {
+ if m != nil {
+ return m.BucketOptions
+ }
+ return nil
+}
+
+func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket {
+ if m != nil {
+ return m.Buckets
+ }
+ return nil
+}
+
+// A Distribution may optionally contain a histogram of the values in the
+// population. The bucket boundaries for that histogram are described by
+// BucketOptions.
+//
+// If bucket_options has no type, then there is no histogram associated with
+// the Distribution.
+type DistributionValue_BucketOptions struct {
+ // Types that are valid to be assigned to Type:
+ // *DistributionValue_BucketOptions_Explicit_
+ Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} }
+func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_BucketOptions) ProtoMessage() {}
+func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 0}
+}
+
+func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b)
+}
+func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src)
+}
+func (m *DistributionValue_BucketOptions) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Size(m)
+}
+func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo
+
+type isDistributionValue_BucketOptions_Type interface {
+ isDistributionValue_BucketOptions_Type()
+}
+
+type DistributionValue_BucketOptions_Explicit_ struct {
+ Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
+}
+
+func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
+
+func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
+ if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
+ return x.Explicit
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*DistributionValue_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _DistributionValue_BucketOptions_OneofMarshaler, _DistributionValue_BucketOptions_OneofUnmarshaler, _DistributionValue_BucketOptions_OneofSizer, []interface{}{
+ (*DistributionValue_BucketOptions_Explicit_)(nil),
+ }
+}
+
+func _DistributionValue_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*DistributionValue_BucketOptions)
+ // type
+ switch x := m.Type.(type) {
+ case *DistributionValue_BucketOptions_Explicit_:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Explicit); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("DistributionValue_BucketOptions.Type has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _DistributionValue_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*DistributionValue_BucketOptions)
+ switch tag {
+ case 1: // type.explicit
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DistributionValue_BucketOptions_Explicit)
+ err := b.DecodeMessage(msg)
+ m.Type = &DistributionValue_BucketOptions_Explicit_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _DistributionValue_BucketOptions_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*DistributionValue_BucketOptions)
+ // type
+ switch x := m.Type.(type) {
+ case *DistributionValue_BucketOptions_Explicit_:
+ s := proto.Size(x.Explicit)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Specifies a set of buckets with arbitrary upper-bounds.
+// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
+// index i are:
+//
+// [0, bucket_bounds[i]) for i == 0
+// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
+// [bucket_bounds[i-1], +infinity) for i == N-1
+type DistributionValue_BucketOptions_Explicit struct {
+ // The values must be strictly increasing and > 0.
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_BucketOptions_Explicit) Reset() {
+ *m = DistributionValue_BucketOptions_Explicit{}
+}
+func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {}
+func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0}
+}
+
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo
+
+func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 {
+ if m != nil {
+ return m.Bounds
+ }
+ return nil
+}
+
+type DistributionValue_Bucket struct {
+ // The number of values in each bucket of the histogram, as described in
+ // bucket_bounds.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} }
+func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_Bucket) ProtoMessage() {}
+func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 1}
+}
+
+func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b)
+}
+func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_Bucket.Merge(m, src)
+}
+func (m *DistributionValue_Bucket) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_Bucket.Size(m)
+}
+func (m *DistributionValue_Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo
+
+func (m *DistributionValue_Bucket) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar {
+ if m != nil {
+ return m.Exemplar
+ }
+ return nil
+}
+
+// Exemplars are example points that may be used to annotate aggregated
+// Distribution values. They are metadata that gives information about a
+// particular value added to a Distribution bucket.
+type DistributionValue_Exemplar struct {
+ // Value of the exemplar point. It determines which bucket the exemplar
+ // belongs to.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The observation (sampling) time of the above value.
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Contextual information about the example value.
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} }
+func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_Exemplar) ProtoMessage() {}
+func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 2}
+}
+
+func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b)
+}
+func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src)
+}
+func (m *DistributionValue_Exemplar) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_Exemplar.Size(m)
+}
+func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo
+
+func (m *DistributionValue_Exemplar) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *DistributionValue_Exemplar) GetAttachments() map[string]string {
+ if m != nil {
+ return m.Attachments
+ }
+ return nil
+}
+
+// The start_timestamp only applies to the count and sum in the SummaryValue.
+type SummaryValue struct {
+ // The total number of recorded values since start_time. Optional since
+ // some systems don't expose this.
+ Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The total sum of recorded values since start_time. Optional since some
+ // systems don't expose this. If count is zero then this field must be zero.
+ // This field must be unset if the sum is not available.
+ Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // Values calculated over an arbitrary time window.
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue) Reset() { *m = SummaryValue{} }
+func (m *SummaryValue) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue) ProtoMessage() {}
+func (*SummaryValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7}
+}
+
+func (m *SummaryValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue.Unmarshal(m, b)
+}
+func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue.Merge(m, src)
+}
+func (m *SummaryValue) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue.Size(m)
+}
+func (m *SummaryValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue proto.InternalMessageInfo
+
+func (m *SummaryValue) GetCount() *wrappers.Int64Value {
+ if m != nil {
+ return m.Count
+ }
+ return nil
+}
+
+func (m *SummaryValue) GetSum() *wrappers.DoubleValue {
+ if m != nil {
+ return m.Sum
+ }
+ return nil
+}
+
+func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+// The values in this message can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type SummaryValue_Snapshot struct {
+ // The number of values in the snapshot. Optional since some systems don't
+ // expose this.
+ Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of values in the snapshot. Optional since some systems don't
+ // expose this. If count is zero then this field must be zero or not set
+ // (if not supported).
+ Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // A list of values at different percentiles of the distribution calculated
+ // from the current snapshot. The percentiles must be strictly increasing.
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} }
+func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue_Snapshot) ProtoMessage() {}
+func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7, 0}
+}
+
+func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b)
+}
+func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src)
+}
+func (m *SummaryValue_Snapshot) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue_Snapshot.Size(m)
+}
+func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo
+
+func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value {
+ if m != nil {
+ return m.Count
+ }
+ return nil
+}
+
+func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue {
+ if m != nil {
+ return m.Sum
+ }
+ return nil
+}
+
+func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile {
+ if m != nil {
+ return m.PercentileValues
+ }
+ return nil
+}
+
+// Represents the value at a given percentile of a distribution.
+type SummaryValue_Snapshot_ValueAtPercentile struct {
+ // The percentile of a distribution. Must be in the interval
+ // (0.0, 100.0].
+ Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"`
+ // The value at the given percentile of a distribution.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() {
+ *m = SummaryValue_Snapshot_ValueAtPercentile{}
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {}
+func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0}
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 {
+ if m != nil {
+ return m.Percentile
+ }
+ return 0
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value)
+ proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric")
+ proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor")
+ proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey")
+ proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries")
+ proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue")
+ proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point")
+ proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue")
+ proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions")
+ proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit")
+ proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket")
+ proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry")
+ proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue")
+ proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot")
+ proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a)
+}
+
+var fileDescriptor_0ee3deb72053811a = []byte{
+ // 1114 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5,
+ 0x17, 0xf7, 0xda, 0x89, 0xe3, 0x9c, 0x75, 0xd3, 0xf5, 0x28, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61,
+ 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x36, 0xe4, 0xc3,
+ 0x1a, 0xdb, 0x95, 0x40, 0x48, 0xd6, 0x7a, 0x3d, 0x49, 0x96, 0x78, 0x3f, 0xba, 0x33, 0x6b, 0xf0,
+ 0x0b, 0xf0, 0x08, 0x70, 0xcb, 0x2d, 0xe2, 0x39, 0xb8, 0xe2, 0x09, 0x78, 0x0a, 0x5e, 0x01, 0xed,
+ 0xcc, 0xec, 0x47, 0x62, 0x70, 0x71, 0x91, 0xb8, 0x9b, 0x73, 0xe6, 0xfc, 0x7e, 0x73, 0xbe, 0x77,
+ 0xe1, 0x91, 0xe7, 0x13, 0xd7, 0x22, 0x2e, 0x0d, 0x69, 0xdd, 0x0f, 0x3c, 0xe6, 0xd5, 0x1d, 0xc2,
+ 0x02, 0xdb, 0xa2, 0xf5, 0xd9, 0x61, 0x7c, 0x34, 0xf8, 0x05, 0xda, 0x49, 0x4d, 0x85, 0xc6, 0x88,
+ 0xef, 0x67, 0x87, 0xb5, 0x77, 0xae, 0x3c, 0xef, 0x6a, 0x4a, 0x04, 0xc7, 0x38, 0xbc, 0xac, 0x33,
+ 0xdb, 0x21, 0x94, 0x99, 0x8e, 0x2f, 0x6c, 0x6b, 0xbb, 0x77, 0x0d, 0xbe, 0x0d, 0x4c, 0xdf, 0x27,
+ 0x81, 0xe4, 0xaa, 0x7d, 0xb4, 0xe0, 0x48, 0x40, 0xa8, 0x17, 0x06, 0x16, 0x89, 0x3c, 0x89, 0xcf,
+ 0xc2, 0x58, 0xff, 0x31, 0x0f, 0xc5, 0x33, 0xfe, 0x38, 0xfa, 0x1a, 0x2a, 0xc2, 0x8d, 0xd1, 0x84,
+ 0x50, 0x2b, 0xb0, 0x7d, 0xe6, 0x05, 0x55, 0x65, 0x4f, 0xd9, 0x57, 0x1b, 0x07, 0xc6, 0x12, 0x8f,
+ 0x0d, 0x81, 0x6f, 0x25, 0xa0, 0x4e, 0x0e, 0x6b, 0xce, 0x1d, 0x1d, 0xda, 0x86, 0x35, 0xd7, 0x74,
+ 0x48, 0x35, 0xbf, 0xa7, 0xec, 0x6f, 0x76, 0x72, 0x98, 0x4b, 0xe8, 0x04, 0x80, 0x87, 0x47, 0x02,
+ 0x9b, 0xd0, 0x6a, 0x61, 0xaf, 0xb0, 0xaf, 0x36, 0x3e, 0x5c, 0xfa, 0xd8, 0xc0, 0x76, 0x48, 0x9f,
+ 0x9b, 0xe3, 0x0c, 0x14, 0x35, 0xa1, 0x14, 0x47, 0x56, 0x5d, 0xe3, 0x3e, 0x7f, 0xb0, 0x48, 0x93,
+ 0xc4, 0x3e, 0x3b, 0x34, 0xb0, 0x3c, 0xe3, 0x04, 0xd7, 0x2c, 0x03, 0xa4, 0x91, 0xeb, 0x3f, 0x14,
+ 0x40, 0xbb, 0x1b, 0x19, 0x42, 0x32, 0x8a, 0x28, 0x2d, 0x9b, 0x32, 0x86, 0x3d, 0x50, 0x63, 0x98,
+ 0xed, 0xb9, 0x22, 0x40, 0x9c, 0x55, 0x45, 0xa8, 0xd0, 0xb5, 0x59, 0xb5, 0x20, 0x50, 0xd1, 0x19,
+ 0xbd, 0x84, 0x35, 0x36, 0xf7, 0x85, 0xb3, 0x5b, 0x8d, 0xc6, 0x4a, 0x09, 0x36, 0x06, 0x73, 0x9f,
+ 0x60, 0x8e, 0x47, 0x2d, 0x80, 0xa9, 0x39, 0x26, 0xd3, 0xd1, 0x0d, 0x99, 0xd3, 0xea, 0x3a, 0xcf,
+ 0xe0, 0xfb, 0x4b, 0xd9, 0x4e, 0x23, 0xf3, 0x2f, 0xc8, 0x1c, 0x6f, 0x4e, 0xe5, 0x89, 0xea, 0x3f,
+ 0x2b, 0xb0, 0x16, 0x91, 0xa2, 0xfb, 0xa0, 0x0e, 0xcf, 0xfb, 0xbd, 0xf6, 0x71, 0xf7, 0x65, 0xb7,
+ 0xdd, 0xd2, 0x72, 0x91, 0xe2, 0xe4, 0x68, 0x78, 0xd2, 0x1e, 0x75, 0xcf, 0x07, 0x4f, 0x1e, 0x6b,
+ 0x0a, 0xd2, 0xa0, 0x2c, 0x14, 0xad, 0x8b, 0x61, 0xf3, 0xb4, 0xad, 0xe5, 0xd1, 0x43, 0x40, 0x52,
+ 0xd3, 0xed, 0x0f, 0x70, 0xb7, 0x39, 0x1c, 0x74, 0x2f, 0xce, 0xb5, 0x02, 0xda, 0x06, 0xed, 0x78,
+ 0x78, 0x36, 0x3c, 0x3d, 0x1a, 0x74, 0x5f, 0xc5, 0xf8, 0x35, 0xf4, 0x00, 0x2a, 0x19, 0xad, 0x24,
+ 0x59, 0x47, 0x3b, 0xf0, 0xbf, 0xac, 0x3a, 0xcb, 0x54, 0x44, 0x2a, 0x6c, 0xf4, 0x87, 0x67, 0x67,
+ 0x47, 0xf8, 0x4b, 0x6d, 0x43, 0x7f, 0x01, 0xa5, 0x38, 0x04, 0xa4, 0x41, 0xe1, 0x86, 0xcc, 0x65,
+ 0x39, 0xa2, 0xe3, 0x9b, 0xab, 0xa1, 0xff, 0xae, 0x00, 0xa4, 0x5d, 0x84, 0x8e, 0xe1, 0x3e, 0x65,
+ 0x66, 0xc0, 0x46, 0xc9, 0x9c, 0xc9, 0xa6, 0xaf, 0x19, 0x62, 0xd0, 0x8c, 0x78, 0xd0, 0x78, 0xef,
+ 0x71, 0x0b, 0xbc, 0xc5, 0x21, 0x89, 0x8c, 0x3e, 0x87, 0xb2, 0xa8, 0xc2, 0xcc, 0x9c, 0x86, 0x84,
+ 0x56, 0xf3, 0xff, 0xa0, 0x93, 0x79, 0x10, 0xaf, 0x22, 0x7b, 0xac, 0x4e, 0x93, 0x33, 0x45, 0xcf,
+ 0xa1, 0xe8, 0x7b, 0xb6, 0xcb, 0xe2, 0x79, 0xd0, 0x97, 0xb2, 0xf4, 0x22, 0x53, 0x2c, 0x11, 0xfa,
+ 0x67, 0x00, 0x29, 0x2d, 0xda, 0x86, 0x75, 0xee, 0x8f, 0xcc, 0x8f, 0x10, 0xd0, 0x0e, 0x6c, 0x5e,
+ 0x9b, 0x54, 0x78, 0xca, 0xf3, 0x53, 0xc2, 0xa5, 0x6b, 0x93, 0x72, 0x88, 0xfe, 0x6b, 0x1e, 0xd6,
+ 0x39, 0x25, 0x7a, 0x06, 0x9b, 0xab, 0x64, 0x24, 0x35, 0x46, 0xef, 0x82, 0x6a, 0xbb, 0xec, 0xc9,
+ 0xe3, 0xcc, 0x13, 0x85, 0x4e, 0x0e, 0x03, 0x57, 0x0a, 0xcf, 0xde, 0x83, 0xf2, 0xc4, 0x0b, 0xc7,
+ 0x53, 0x22, 0x6d, 0xa2, 0xc9, 0x50, 0x3a, 0x39, 0xac, 0x0a, 0xad, 0x30, 0x1a, 0x01, 0x9a, 0xd8,
+ 0x94, 0x05, 0xf6, 0x38, 0x8c, 0x0a, 0x27, 0x4d, 0xc5, 0x74, 0x1b, 0x4b, 0x93, 0xd2, 0xca, 0xc0,
+ 0x38, 0x57, 0x27, 0x87, 0x2b, 0x93, 0xbb, 0x4a, 0xd4, 0x83, 0x7b, 0x34, 0x74, 0x1c, 0x33, 0x98,
+ 0x4b, 0xee, 0x75, 0xce, 0xfd, 0x68, 0x29, 0x77, 0x5f, 0x20, 0x62, 0xda, 0x32, 0xcd, 0xc8, 0xcd,
+ 0x0d, 0x99, 0x71, 0xfd, 0xb7, 0x22, 0x54, 0x16, 0xbc, 0x88, 0x0a, 0x62, 0x79, 0xa1, 0xcb, 0x78,
+ 0x3e, 0x0b, 0x58, 0x08, 0x51, 0x13, 0xd3, 0xd0, 0xe1, 0x79, 0x52, 0x70, 0x74, 0x44, 0x4f, 0xa1,
+ 0x4a, 0x43, 0x67, 0xe4, 0x5d, 0x8e, 0xe8, 0xeb, 0xd0, 0x0c, 0xc8, 0x64, 0x34, 0x21, 0x33, 0xdb,
+ 0xe4, 0x1d, 0xcd, 0x53, 0x85, 0x1f, 0xd0, 0xd0, 0xb9, 0xb8, 0xec, 0x8b, 0xdb, 0x56, 0x7c, 0x89,
+ 0x2c, 0xd8, 0x1a, 0x87, 0xd6, 0x0d, 0x61, 0x23, 0x8f, 0x37, 0x3b, 0x95, 0xe9, 0xfa, 0x74, 0xb5,
+ 0x74, 0x19, 0x4d, 0x4e, 0x72, 0x21, 0x38, 0xf0, 0xbd, 0x71, 0x56, 0x44, 0x17, 0xb0, 0x21, 0x14,
+ 0xf1, 0xbe, 0xf9, 0xe4, 0xad, 0xd8, 0x71, 0xcc, 0x52, 0xfb, 0x49, 0x81, 0x7b, 0xb7, 0x5e, 0x44,
+ 0x16, 0x94, 0xc8, 0x77, 0xfe, 0xd4, 0xb6, 0x6c, 0x26, 0x7b, 0xaf, 0xfd, 0x6f, 0x22, 0x30, 0xda,
+ 0x92, 0xac, 0x93, 0xc3, 0x09, 0x71, 0x4d, 0x87, 0x52, 0xac, 0x47, 0x0f, 0xa1, 0x38, 0xf6, 0x42,
+ 0x77, 0x42, 0xab, 0xca, 0x5e, 0x61, 0x5f, 0xc1, 0x52, 0x6a, 0x16, 0xc5, 0x9a, 0xae, 0x51, 0x28,
+ 0x0a, 0xc6, 0xbf, 0xa9, 0x61, 0x3f, 0x72, 0x98, 0x38, 0xfe, 0xd4, 0x0c, 0x78, 0x21, 0xd5, 0xc6,
+ 0xd3, 0x15, 0x1d, 0x6e, 0x4b, 0x38, 0x4e, 0x88, 0x6a, 0xdf, 0xe7, 0x23, 0x0f, 0x85, 0x70, 0x7b,
+ 0x98, 0x95, 0x78, 0x98, 0x6f, 0x4d, 0x69, 0x7e, 0x95, 0x29, 0xfd, 0x06, 0x54, 0x93, 0x31, 0xd3,
+ 0xba, 0x76, 0x48, 0xba, 0x6b, 0x3a, 0x6f, 0xe9, 0xb4, 0x71, 0x94, 0x52, 0xb5, 0x5d, 0x16, 0xcc,
+ 0x71, 0x96, 0xbc, 0xf6, 0x02, 0xb4, 0xbb, 0x06, 0x7f, 0xb1, 0xba, 0x93, 0x08, 0xf3, 0x99, 0x75,
+ 0xf5, 0x3c, 0xff, 0x4c, 0xd1, 0xff, 0x28, 0x40, 0x39, 0x3b, 0x77, 0xe8, 0x30, 0x5b, 0x04, 0xb5,
+ 0xb1, 0xb3, 0x10, 0x72, 0x37, 0xd9, 0x35, 0x71, 0x85, 0x8c, 0x74, 0xca, 0xd4, 0xc6, 0xff, 0x17,
+ 0x00, 0xad, 0x74, 0xf1, 0x88, 0x19, 0x3c, 0x87, 0x12, 0x75, 0x4d, 0x9f, 0x5e, 0x7b, 0xe2, 0xc3,
+ 0xad, 0xbe, 0xe1, 0x23, 0x9d, 0xf5, 0xcf, 0xe8, 0x4b, 0x24, 0x4e, 0x38, 0x6a, 0xbf, 0xe4, 0xa1,
+ 0x14, 0xab, 0xff, 0x0b, 0xff, 0x5f, 0x43, 0xc5, 0x27, 0x81, 0x45, 0x5c, 0x66, 0xc7, 0x6b, 0x36,
+ 0xae, 0x72, 0x6b, 0xf5, 0x40, 0x0c, 0x2e, 0x1e, 0xb1, 0x5e, 0x42, 0x89, 0xb5, 0x94, 0x5e, 0x7c,
+ 0xb9, 0x6a, 0x5d, 0xa8, 0x2c, 0x98, 0xa1, 0x5d, 0x80, 0xd4, 0x50, 0x36, 0x6f, 0x46, 0x73, 0xbb,
+ 0xea, 0x71, 0x5f, 0x37, 0x67, 0xb0, 0x6b, 0x7b, 0xcb, 0xdc, 0x6c, 0x96, 0xc5, 0x5f, 0x11, 0xed,
+ 0x45, 0x17, 0x3d, 0xe5, 0xab, 0xd6, 0x95, 0xcd, 0xae, 0xc3, 0xb1, 0x61, 0x79, 0x4e, 0x5d, 0x60,
+ 0x0e, 0x6c, 0x97, 0xb2, 0x20, 0x8c, 0x7a, 0x8e, 0x6f, 0xc7, 0x7a, 0x4a, 0x77, 0x20, 0x7e, 0x8c,
+ 0xaf, 0x88, 0x7b, 0x70, 0x95, 0xfd, 0x51, 0x1f, 0x17, 0xf9, 0xc5, 0xc7, 0x7f, 0x06, 0x00, 0x00,
+ 0xff, 0xff, 0x24, 0xa6, 0x3d, 0x2b, 0xce, 0x0b, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
new file mode 100644
index 00000000000..560dbd94a07
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
@@ -0,0 +1,99 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Resource information.
+type Resource struct {
+ // Type identifier for the resource.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Set of labels that describe the resource.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Resource) Reset() { *m = Resource{} }
+func (m *Resource) String() string { return proto.CompactTextString(m) }
+func (*Resource) ProtoMessage() {}
+func (*Resource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_584700775a2fc762, []int{0}
+}
+
+func (m *Resource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Resource.Unmarshal(m, b)
+}
+func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
+}
+func (m *Resource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Resource.Merge(m, src)
+}
+func (m *Resource) XXX_Size() int {
+ return xxx_messageInfo_Resource.Size(m)
+}
+func (m *Resource) XXX_DiscardUnknown() {
+ xxx_messageInfo_Resource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Resource proto.InternalMessageInfo
+
+func (m *Resource) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *Resource) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
+}
+
+var fileDescriptor_584700775a2fc762 = []byte{
+ // 234 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
+ 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
+ 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
+ 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
+ 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
+ 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
+ 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
+ 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
+ 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
+ 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
+ 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
+ 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
+ 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
+ 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 00000000000..442c0e0999c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,440 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb // import "github.com/golang/protobuf/ptypes/struct"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (dst *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(dst, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+ // The kind of value.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{1}
+}
+func (*Value) XXX_WellKnownType() string { return "Value" }
+func (m *Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (dst *Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Value.Merge(dst, src)
+}
+func (m *Value) XXX_Size() int {
+ return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
+}
+type Value_NumberValue struct {
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
+}
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
+}
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
+}
+type Value_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
+}
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+func (*Value_NumberValue) isValue_Kind() {}
+func (*Value_StringValue) isValue_Kind() {}
+func (*Value_BoolValue) isValue_Kind() {}
+func (*Value_StructValue) isValue_Kind() {}
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+ if x, ok := m.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+ if x, ok := m.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+}
+
+func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.NumberValue))
+ case *Value_StringValue:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *Value_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(4<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *Value_StructValue:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StructValue); err != nil {
+ return err
+ }
+ case *Value_ListValue:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ListValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Value.Kind has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Value)
+ switch tag {
+ case 1: // kind.null_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_NullValue{NullValue(x)}
+ return true, err
+ case 2: // kind.number_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Kind = &Value_NumberValue{math.Float64frombits(x)}
+ return true, err
+ case 3: // kind.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Kind = &Value_StringValue{x}
+ return true, err
+ case 4: // kind.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Kind = &Value_BoolValue{x != 0}
+ return true, err
+ case 5: // kind.struct_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Struct)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_StructValue{msg}
+ return true, err
+ case 6: // kind.list_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ListValue)
+ err := b.DecodeMessage(msg)
+ m.Kind = &Value_ListValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Value_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Value)
+ // kind
+ switch x := m.Kind.(type) {
+ case *Value_NullValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.NullValue))
+ case *Value_NumberValue:
+ n += 1 // tag and wire
+ n += 8
+ case *Value_StringValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *Value_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case *Value_StructValue:
+ s := proto.Size(x.StructValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Value_ListValue:
+ s := proto.Size(x.ListValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{2}
+}
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (dst *ListValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListValue.Merge(dst, src)
+}
+func (m *ListValue) XXX_Size() int {
+ return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+ proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+ proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+ proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+}
+
+func init() {
+ proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27)
+}
+
+var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+ 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+ 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+ 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+ 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+ 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+ 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+ 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+ 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+ 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+ 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+ 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+ 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+ 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+ 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+ 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+ 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+ 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+ 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+ 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+ 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+ 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+ 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+ 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 00000000000..d1fc4d0b8a0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,443 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers // import "github.com/golang/protobuf/ptypes/wrappers"
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+ // The double value.
+ Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DoubleValue) Reset() { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage() {}
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{0}
+}
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
+}
+func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
+}
+func (dst *DoubleValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DoubleValue.Merge(dst, src)
+}
+func (m *DoubleValue) XXX_Size() int {
+ return xxx_messageInfo_DoubleValue.Size(m)
+}
+func (m *DoubleValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_DoubleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
+
+func (m *DoubleValue) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+ // The float value.
+ Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FloatValue) Reset() { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage() {}
+func (*FloatValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{1}
+}
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+func (m *FloatValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FloatValue.Unmarshal(m, b)
+}
+func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
+}
+func (dst *FloatValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FloatValue.Merge(dst, src)
+}
+func (m *FloatValue) XXX_Size() int {
+ return xxx_messageInfo_FloatValue.Size(m)
+}
+func (m *FloatValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_FloatValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FloatValue proto.InternalMessageInfo
+
+func (m *FloatValue) GetValue() float32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+ // The int64 value.
+ Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int64Value) Reset() { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage() {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{2}
+}
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int64Value.Unmarshal(m, b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+}
+func (dst *Int64Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int64Value.Merge(dst, src)
+}
+func (m *Int64Value) XXX_Size() int {
+ return xxx_messageInfo_Int64Value.Size(m)
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+ // The uint64 value.
+ Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UInt64Value) Reset() { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage() {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{3}
+}
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+}
+func (dst *UInt64Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UInt64Value.Merge(dst, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+ return xxx_messageInfo_UInt64Value.Size(m)
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+ // The int32 value.
+ Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Int32Value) Reset() { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage() {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{4}
+}
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Int32Value.Unmarshal(m, b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+}
+func (dst *Int32Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Int32Value.Merge(dst, src)
+}
+func (m *Int32Value) XXX_Size() int {
+ return xxx_messageInfo_Int32Value.Size(m)
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+ // The uint32 value.
+ Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UInt32Value) Reset() { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage() {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{5}
+}
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+}
+func (dst *UInt32Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UInt32Value.Merge(dst, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+ return xxx_messageInfo_UInt32Value.Size(m)
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+ // The bool value.
+ Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BoolValue) Reset() { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage() {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{6}
+}
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BoolValue.Unmarshal(m, b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+}
+func (dst *BoolValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BoolValue.Merge(dst, src)
+}
+func (m *BoolValue) XXX_Size() int {
+ return xxx_messageInfo_BoolValue.Size(m)
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+ if m != nil {
+ return m.Value
+ }
+ return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+ // The string value.
+ Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StringValue) Reset() { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage() {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{7}
+}
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StringValue.Unmarshal(m, b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+}
+func (dst *StringValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StringValue.Merge(dst, src)
+}
+func (m *StringValue) XXX_Size() int {
+ return xxx_messageInfo_StringValue.Size(m)
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+ // The bytes value.
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BytesValue) Reset() { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage() {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_wrappers_16c7c35c009f3253, []int{8}
+}
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BytesValue.Unmarshal(m, b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+}
+func (dst *BytesValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BytesValue.Merge(dst, src)
+}
+func (m *BytesValue) XXX_Size() int {
+ return xxx_messageInfo_BytesValue.Size(m)
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+ proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+ proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+ proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+ proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+ proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+ proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+ proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+ proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() {
+ proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253)
+}
+
+var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{
+ // 259 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+ 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+ 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+ 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+ 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+ 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+ 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+ 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+ 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+ 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+ 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+ 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+ 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+ 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+ 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+ 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 00000000000..b03310a91fd
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 00000000000..8e26ffeecff
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 00000000000..1cd2d239c96
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 00000000000..9b7cd89b4bc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Index(searchStr, elStr) != -1, nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if arg == true || arg == false {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 00000000000..13c74604c2c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 00000000000..817900c8f52
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune. This
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
+
+// Consume characters until the ending rune "r" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "r", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can reused again.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There's three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare rbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 00000000000..1240a175521
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ lastIndex := len(node.children)
+ if lastIndex > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+ "Unexpected token at the end of the expresssion: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+ "Expected tColon or tNumber" + ", received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ //var right ASTNode
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+ return ASTNode{}, nil
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 00000000000..dae79cbdf33
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 00000000000..ddc1b7d7d46
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// IsFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
+
+// ObjsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// SliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+ var stepValueNegative bool
+ if step < 0 {
+ stepValueNegative = true
+ } else {
+ stepValueNegative = false
+ }
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
+
+// ToArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// ToArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/knative/build/AUTHORS b/vendor/github.com/knative/build/AUTHORS
deleted file mode 100644
index 9c2b57e2ca4..00000000000
--- a/vendor/github.com/knative/build/AUTHORS
+++ /dev/null
@@ -1,8 +0,0 @@
-# This is the list of Knative authors for copyright purposes.
-#
-# This does not necessarily list everyone who has contributed code, since in
-# some cases, their employer may be the copyright holder. To see the full list
-# of contributors, see the revision history in source control.
-Google LLC
-Pivotal Software, Inc.
-Red Hat, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/controller/kodata/HEAD b/vendor/github.com/knative/build/cmd/controller/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/controller/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE b/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/controller/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/controller/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/creds-init/kodata/HEAD b/vendor/github.com/knative/build/cmd/creds-init/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/creds-init/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE b/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/creds-init/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/creds-init/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/git-init/kodata/HEAD b/vendor/github.com/knative/build/cmd/git-init/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/git-init/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE b/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/git-init/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/git-init/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/logs/kodata/HEAD b/vendor/github.com/knative/build/cmd/logs/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/logs/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE b/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/logs/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/logs/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/nop/kodata/HEAD b/vendor/github.com/knative/build/cmd/nop/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/nop/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE b/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/nop/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/nop/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/webhook/kodata/HEAD b/vendor/github.com/knative/build/cmd/webhook/kodata/HEAD
deleted file mode 120000
index 8f63681d362..00000000000
--- a/vendor/github.com/knative/build/cmd/webhook/kodata/HEAD
+++ /dev/null
@@ -1 +0,0 @@
-../../../.git/HEAD
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE b/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/cmd/webhook/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/cmd/webhook/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/config/300-imagecache.yaml b/vendor/github.com/knative/build/config/300-imagecache.yaml
deleted file mode 120000
index f10d6dacf6a..00000000000
--- a/vendor/github.com/knative/build/config/300-imagecache.yaml
+++ /dev/null
@@ -1 +0,0 @@
-../vendor/github.com/knative/caching/config/image.yaml
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go
deleted file mode 100644
index 250c4a0fa7f..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_types.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "context"
-
- "github.com/knative/pkg/apis"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/knative/pkg/kmeta"
-)
-
-// Template is an interface for accessing the BuildTemplateSpec
-// from various forms of template (namespace-/cluster-scoped).
-type Template interface {
- TemplateSpec() BuildTemplateSpec
-}
-
-// +genclient
-// +genclient:noStatus
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// BuildTemplate is a template that can used to easily create Builds.
-type BuildTemplate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec BuildTemplateSpec `json:"spec"`
-}
-
-// Check that our resource implements several interfaces.
-var _ kmeta.OwnerRefable = (*BuildTemplate)(nil)
-var _ Template = (*BuildTemplate)(nil)
-var _ BuildTemplateInterface = (*BuildTemplate)(nil)
-
-// Check that BuildTemplate may be validated and defaulted.
-var _ apis.Validatable = (*BuildTemplate)(nil)
-var _ apis.Defaultable = (*BuildTemplate)(nil)
-
-// BuildTemplateSpec is the spec for a BuildTemplate.
-type BuildTemplateSpec struct {
- // TODO(dprotaso) Metadata.Generation should increment so we
- // can drop this property when conversion webhooks enable us
- // to migrate
- // +optional
- DeprecatedGeneration int64 `json:"generation,omitempty"`
-
- // Parameters defines the parameters that can be populated in a template.
- Parameters []ParameterSpec `json:"parameters,omitempty"`
-
- // Steps are the steps of the build; each step is run sequentially with the
- // source mounted into /workspace.
- Steps []corev1.Container `json:"steps"`
-
- // Volumes is a collection of volumes that are available to mount into the
- // steps of the build.
- Volumes []corev1.Volume `json:"volumes"`
-}
-
-// ParameterSpec defines the possible parameters that can be populated in a
-// template.
-type ParameterSpec struct {
- // Name is the unique name of this template parameter.
- Name string `json:"name"`
-
- // Description is a human-readable explanation of this template parameter.
- Description string `json:"description,omitempty"`
-
- // Default, if specified, defines the default value that should be applied if
- // the build does not specify the value for this parameter.
- Default *string `json:"default,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// BuildTemplateList is a list of BuildTemplate resources.
-type BuildTemplateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []BuildTemplate `json:"items"`
-}
-
-// TemplateSpec returnes the Spec used by the template
-func (bt *BuildTemplate) TemplateSpec() BuildTemplateSpec {
- return bt.Spec
-}
-
-// Copy performes a deep copy
-func (bt *BuildTemplate) Copy() BuildTemplateInterface {
- return bt.DeepCopy()
-}
-
-// GetGroupVersionKind gives kind
-func (bt *BuildTemplate) GetGroupVersionKind() schema.GroupVersionKind {
- return SchemeGroupVersion.WithKind("BuildTemplate")
-}
-
-// SetDefaults for build template
-func (bt *BuildTemplate) SetDefaults(ctx context.Context) {}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go
deleted file mode 100644
index d90b8fde506..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_validation.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "context"
-
- "github.com/knative/pkg/apis"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/util/sets"
-)
-
-// Validate build template
-func (b *BuildTemplate) Validate(ctx context.Context) *apis.FieldError {
- return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate(ctx).ViaField("spec"))
-}
-
-// Validate Build Template
-func (b *BuildTemplateSpec) Validate(ctx context.Context) *apis.FieldError {
- if err := validateSteps(b.Steps); err != nil {
- return err
- }
- if err := ValidateVolumes(b.Volumes); err != nil {
- return err
- }
- if err := validateParameters(b.Parameters); err != nil {
- return err
- }
- return nil
-}
-
-//ValidateVolumes validates collection of volumes that are available to mount into the
-// steps of the build ot build template.
-func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError {
- // Build must not duplicate volume names.
- vols := sets.NewString()
- for _, v := range volumes {
- if vols.Has(v.Name) {
- return apis.ErrMultipleOneOf("name")
- }
- vols.Insert(v.Name)
- }
- return nil
-}
-
-func validateSteps(steps []corev1.Container) *apis.FieldError {
- // Build must not duplicate step names.
- names := sets.NewString()
- for _, s := range steps {
- if s.Image == "" {
- return apis.ErrMissingField("Image")
- }
-
- if s.Name == "" {
- continue
- }
- if names.Has(s.Name) {
- return apis.ErrMultipleOneOf("name")
- }
- names.Insert(s.Name)
- }
- return nil
-}
-
-func validateParameters(params []ParameterSpec) *apis.FieldError {
- // Template must not duplicate parameter names.
- seen := sets.NewString()
- for _, p := range params {
- if seen.Has(p.Name) {
- return apis.ErrInvalidKeyName("ParamName", "b.spec.params")
- }
- seen.Insert(p.Name)
- }
- return nil
-}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go
deleted file mode 100644
index 6f18d1786e9..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_types.go
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/knative/pkg/apis"
- duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- "github.com/knative/pkg/kmeta"
-)
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Build represents a build of a container image. A Build is made up of a
-// source, and a set of steps. Steps can mount volumes to share data between
-// themselves. A build may be created by instantiating a BuildTemplate.
-type Build struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec BuildSpec `json:"spec"`
- Status BuildStatus `json:"status"`
-}
-
-// Check that our resource implements several interfaces.
-var _ kmeta.OwnerRefable = (*Build)(nil)
-
-// Check that Build may be validated and defaulted.
-var _ apis.Validatable = (*Build)(nil)
-var _ apis.Defaultable = (*Build)(nil)
-
-// BuildSpec is the spec for a Build resource.
-type BuildSpec struct {
- // TODO(dprotaso) Metadata.Generation should increment so we
- // can drop this property when conversion webhooks enable us
- // to migrate
- // +optional
- DeprecatedGeneration int64 `json:"generation,omitempty"`
-
- // Source specifies the input to the build.
- // +optional
- Source *SourceSpec `json:"source,omitempty"`
-
- // Sources specifies the inputs to the build.
- // +optional
- Sources []SourceSpec `json:"sources,omitempty"`
-
- // Steps are the steps of the build; each step is run sequentially with the
- // source mounted into /workspace.
- // +optional
- Steps []corev1.Container `json:"steps,omitempty"`
-
- // Volumes is a collection of volumes that are available to mount into the
- // steps of the build.
- // +optional
- Volumes []corev1.Volume `json:"volumes,omitempty"`
-
- // The name of the service account as which to run this build.
- // +optional
- ServiceAccountName string `json:"serviceAccountName,omitempty"`
-
- // Template, if specified, references a BuildTemplate resource to use to
- // populate fields in the build, and optional Arguments to pass to the
- // template. The default Kind of template is BuildTemplate
- // +optional
- Template *TemplateInstantiationSpec `json:"template,omitempty"`
-
- // NodeSelector is a selector which must be true for the pod to fit on a node.
- // Selector which must match a node's labels for the pod to be scheduled on that node.
- // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- // +optional
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
- // Time after which the build times out. Defaults to 10 minutes.
- // Specified build timeout should be less than 24h.
- // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration
- // +optional
- Timeout *metav1.Duration `json:"timeout,omitempty"`
-
- // If specified, the pod's scheduling constraints
- // +optional
- Affinity *corev1.Affinity `json:"affinity,omitempty"`
-
- // Used for cancelling a job (and maybe more later on)
- // +optional
- Status BuildSpecStatus
-}
-
-// BuildSpecStatus defines the build spec status the user can provide
-type BuildSpecStatus string
-
-const (
- // BuildSpecStatusCancelled indicates that the user wants to cancel the build,
- // if not already cancelled or terminated
- BuildSpecStatusCancelled = "BuildCancelled"
-)
-
-// TemplateKind defines the type of BuildTemplate used by the build.
-type TemplateKind string
-
-const (
- // BuildTemplateKind indicates that the template type has a namepace scope.
- BuildTemplateKind TemplateKind = "BuildTemplate"
- // ClusterBuildTemplateKind indicates that template type has a cluster scope.
- ClusterBuildTemplateKind TemplateKind = "ClusterBuildTemplate"
-)
-
-// TemplateInstantiationSpec specifies how a BuildTemplate is instantiated into
-// a Build.
-type TemplateInstantiationSpec struct {
- // Name references the BuildTemplate resource to use.
- // The template is assumed to exist in the Build's namespace.
- Name string `json:"name"`
-
- // The Kind of the template to be used, possible values are BuildTemplate
- // or ClusterBuildTemplate. If nothing is specified, the default if is BuildTemplate
- // +optional
- Kind TemplateKind `json:"kind,omitempty"`
-
- // Arguments, if specified, lists values that should be applied to the
- // parameters specified by the template.
- // +optional
- Arguments []ArgumentSpec `json:"arguments,omitempty"`
-
- // Env, if specified will provide variables to all build template steps.
- // This will override any of the template's steps environment variables.
- // +optional
- Env []corev1.EnvVar `json:"env,omitempty"`
-}
-
-// ArgumentSpec defines the actual values to use to populate a template's
-// parameters.
-type ArgumentSpec struct {
- // Name is the name of the argument.
- Name string `json:"name"`
- // Value is the value of the argument.
- Value string `json:"value"`
- // TODO(jasonhall): ValueFrom?
-}
-
-// SourceSpec defines the input to the Build
-type SourceSpec struct {
- // Git represents source in a Git repository.
- // +optional
- Git *GitSourceSpec `json:"git,omitempty"`
-
- // GCS represents source in Google Cloud Storage.
- // +optional
- GCS *GCSSourceSpec `json:"gcs,omitempty"`
-
- // Custom indicates that source should be retrieved using a custom
- // process defined in a container invocation.
- // +optional
- Custom *corev1.Container `json:"custom,omitempty"`
-
- // SubPath specifies a path within the fetched source which should be
- // built. This option makes parent directories *inaccessible* to the
- // build steps. (The specific source type may, in fact, not even fetch
- // files not in the SubPath.)
- // +optional
- SubPath string `json:"subPath,omitempty"`
-
- // Name is the name of source. This field is used to uniquely identify the
- // source init containers
- // Restrictions on the allowed charatcers
- // Must be a basename (no /)
- // Must be a valid DNS name (only alphanumeric characters, no _)
- // https://tools.ietf.org/html/rfc1123#section-2
- // +optional
- Name string `json:"name,omitempty"`
-
- // TargetPath is the path in workspace directory where the source will be copied.
- // TargetPath is optional and if its not set source will be copied under workspace.
- // TargetPath should not be set for custom source.
- TargetPath string `json:"targetPath,omitempty"`
-}
-
-// GitSourceSpec describes a Git repo source input to the Build.
-type GitSourceSpec struct {
- // URL of the Git repository to clone from.
- Url string `json:"url"`
-
- // Git revision (branch, tag, commit SHA or ref) to clone. See
- // https://git-scm.com/docs/gitrevisions#_specifying_revisions for more
- // information.
- Revision string `json:"revision"`
-}
-
-// GCSSourceSpec describes source input to the Build in the form of an archive,
-// or a source manifest describing files to fetch.
-type GCSSourceSpec struct {
- // Type declares the style of source to fetch.
- Type GCSSourceType `json:"type,omitempty"`
-
- // Location specifies the location of the source archive or manifest file.
- Location string `json:"location,omitempty"`
-}
-
-// GCSSourceType defines a type of GCS source fetch.
-type GCSSourceType string
-
-const (
- // GCSArchive indicates that source should be fetched from a typical archive file.
- GCSArchive GCSSourceType = "Archive"
-
- // GCSManifest indicates that source should be fetched using a
- // manifest-based protocol which enables incremental source upload.
- GCSManifest GCSSourceType = "Manifest"
-)
-
-// BuildProvider defines a build execution implementation.
-type BuildProvider string
-
-const (
- // GoogleBuildProvider indicates that this build was performed with Google Cloud Build.
- GoogleBuildProvider BuildProvider = "Google"
- // ClusterBuildProvider indicates that this build was performed on-cluster.
- ClusterBuildProvider BuildProvider = "Cluster"
-)
-
-// BuildStatus is the status for a Build resource
-type BuildStatus struct {
- duckv1alpha1.Status `json:",inline"`
-
- // +optional
- Builder BuildProvider `json:"builder,omitempty"`
-
- // Cluster provides additional information if the builder is Cluster.
- // +optional
- Cluster *ClusterSpec `json:"cluster,omitempty"`
-
- // Google provides additional information if the builder is Google.
- // +optional
- Google *GoogleSpec `json:"google,omitempty"`
-
- // StartTime is the time the build is actually started.
- // +optional
- StartTime *metav1.Time `json:"startTime,omitempty"`
-
- // CompletionTime is the time the build completed.
- // +optional
- CompletionTime *metav1.Time `json:"completionTime,omitempty"`
-
- // StepStates describes the state of each build step container.
- // +optional
- StepStates []corev1.ContainerState `json:"stepStates,omitempty"`
-
- // StepsCompleted lists the name of build steps completed.
- // +optional
- StepsCompleted []string `json:"stepsCompleted",omitempty`
-}
-
-// Check that BuildStatus may have its conditions managed.
-var _ duckv1alpha1.ConditionsAccessor = (*BuildStatus)(nil)
-
-// ClusterSpec provides information about the on-cluster build, if applicable.
-type ClusterSpec struct {
- // Namespace is the namespace in which the pod is running.
- Namespace string `json:"namespace"`
- // PodName is the name of the pod responsible for executing this build's steps.
- PodName string `json:"podName"`
-}
-
-// GoogleSpec provides information about the GCB build, if applicable.
-type GoogleSpec struct {
- // Operation is the unique name of the GCB API Operation for the build.
- Operation string `json:"operation"`
-}
-
-// BuildSucceeded is set when the build is running, and becomes True when the
-// build finishes successfully.
-//
-// If the build is ongoing, its status will be Unknown. If it fails, its status
-// will be False.
-const BuildSucceeded = duckv1alpha1.ConditionSucceeded
-
-const BuildCancelled duckv1alpha1.ConditionType = "Cancelled"
-
-var buildCondSet = duckv1alpha1.NewBatchConditionSet()
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// BuildList is a list of Build resources
-type BuildList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- // Items is the list of Build items in this list.
- Items []Build `json:"items"`
-}
-
-// GetCondition returns the Condition matching the given type.
-func (bs *BuildStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
- return buildCondSet.Manage(bs).GetCondition(t)
-}
-
-// SetCondition sets the condition, unsetting previous conditions with the same
-// type as necessary.
-func (bs *BuildStatus) SetCondition(newCond *duckv1alpha1.Condition) {
- if newCond != nil {
- buildCondSet.Manage(bs).SetCondition(*newCond)
- }
-}
-
-// GetConditions returns the Conditions array. This enables generic handling of
-// conditions by implementing the duckv1alpha1.Conditions interface.
-func (bs *BuildStatus) GetConditions() duckv1alpha1.Conditions {
- return bs.Conditions
-}
-
-// SetConditions sets the Conditions array. This enables generic handling of
-// conditions by implementing the duckv1alpha1.Conditions interface.
-func (bs *BuildStatus) SetConditions(conditions duckv1alpha1.Conditions) {
- bs.Conditions = conditions
-}
-
-func (b *Build) GetGroupVersionKind() schema.GroupVersionKind {
- return SchemeGroupVersion.WithKind("Build")
-}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go
deleted file mode 100644
index ee6a2c34eb9..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_validation.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "context"
- "time"
-
- "github.com/knative/pkg/apis"
-)
-
-// Validate Build
-func (b *Build) Validate(ctx context.Context) *apis.FieldError {
- return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate(ctx).ViaField("spec"))
-}
-
-// Validate for build spec
-func (bs *BuildSpec) Validate(ctx context.Context) *apis.FieldError {
- if bs.Template == nil && len(bs.Steps) == 0 {
- return apis.ErrMissingOneOf("template", "steps")
- }
- if bs.Template != nil && len(bs.Steps) > 0 {
- return apis.ErrMultipleOneOf("template", "steps")
- }
-
- // If a build specifies a template, all the template's parameters without
- // defaults must be satisfied by the build's parameters.
- if bs.Template != nil {
- return bs.Template.Validate(ctx).ViaField("template")
- }
-
- // Below method potentially has a bug:
- // It does not Validate if only a "Source" has been set, it only validates if multiple sources have been set
- return bs.validateSources().
- Also(ValidateVolumes(bs.Volumes).ViaField("volumes")).
- Also(bs.validateTimeout()).
- Also(validateSteps(bs.Steps).ViaField("steps"))
-}
-
-// Validate template
-func (b *TemplateInstantiationSpec) Validate(ctx context.Context) *apis.FieldError {
- if b == nil {
- return nil
- }
- if b.Name == "" {
- return apis.ErrMissingField("name")
- }
- if b.Kind != "" {
- switch b.Kind {
- case ClusterBuildTemplateKind,
- BuildTemplateKind:
- return nil
- default:
- return apis.ErrInvalidValue(string(b.Kind), "kind")
- }
- }
- return nil
-}
-
-// Validate build timeout
-func (bs *BuildSpec) validateTimeout() *apis.FieldError {
- if bs.Timeout == nil {
- return nil
- }
- maxTimeout := time.Duration(24 * time.Hour)
-
- if bs.Timeout.Duration > maxTimeout || bs.Timeout.Duration < 0 {
- return apis.ErrOutOfBoundsValue(bs.Timeout.Duration.String(), "0", "24", "timeout")
- }
- return nil
-}
-
-// Validate source
-func (bs BuildSpec) validateSources() *apis.FieldError {
- var subPathExists bool
- var emptyTargetPath bool
- names := map[string]string{}
- pathtree := pathTree{
- nodeMap: map[string]map[string]string{},
- }
-
- // Both source and sources cannot be defined in build
- if len(bs.Sources) > 0 && bs.Source != nil {
- return apis.ErrMultipleOneOf("source", "sources")
- }
- for _, source := range bs.Sources {
- // Check all source have unique names
- if _, ok := names[source.Name]; ok {
- return apis.ErrMultipleOneOf("name").ViaField("sources")
- }
- // Multiple sources cannot have subpath defined
- if source.SubPath != "" {
- if subPathExists {
- return apis.ErrMultipleOneOf("subpath").ViaField("sources")
- }
- subPathExists = true
- }
- names[source.Name] = ""
-
- if source.TargetPath == "" {
- if source.Custom != nil {
- continue
- }
- if emptyTargetPath {
- return apis.ErrInvalidValue("Empty Target Path", "targetPath").ViaField("sources")
- }
- emptyTargetPath = true
- } else {
- if source.Custom != nil {
- return apis.ErrInvalidValue(source.TargetPath, "targetPath").ViaField("sources")
- }
- if err := insertNode(source.TargetPath, pathtree).ViaField("sources"); err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go
deleted file mode 100644
index dbe8fed60b2..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_types.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "context"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/knative/pkg/apis"
- "github.com/knative/pkg/kmeta"
-)
-
-// +genclient
-// +genclient:noStatus
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterBuildTemplate is a template that can used to easily create Builds.
-type ClusterBuildTemplate struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec BuildTemplateSpec `json:"spec"`
-}
-
-// Check that our resource implements several interfaces.
-var _ kmeta.OwnerRefable = (*ClusterBuildTemplate)(nil)
-var _ Template = (*ClusterBuildTemplate)(nil)
-var _ BuildTemplateInterface = (*ClusterBuildTemplate)(nil)
-
-// Check that ClusterBuildTemplate may be validated and defaulted.
-var _ apis.Validatable = (*ClusterBuildTemplate)(nil)
-var _ apis.Defaultable = (*ClusterBuildTemplate)(nil)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// ClusterBuildTemplateList is a list of BuildTemplate resources.
-type ClusterBuildTemplateList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []ClusterBuildTemplate `json:"items"`
-}
-
-// TemplateSpec returnes the Spec used by the template
-func (bt *ClusterBuildTemplate) TemplateSpec() BuildTemplateSpec {
- return bt.Spec
-}
-
-// Copy performes a deep copy
-func (bt *ClusterBuildTemplate) Copy() BuildTemplateInterface {
- return bt.DeepCopy()
-}
-
-func (bt *ClusterBuildTemplate) GetGroupVersionKind() schema.GroupVersionKind {
- return SchemeGroupVersion.WithKind("ClusterBuildTemplate")
-}
-
-// SetDefaults
-func (b *ClusterBuildTemplate) SetDefaults(ctx context.Context) {}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go
deleted file mode 100644
index 8801c3fca6f..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/metadata_validation.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "strings"
-
- "github.com/knative/pkg/apis"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
- maxLength = 63
-)
-
-func validateObjectMetadata(meta metav1.Object) *apis.FieldError {
- name := meta.GetName()
-
- if strings.Contains(name, ".") {
- return &apis.FieldError{
- Message: "Invalid resource name: special character . must not be present",
- Paths: []string{"name"},
- }
- }
-
- if len(name) > maxLength {
- return &apis.FieldError{
- Message: "Invalid resource name: length must be no more than 63 characters",
- Paths: []string{"name"},
- }
- }
- return nil
-}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/target_path_validation.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/target_path_validation.go
deleted file mode 100644
index 3439a87ebaf..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/target_path_validation.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package v1alpha1
-
-import (
- "strings"
-
- "github.com/knative/pkg/apis"
-)
-
-type pathTree struct {
- nodeMap map[string]map[string]string
-}
-
-// insertNode functions checks the path does not have overlap with existing
-// paths in path.nodeMap. If not it creates a key for path and adds
-func insertNode(path string, pathtree pathTree) *apis.FieldError {
- err := &apis.FieldError{
- Message: "Overlapping Target Paths",
- Paths: []string{"targetPath"},
- }
- path = strings.Trim(path, "/")
- parts := strings.Split(path, "/")
-
- for nodePath, nodeMap := range pathtree.nodeMap {
- if len(nodeMap) > len(parts) {
- if strings.HasPrefix(nodePath, path) {
- return err
- }
- }
-
- if len(nodeMap) == len(parts) {
- if path == nodePath {
- return err
- }
- }
- if len(nodeMap) < len(parts) {
- if strings.HasPrefix(path, nodePath) {
- return err
- }
- }
- }
- // path is trimmed with "/"
- addNode(path, pathtree)
- return nil
-}
-
-func addNode(path string, tree pathTree) {
- parts := strings.Split(path, "/")
- nm := map[string]string{}
-
- for _, part := range parts {
- nm[part] = part
- }
- tree.nodeMap[path] = nm
-}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index 2201ea706f5..00000000000
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,566 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This file was autogenerated by deepcopy-gen. Do not edit it manually!
-
-package v1alpha1
-
-import (
- v1 "k8s.io/api/core/v1"
- meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ArgumentSpec) DeepCopyInto(out *ArgumentSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArgumentSpec.
-func (in *ArgumentSpec) DeepCopy() *ArgumentSpec {
- if in == nil {
- return nil
- }
- out := new(ArgumentSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Build) DeepCopyInto(out *Build) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
-func (in *Build) DeepCopy() *Build {
- if in == nil {
- return nil
- }
- out := new(Build)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Build) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildList) DeepCopyInto(out *BuildList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Build, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList.
-func (in *BuildList) DeepCopy() *BuildList {
- if in == nil {
- return nil
- }
- out := new(BuildList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BuildList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
- *out = *in
- if in.Source != nil {
- in, out := &in.Source, &out.Source
- if *in == nil {
- *out = nil
- } else {
- *out = new(SourceSpec)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.Sources != nil {
- in, out := &in.Sources, &out.Sources
- *out = make([]SourceSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Steps != nil {
- in, out := &in.Steps, &out.Steps
- *out = make([]v1.Container, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Volumes != nil {
- in, out := &in.Volumes, &out.Volumes
- *out = make([]v1.Volume, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Template != nil {
- in, out := &in.Template, &out.Template
- if *in == nil {
- *out = nil
- } else {
- *out = new(TemplateInstantiationSpec)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.NodeSelector != nil {
- in, out := &in.NodeSelector, &out.NodeSelector
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
- if in.Timeout != nil {
- in, out := &in.Timeout, &out.Timeout
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.Duration)
- **out = **in
- }
- }
- if in.Affinity != nil {
- in, out := &in.Affinity, &out.Affinity
- if *in == nil {
- *out = nil
- } else {
- *out = new(v1.Affinity)
- (*in).DeepCopyInto(*out)
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec.
-func (in *BuildSpec) DeepCopy() *BuildSpec {
- if in == nil {
- return nil
- }
- out := new(BuildSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildStatus) DeepCopyInto(out *BuildStatus) {
- *out = *in
- in.Status.DeepCopyInto(&out.Status)
- if in.Cluster != nil {
- in, out := &in.Cluster, &out.Cluster
- if *in == nil {
- *out = nil
- } else {
- *out = new(ClusterSpec)
- **out = **in
- }
- }
- if in.Google != nil {
- in, out := &in.Google, &out.Google
- if *in == nil {
- *out = nil
- } else {
- *out = new(GoogleSpec)
- **out = **in
- }
- }
- if in.StartTime != nil {
- in, out := &in.StartTime, &out.StartTime
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.Time)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.CompletionTime != nil {
- in, out := &in.CompletionTime, &out.CompletionTime
- if *in == nil {
- *out = nil
- } else {
- *out = new(meta_v1.Time)
- (*in).DeepCopyInto(*out)
- }
- }
- if in.StepStates != nil {
- in, out := &in.StepStates, &out.StepStates
- *out = make([]v1.ContainerState, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.StepsCompleted != nil {
- in, out := &in.StepsCompleted, &out.StepsCompleted
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus.
-func (in *BuildStatus) DeepCopy() *BuildStatus {
- if in == nil {
- return nil
- }
- out := new(BuildStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildTemplate) DeepCopyInto(out *BuildTemplate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplate.
-func (in *BuildTemplate) DeepCopy() *BuildTemplate {
- if in == nil {
- return nil
- }
- out := new(BuildTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BuildTemplate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildTemplateList) DeepCopyInto(out *BuildTemplateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]BuildTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplateList.
-func (in *BuildTemplateList) DeepCopy() *BuildTemplateList {
- if in == nil {
- return nil
- }
- out := new(BuildTemplateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *BuildTemplateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BuildTemplateSpec) DeepCopyInto(out *BuildTemplateSpec) {
- *out = *in
- if in.Parameters != nil {
- in, out := &in.Parameters, &out.Parameters
- *out = make([]ParameterSpec, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Steps != nil {
- in, out := &in.Steps, &out.Steps
- *out = make([]v1.Container, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.Volumes != nil {
- in, out := &in.Volumes, &out.Volumes
- *out = make([]v1.Volume, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTemplateSpec.
-func (in *BuildTemplateSpec) DeepCopy() *BuildTemplateSpec {
- if in == nil {
- return nil
- }
- out := new(BuildTemplateSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterBuildTemplate) DeepCopyInto(out *ClusterBuildTemplate) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBuildTemplate.
-func (in *ClusterBuildTemplate) DeepCopy() *ClusterBuildTemplate {
- if in == nil {
- return nil
- }
- out := new(ClusterBuildTemplate)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterBuildTemplate) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterBuildTemplateList) DeepCopyInto(out *ClusterBuildTemplateList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]ClusterBuildTemplate, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBuildTemplateList.
-func (in *ClusterBuildTemplateList) DeepCopy() *ClusterBuildTemplateList {
- if in == nil {
- return nil
- }
- out := new(ClusterBuildTemplateList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ClusterBuildTemplateList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
-func (in *ClusterSpec) DeepCopy() *ClusterSpec {
- if in == nil {
- return nil
- }
- out := new(ClusterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GCSSourceSpec) DeepCopyInto(out *GCSSourceSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCSSourceSpec.
-func (in *GCSSourceSpec) DeepCopy() *GCSSourceSpec {
- if in == nil {
- return nil
- }
- out := new(GCSSourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitSourceSpec) DeepCopyInto(out *GitSourceSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceSpec.
-func (in *GitSourceSpec) DeepCopy() *GitSourceSpec {
- if in == nil {
- return nil
- }
- out := new(GitSourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GoogleSpec) DeepCopyInto(out *GoogleSpec) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleSpec.
-func (in *GoogleSpec) DeepCopy() *GoogleSpec {
- if in == nil {
- return nil
- }
- out := new(GoogleSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ParameterSpec) DeepCopyInto(out *ParameterSpec) {
- *out = *in
- if in.Default != nil {
- in, out := &in.Default, &out.Default
- if *in == nil {
- *out = nil
- } else {
- *out = new(string)
- **out = **in
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParameterSpec.
-func (in *ParameterSpec) DeepCopy() *ParameterSpec {
- if in == nil {
- return nil
- }
- out := new(ParameterSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SourceSpec) DeepCopyInto(out *SourceSpec) {
- *out = *in
- if in.Git != nil {
- in, out := &in.Git, &out.Git
- if *in == nil {
- *out = nil
- } else {
- *out = new(GitSourceSpec)
- **out = **in
- }
- }
- if in.GCS != nil {
- in, out := &in.GCS, &out.GCS
- if *in == nil {
- *out = nil
- } else {
- *out = new(GCSSourceSpec)
- **out = **in
- }
- }
- if in.Custom != nil {
- in, out := &in.Custom, &out.Custom
- if *in == nil {
- *out = nil
- } else {
- *out = new(v1.Container)
- (*in).DeepCopyInto(*out)
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceSpec.
-func (in *SourceSpec) DeepCopy() *SourceSpec {
- if in == nil {
- return nil
- }
- out := new(SourceSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TemplateInstantiationSpec) DeepCopyInto(out *TemplateInstantiationSpec) {
- *out = *in
- if in.Arguments != nil {
- in, out := &in.Arguments, &out.Arguments
- *out = make([]ArgumentSpec, len(*in))
- copy(*out, *in)
- }
- if in.Env != nil {
- in, out := &in.Env, &out.Env
- *out = make([]v1.EnvVar, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstantiationSpec.
-func (in *TemplateInstantiationSpec) DeepCopy() *TemplateInstantiationSpec {
- if in == nil {
- return nil
- }
- out := new(TemplateInstantiationSpec)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/vendor/github.com/knative/build/test/panic/kodata/LICENSE b/vendor/github.com/knative/build/test/panic/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/test/panic/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/test/panic/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE b/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE
deleted file mode 120000
index 5853aaea53b..00000000000
--- a/vendor/github.com/knative/build/test/workingdir/kodata/LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE b/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE
deleted file mode 120000
index 3cc89764519..00000000000
--- a/vendor/github.com/knative/build/test/workingdir/kodata/VENDOR-LICENSE
+++ /dev/null
@@ -1 +0,0 @@
-../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/vendor/github.com/knative/build/pkg/apis/build/register.go b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go
similarity index 82%
rename from vendor/github.com/knative/build/pkg/apis/build/register.go
rename to vendor/github.com/knative/pkg/apis/istio/authentication/register.go
index 379817b13ca..f54c7742d1b 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/register.go
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go
@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package build
+package authentication
-// GroupName is the Kubernetes resource group name for Build types.
-const GroupName = "build.knative.dev"
+const (
+ GroupName = "authentication.istio.io"
+)
diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
new file mode 100644
index 00000000000..07b17599c05
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Api versions allow the api contract for a resource to be changed while keeping
+// backward compatibility by supporting multiple concurrent versions
+// of the same resource
+// +k8s:deepcopy-gen=package
+// +groupName=authentication.istio.io
+package v1alpha1
diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
new file mode 100644
index 00000000000..882b1fc3fb3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
@@ -0,0 +1,345 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/knative/pkg/apis/istio/common/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// Policy is the Istio authentication policy resource.
+type Policy struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+ Spec PolicySpec `json:"spec"`
+}
+
+// Policy defines what authentication methods can be accepted on workload(s),
+// and if authenticated, which method/certificate will set the request principal
+// (i.e request.auth.principal attribute).
+//
+// Authentication policy is composed of 2-part authentication:
+// - peer: verify caller service credentials. This part will set source.user
+// (peer identity).
+// - origin: verify the origin credentials. This part will set request.auth.user
+// (origin identity), as well as other attributes like request.auth.presenter,
+// request.auth.audiences and raw claims. Note that the identity could be
+// end-user, service account, device etc.
+//
+// Last but not least, the principal binding rule defines which identity (peer
+// or origin) should be used as principal. By default, it uses peer.
+//
+// Examples:
+//
+// Policy to enable mTLS for all services in namespace frod
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+// name: mTLS_enable
+// namespace: frod
+// spec:
+// peers:
+// - mtls:
+//
+// Policy to disable mTLS for "productpage" service
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+// name: mTLS_disable
+// namespace: frod
+// spec:
+// targets:
+// - name: productpage
+//
+// Policy to require mTLS for peer authentication, and JWT for origin authentication
+// for productpage:9000. Principal is set from origin identity.
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+// name: mTLS_enable
+// namespace: frod
+// spec:
+// target:
+// - name: productpage
+// ports:
+// - number: 9000
+// peers:
+// - mtls:
+// origins:
+// - jwt:
+// issuer: "https://securetoken.google.com"
+// audiences:
+// - "productpage"
+// jwksUri: "https://www.googleapis.com/oauth2/v1/certs"
+// jwt_headers:
+// - "x-goog-iap-jwt-assertion"
+// principalBinding: USE_ORIGIN
+//
+// Policy to require mTLS for peer authentication, and JWT for origin authentication
+// for productpage:9000, but allow origin authentication failed. Principal is set
+// from origin identity.
+// Note: this example can be used for use cases when we want to allow request from
+// certain peers, given it comes with an appropriate authorization policy to check
+// and reject request accordingly.
+//
+// apiVersion: authentication.istio.io/v1alpha1
+// kind: Policy
+// metadata:
+// name: mTLS_enable
+// namespace: frod
+// spec:
+// target:
+// - name: productpage
+// ports:
+// - number: 9000
+// peers:
+// - mtls:
+// origins:
+// - jwt:
+// issuer: "https://securetoken.google.com"
+// audiences:
+// - "productpage"
+// jwksUri: "https://www.googleapis.com/oauth2/v1/certs"
+// jwt_headers:
+// - "x-goog-iap-jwt-assertion"
+// originIsOptional: true
+// principalBinding: USE_ORIGIN
+type PolicySpec struct {
+ // List rules to select destinations that the policy should be applied on.
+ // If empty, policy will be used on all destinations in the same namespace.
+ Targets []TargetSelector `json:"targets,omitempty"`
+
+ // List of authentication methods that can be used for peer authentication.
+ // They will be evaluated in order; the first validated one will be used to
+ // set peer identity (source.user) and other peer attributes. If none of
+ // these methods pass, and peer_is_optional flag is false (see below),
+ // request will be rejected with authentication failed error (401).
+ // Leave the list empty if peer authentication is not required
+ Peers []PeerAuthenticationMethod `json:"peers,omitempty"`
+
+ // Set this flag to true to accept request (for peer authentication perspective),
+ // even when none of the peer authentication methods defined above satisfied.
+ // Typically, this is used to delay the rejection decision to next layer (e.g
+ // authorization).
+ // This flag is ignored if no authentication defined for peer (peers field is empty).
+ PeerIsOptional bool `json:"peerIsOptional,omitempty"`
+
+ // List of authentication methods that can be used for origin authentication.
+ // Similar to peers, these will be evaluated in order; the first validated one
+ // will be used to set origin identity and attributes (i.e request.auth.user,
+ // request.auth.issuer etc). If none of these methods pass, and origin_is_optional
+ // is false (see below), request will be rejected with authentication failed
+ // error (401).
+ // Leave the list empty if origin authentication is not required.
+ Origins []OriginAuthenticationMethod `json:"origins,omitempty"`
+
+ // Set this flag to true to accept request (for origin authentication perspective),
+ // even when none of the origin authentication methods defined above satisfied.
+ // Typically, this is used to delay the rejection decision to next layer (e.g
+ // authorization).
+ // This flag is ignored if no authentication defined for origin (origins field is empty).
+ OriginIsOptional bool `json:"originIsOptional,omitempty"`
+
+ // Define whether peer or origin identity should be use for principal. Default
+ // value is USE_PEER.
+ // If peer (or origin) identity is not available, either because of peer/origin
+ // authentication is not defined, or failed, principal will be left unset.
+ // In other words, binding rule does not affect the decision to accept or
+ // reject request.
+ PrincipalBinding PrincipalBinding `json:"principalBinding,omitempty"`
+}
+
+// TargetSelector defines a matching rule to a service/destination.
+type TargetSelector struct {
+ // REQUIRED. The name must be a short name from the service registry. The
+ // fully qualified domain name will be resolved in a platform specific manner.
+ Name string `json:"name"`
+
+ // Specifies the ports on the destination. Leave empty to match all ports
+ // that are exposed.
+ Ports []PortSelector `json:"ports,omitempty"`
+}
+
+// PortSelector specifies the name or number of a port to be used for
+// matching targets for authentication policy. This is copied from
+// networking API to avoid dependency.
+type PortSelector struct {
+ // It is required to specify exactly one of the fields:
+ // Number or Name
+
+ // Valid port number
+ Number uint32 `json:"number,omitempty"`
+
+ // Port name
+ Name string `json:"name,omitempty"`
+}
+
+// PeerAuthenticationMethod defines one particular type of authentication, e.g
+// mutual TLS, JWT etc, (no authentication is one type by itself) that can
+// be used for peer authentication.
+// The type can be programmatically determined by checking the type of the
+// "params" field.
+type PeerAuthenticationMethod struct {
+ // It is required to specify exactly one of the fields:
+ // Mtls or Jwt
+ // Set if mTLS is used.
+ Mtls *MutualTLS `json:"mtls,omitempty"`
+
+ // Set if JWT is used. This option is not yet available.
+ Jwt *Jwt `json:"jwt,omitempty"`
+}
+
+// Defines the acceptable connection TLS mode.
+type Mode string
+
+const (
+ // Client cert must be presented, connection is in TLS.
+ ModeStrict Mode = "STRICT"
+
+ // Connection can be either plaintext or TLS, and client cert can be omitted.
+ ModePermissive Mode = "PERMISSIVE"
+)
+
+// TLS authentication params.
+type MutualTLS struct {
+
+ // WILL BE DEPRECATED, if set, will translate to `TLS_PERMISSIVE` mode.
+ // Set this flag to true to allow regular TLS (i.e without client x509
+ // certificate). If request carries client certificate, identity will be
+ // extracted and used (set to peer identity). Otherwise, peer identity will
+ // be left unset.
+ // When the flag is false (default), request must have client certificate.
+ AllowTLS bool `json:"allowTls,omitempty"`
+
+ // Defines the mode of mTLS authentication.
+ Mode Mode `json:"mode,omitempty"`
+}
+
+// JSON Web Token (JWT) token format for authentication as defined by
+// https://tools.ietf.org/html/rfc7519. See [OAuth
+// 2.0](https://tools.ietf.org/html/rfc6749) and [OIDC
+// 1.0](http://openid.net/connect) for how this is used in the whole
+// authentication flow.
+//
+// Example,
+//
+// issuer: https://example.com
+// audiences:
+// - bookstore_android.apps.googleusercontent.com
+// bookstore_web.apps.googleusercontent.com
+// jwksUri: https://example.com/.well-known/jwks.json
+//
+type Jwt struct {
+ // Identifies the issuer that issued the JWT. See
+ // [issuer](https://tools.ietf.org/html/rfc7519#section-4.1.1)
+ // Usually a URL or an email address.
+ //
+ // Example: https://securetoken.google.com
+ // Example: 1234567-compute@developer.gserviceaccount.com
+ Issuer string `json:"issuer,omitempty"`
+
+ // The list of JWT
+ // [audiences](https://tools.ietf.org/html/rfc7519#section-4.1.3).
+ // that are allowed to access. A JWT containing any of these
+ // audiences will be accepted.
+ //
+ // The service name will be accepted if audiences is empty.
+ //
+ // Example:
+ //
+ // ```yaml
+ // audiences:
+ // - bookstore_android.apps.googleusercontent.com
+ // bookstore_web.apps.googleusercontent.com
+ // ```
+ Audiences []string `json:"audiences,omitempty"`
+
+ // URL of the provider's public key set to validate signature of the
+ // JWT. See [OpenID
+ // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+ //
+ // Optional if the key set document can either (a) be retrieved from
+ // [OpenID
+ // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) of
+ // the issuer or (b) inferred from the email domain of the issuer (e.g. a
+ // Google service account).
+ //
+ // Example: https://www.googleapis.com/oauth2/v1/certs
+ JwksURI string `json:"jwksUri,omitempty"`
+
+ // Two fields below define where to extract the JWT from an HTTP request.
+ //
+ // If no explicit location is specified the following default
+ // locations are tried in order:
+ //
+ // 1) The Authorization header using the Bearer schema,
+ // e.g. Authorization: Bearer . (see
+ // [Authorization Request Header
+ // Field](https://tools.ietf.org/html/rfc6750#section-2.1))
+ //
+ // 2) `access_token` query parameter (see
+ // [URI Query Parameter](https://tools.ietf.org/html/rfc6750#section-2.3))
+ // JWT is sent in a request header. `header` represents the
+ // header name.
+ //
+ // For example, if `header=x-goog-iap-jwt-assertion`, the header
+ // format will be x-goog-iap-jwt-assertion: .
+ JwtHeaders []string `json:"jwtHeaders,omitempty"`
+
+ // JWT is sent in a query parameter. `query` represents the
+ // query parameter name.
+ //
+ // For example, `query=jwt_token`.
+ JwtParams []string `json:"jwtParams,omitempty"`
+
+ // URL paths that should be excluded from the JWT validation. If the request path is matched,
+ // the JWT validation will be skipped and the request will proceed regardless.
+ // This is useful to keep a couple of URLs public for external health checks.
+ // Example: "/health_check", "/status/cpu_usage".
+ ExcludedPaths []v1alpha1.StringMatch `json:"excludedPaths,omitempty"`
+}
+
+// OriginAuthenticationMethod defines authentication method/params for origin
+// authentication. Origin could be end-user, device, delegate service etc.
+// Currently, only JWT is supported for origin authentication.
+type OriginAuthenticationMethod struct {
+ // Jwt params for the method.
+ Jwt *Jwt `json:"jwt,omitempty"`
+}
+
+// Associates authentication with request principal.
+type PrincipalBinding string
+
+const (
+ // Principal will be set to the identity from peer authentication.
+ PrincipalBindingUserPeer PrincipalBinding = "USE_PEER"
+ // Principal will be set to the identity from origin authentication.
+ PrincipalBindingUserOrigin PrincipalBinding = "USE_ORIGIN"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// PolicyList is a list of Policy resources.
+type PolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []Policy `json:"items"`
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
similarity index 74%
rename from vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go
rename to vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
index c2b2c6512c7..7809d1cd970 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/register.go
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -17,15 +17,14 @@ limitations under the License.
package v1alpha1
import (
+ "github.com/knative/pkg/apis/istio/authentication"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/knative/build/pkg/apis/build"
)
// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: build.GroupName, Version: "v1alpha1"}
+var SchemeGroupVersion = schema.GroupVersion{Group: authentication.GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
@@ -38,21 +37,15 @@ func Resource(resource string) schema.GroupResource {
}
var (
- schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-
- // AddToScheme adds Build types to the scheme.
- AddToScheme = schemeBuilder.AddToScheme
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
- &Build{},
- &BuildList{},
- &BuildTemplate{},
- &BuildTemplateList{},
- &ClusterBuildTemplate{},
- &ClusterBuildTemplateList{},
+ &Policy{},
+ &PolicyList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..20f900c9b6e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,259 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ commonv1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Jwt) DeepCopyInto(out *Jwt) {
+ *out = *in
+ if in.Audiences != nil {
+ in, out := &in.Audiences, &out.Audiences
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.JwtHeaders != nil {
+ in, out := &in.JwtHeaders, &out.JwtHeaders
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.JwtParams != nil {
+ in, out := &in.JwtParams, &out.JwtParams
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExcludedPaths != nil {
+ in, out := &in.ExcludedPaths, &out.ExcludedPaths
+ *out = make([]commonv1alpha1.StringMatch, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Jwt.
+func (in *Jwt) DeepCopy() *Jwt {
+ if in == nil {
+ return nil
+ }
+ out := new(Jwt)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MutualTLS) DeepCopyInto(out *MutualTLS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTLS.
+func (in *MutualTLS) DeepCopy() *MutualTLS {
+ if in == nil {
+ return nil
+ }
+ out := new(MutualTLS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OriginAuthenticationMethod) DeepCopyInto(out *OriginAuthenticationMethod) {
+ *out = *in
+ if in.Jwt != nil {
+ in, out := &in.Jwt, &out.Jwt
+ *out = new(Jwt)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginAuthenticationMethod.
+func (in *OriginAuthenticationMethod) DeepCopy() *OriginAuthenticationMethod {
+ if in == nil {
+ return nil
+ }
+ out := new(OriginAuthenticationMethod)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PeerAuthenticationMethod) DeepCopyInto(out *PeerAuthenticationMethod) {
+ *out = *in
+ if in.Mtls != nil {
+ in, out := &in.Mtls, &out.Mtls
+ *out = new(MutualTLS)
+ **out = **in
+ }
+ if in.Jwt != nil {
+ in, out := &in.Jwt, &out.Jwt
+ *out = new(Jwt)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerAuthenticationMethod.
+func (in *PeerAuthenticationMethod) DeepCopy() *PeerAuthenticationMethod {
+ if in == nil {
+ return nil
+ }
+ out := new(PeerAuthenticationMethod)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyList) DeepCopyInto(out *PolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Policy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList.
+func (in *PolicyList) DeepCopy() *PolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicySpec) DeepCopyInto(out *PolicySpec) {
+ *out = *in
+ if in.Targets != nil {
+ in, out := &in.Targets, &out.Targets
+ *out = make([]TargetSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Peers != nil {
+ in, out := &in.Peers, &out.Peers
+ *out = make([]PeerAuthenticationMethod, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Origins != nil {
+ in, out := &in.Origins, &out.Origins
+ *out = make([]OriginAuthenticationMethod, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec.
+func (in *PolicySpec) DeepCopy() *PolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortSelector) DeepCopyInto(out *PortSelector) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector.
+func (in *PortSelector) DeepCopy() *PortSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(PortSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetSelector) DeepCopyInto(out *TargetSelector) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]PortSelector, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSelector.
+func (in *TargetSelector) DeepCopy() *TargetSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetSelector)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
index 2a557270d9e..5b2c708b673 100644
--- a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
@@ -156,7 +156,7 @@ type HTTPRoute struct {
// forwarding target can be one of several versions of a service (see
// glossary in beginning of document). Weights associated with the
// service version determine the proportion of traffic it receives.
- Route []DestinationWeight `json:"route,omitempty"`
+ Route []HTTPRouteDestination `json:"route,omitempty"`
// A http rule can either redirect or forward (default) traffic. If
// traffic passthrough option is specified in the rule,
@@ -196,7 +196,10 @@ type HTTPRoute struct {
// Additional HTTP headers to add before forwarding a request to the
// destination service.
- AppendHeaders map[string]string `json:"appendHeaders,omitempty"`
+ DeprecatedAppendHeaders map[string]string `json:"appendHeaders,omitempty"`
+
+ // Header manipulation rules
+ Headers *Headers `json:"headers,omitempty"`
// Http headers to remove before returning the response to the caller
RemoveResponseHeaders map[string]string `json:"removeResponseHeaders,omitempty"`
@@ -205,6 +208,30 @@ type HTTPRoute struct {
CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"`
}
+// Headers describes header manipulation rules.
+type Headers struct {
+ // Header manipulation rules to apply before forwarding a request
+ // to the destination service
+ Request *HeaderOperations `json:"request,omitempty"`
+
+ // Header manipulation rules to apply before returning a response
+ // to the caller
+ Response *HeaderOperations `json:"response,omitempty"`
+}
+
 +// HeaderOperations describes the header manipulations to apply
+type HeaderOperations struct {
+ // Overwrite the headers specified by key with the given values
+ Set map[string]string `json:"set,omitempty"`
+
+ // Append the given values to the headers specified by keys
+ // (will create a comma-separated list of values)
+ Add map[string]string `json:"add,omitempty"`
+
 + // Remove the specified headers
+ Remove []string `json:"remove,omitempty"`
+}
+
// HttpMatchRequest specifies a set of criterion to be met in order for the
// rule to be applied to the HTTP request. For example, the following
// restricts the rule to match only requests where the URL path
@@ -306,7 +333,7 @@ type HTTPMatchRequest struct {
Gateways []string `json:"gateways,omitempty"`
}
-type DestinationWeight struct {
+type HTTPRouteDestination struct {
// REQUIRED. Destination uniquely identifies the instances of a service
// to which the request/connection should be forwarded to.
Destination Destination `json:"destination"`
@@ -316,6 +343,9 @@ type DestinationWeight struct {
// If there is only destination in a rule, the weight value is assumed to
// be 100.
Weight int `json:"weight"`
+
+ // Header manipulation rules
+ Headers *Headers `json:"headers,omitempty"`
}
// Destination indicates the network addressable service to which the
@@ -493,7 +523,7 @@ type TCPRoute struct {
// The destinations to which the connection should be forwarded to. Weights
// must add to 100%.
- Route []DestinationWeight `json:"route"`
+ Route []HTTPRouteDestination `json:"route"`
}
// Describes match conditions and actions for routing unterminated TLS
@@ -534,7 +564,7 @@ type TLSRoute struct {
Match []TLSMatchAttributes `json:"match"`
// The destination to which the connection should be forwarded to.
- Route []DestinationWeight `json:"route"`
+ Route []HTTPRouteDestination `json:"route"`
}
// L4 connection match attributes. Note that L4 connection matching support
@@ -625,7 +655,7 @@ type HTTPRedirect struct {
// HTTPRewrite can be used to rewrite specific parts of a HTTP request
// before forwarding the request to the destination. Rewrite primitive can
-// be used only with the DestinationWeights. The following example
+// be used only with the HTTPRouteDestinations. The following example
// demonstrates how to rewrite the URL prefix for api call (/ratings) to
// ratings service before making the actual API call.
//
@@ -835,7 +865,7 @@ type InjectDelay struct {
// not specified, all requests are aborted.
type InjectAbort struct {
// Percentage of requests to be aborted with the error code provided (0-100).
- Perecent int `json:"percent,omitempty"`
+ Percent int `json:"percent,omitempty"`
// REQUIRED. HTTP status code to use to abort the Http request.
HTTPStatus int `json:"httpStatus"`
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
index 4f19b654527..67cf72128f5 100644
--- a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
@@ -213,23 +213,6 @@ func (in *DestinationRuleSpec) DeepCopy() *DestinationRuleSpec {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DestinationWeight) DeepCopyInto(out *DestinationWeight) {
- *out = *in
- out.Destination = in.Destination
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationWeight.
-func (in *DestinationWeight) DeepCopy() *DestinationWeight {
- if in == nil {
- return nil
- }
- out := new(DestinationWeight)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Gateway) DeepCopyInto(out *Gateway) {
*out = *in
@@ -477,8 +460,10 @@ func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
}
if in.Route != nil {
in, out := &in.Route, &out.Route
- *out = make([]DestinationWeight, len(*in))
- copy(*out, *in)
+ *out = make([]HTTPRouteDestination, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
}
if in.Redirect != nil {
in, out := &in.Redirect, &out.Redirect
@@ -505,13 +490,18 @@ func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) {
*out = new(Destination)
**out = **in
}
- if in.AppendHeaders != nil {
- in, out := &in.AppendHeaders, &out.AppendHeaders
+ if in.DeprecatedAppendHeaders != nil {
+ in, out := &in.DeprecatedAppendHeaders, &out.DeprecatedAppendHeaders
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = new(Headers)
+ (*in).DeepCopyInto(*out)
+ }
if in.RemoveResponseHeaders != nil {
in, out := &in.RemoveResponseHeaders, &out.RemoveResponseHeaders
*out = make(map[string]string, len(*in))
@@ -537,6 +527,28 @@ func (in *HTTPRoute) DeepCopy() *HTTPRoute {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPRouteDestination) DeepCopyInto(out *HTTPRouteDestination) {
+ *out = *in
+ out.Destination = in.Destination
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = new(Headers)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteDestination.
+func (in *HTTPRouteDestination) DeepCopy() *HTTPRouteDestination {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPRouteDestination)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPSettings) DeepCopyInto(out *HTTPSettings) {
*out = *in
@@ -553,6 +565,67 @@ func (in *HTTPSettings) DeepCopy() *HTTPSettings {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeaderOperations) DeepCopyInto(out *HeaderOperations) {
+ *out = *in
+ if in.Set != nil {
+ in, out := &in.Set, &out.Set
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Add != nil {
+ in, out := &in.Add, &out.Add
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Remove != nil {
+ in, out := &in.Remove, &out.Remove
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderOperations.
+func (in *HeaderOperations) DeepCopy() *HeaderOperations {
+ if in == nil {
+ return nil
+ }
+ out := new(HeaderOperations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Headers) DeepCopyInto(out *Headers) {
+ *out = *in
+ if in.Request != nil {
+ in, out := &in.Request, &out.Request
+ *out = new(HeaderOperations)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Response != nil {
+ in, out := &in.Response, &out.Response
+ *out = new(HeaderOperations)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Headers.
+func (in *Headers) DeepCopy() *Headers {
+ if in == nil {
+ return nil
+ }
+ out := new(Headers)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InjectAbort) DeepCopyInto(out *InjectAbort) {
*out = *in
@@ -791,8 +864,10 @@ func (in *TCPRoute) DeepCopyInto(out *TCPRoute) {
}
if in.Route != nil {
in, out := &in.Route, &out.Route
- *out = make([]DestinationWeight, len(*in))
- copy(*out, *in)
+ *out = make([]HTTPRouteDestination, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
}
return
}
@@ -894,8 +969,10 @@ func (in *TLSRoute) DeepCopyInto(out *TLSRoute) {
}
if in.Route != nil {
in, out := &in.Route, &out.Route
- *out = make([]DestinationWeight, len(*in))
- copy(*out, *in)
+ *out = make([]HTTPRouteDestination, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
}
return
}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
new file mode 100644
index 00000000000..5c00fdb6a88
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
+ networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface
+ // Deprecated: please explicitly pick a version if possible.
+ Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface
+ NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
+ // Deprecated: please explicitly pick a version if possible.
+ Networking() networkingv1alpha3.NetworkingV1alpha3Interface
+}
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client
+ networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
+}
+
+// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client
+func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface {
+ return c.authenticationV1alpha1
+}
+
+// Deprecated: Authentication retrieves the default version of AuthenticationClient.
+// Please explicitly pick a version.
+func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface {
+ return c.authenticationV1alpha1
+}
+
+// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
+func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
+ return c.networkingV1alpha3
+}
+
+// Deprecated: Networking retrieves the default version of NetworkingClient.
+// Please explicitly pick a version.
+func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
+ return c.networkingV1alpha3
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.authenticationV1alpha1 = authenticationv1alpha1.NewForConfigOrDie(c)
+ cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.authenticationV1alpha1 = authenticationv1alpha1.New(c)
+ cs.networkingV1alpha3 = networkingv1alpha3.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go
new file mode 100644
index 00000000000..1122e50bfc3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated clientset.
+package versioned
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 00000000000..df8348fcf93
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/knative/pkg/client/clientset/versioned"
+ authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
+ fakeauthenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake"
+ networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
+ fakenetworkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+var _ clientset.Interface = &Clientset{}
+
+// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client
+func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface {
+ return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake}
+}
+
+// Authentication retrieves the AuthenticationV1alpha1Client
+func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface {
+ return &fakeauthenticationv1alpha1.FakeAuthenticationV1alpha1{Fake: &c.Fake}
+}
+
+// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client
+func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface {
+ return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
+}
+
+// Networking retrieves the NetworkingV1alpha3Client
+func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface {
+ return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go
new file mode 100644
index 00000000000..87f3c3e0b01
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go
new file mode 100644
index 00000000000..f00f2c9fe60
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/fake/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+ networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+var parameterCodec = runtime.NewParameterCodec(scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ authenticationv1alpha1.AddToScheme,
+ networkingv1alpha3.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go
new file mode 100644
index 00000000000..7d76538485b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
new file mode 100644
index 00000000000..cca6f278821
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+ networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ authenticationv1alpha1.AddToScheme,
+ networkingv1alpha3.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go
new file mode 100644
index 00000000000..918f9cd7846
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+ "github.com/knative/pkg/client/clientset/versioned/scheme"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ rest "k8s.io/client-go/rest"
+)
+
+type AuthenticationV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ PoliciesGetter
+}
+
+// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.istio.io group.
+type AuthenticationV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *AuthenticationV1alpha1Client) Policies(namespace string) PolicyInterface {
+ return newPolicies(c, namespace)
+}
+
+// NewForConfig creates a new AuthenticationV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &AuthenticationV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new AuthenticationV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *AuthenticationV1alpha1Client {
+ return &AuthenticationV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go
similarity index 78%
rename from vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go
rename to vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go
index df551590c00..a1c6bb9fe8f 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/doc.go
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,8 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// +k8s:deepcopy-gen=package
+// Code generated by client-gen. DO NOT EDIT.
-// Package v1alpha1 is the v1alpha1 version of the API.
-// +groupName=build.knative.dev
+// This package has the automatically generated typed clients.
package v1alpha1
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..a00e5d7b21a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go
new file mode 100644
index 00000000000..f947ca535d7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_authentication_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAuthenticationV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAuthenticationV1alpha1) Policies(namespace string) v1alpha1.PolicyInterface {
+ return &FakePolicies{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAuthenticationV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go
new file mode 100644
index 00000000000..f5d54444e97
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/fake/fake_policy.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePolicies implements PolicyInterface
+type FakePolicies struct {
+ Fake *FakeAuthenticationV1alpha1
+ ns string
+}
+
+var policiesResource = schema.GroupVersionResource{Group: "authentication.istio.io", Version: "v1alpha1", Resource: "policies"}
+
+var policiesKind = schema.GroupVersionKind{Group: "authentication.istio.io", Version: "v1alpha1", Kind: "Policy"}
+
+// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any.
+func (c *FakePolicies) Get(name string, options v1.GetOptions) (result *v1alpha1.Policy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(policiesResource, c.ns, name), &v1alpha1.Policy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Policy), err
+}
+
+// List takes label and field selectors, and returns the list of Policies that match those selectors.
+func (c *FakePolicies) List(opts v1.ListOptions) (result *v1alpha1.PolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(policiesResource, policiesKind, c.ns, opts), &v1alpha1.PolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.PolicyList{ListMeta: obj.(*v1alpha1.PolicyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.PolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested policies.
+func (c *FakePolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(policiesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any.
+func (c *FakePolicies) Create(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(policiesResource, c.ns, policy), &v1alpha1.Policy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Policy), err
+}
+
+// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any.
+func (c *FakePolicies) Update(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(policiesResource, c.ns, policy), &v1alpha1.Policy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Policy), err
+}
+
+// Delete takes name of the policy and deletes it. Returns an error if one occurs.
+func (c *FakePolicies) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(policiesResource, c.ns, name), &v1alpha1.Policy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(policiesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.PolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched policy.
+func (c *FakePolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(policiesResource, c.ns, name, data, subresources...), &v1alpha1.Policy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Policy), err
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go
new file mode 100644
index 00000000000..25a1998a2a3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type PolicyExpansion interface{}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go
new file mode 100644
index 00000000000..961aaf00859
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
+ scheme "github.com/knative/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// PoliciesGetter has a method to return a PolicyInterface.
+// A group's client should implement this interface.
+type PoliciesGetter interface {
+ Policies(namespace string) PolicyInterface
+}
+
+// PolicyInterface has methods to work with Policy resources.
+type PolicyInterface interface {
+ Create(*v1alpha1.Policy) (*v1alpha1.Policy, error)
+ Update(*v1alpha1.Policy) (*v1alpha1.Policy, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.Policy, error)
+ List(opts v1.ListOptions) (*v1alpha1.PolicyList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error)
+ PolicyExpansion
+}
+
+// policies implements PolicyInterface
+type policies struct {
+ client rest.Interface
+ ns string
+}
+
+// newPolicies returns a Policies
+func newPolicies(c *AuthenticationV1alpha1Client, namespace string) *policies {
+ return &policies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any.
+func (c *policies) Get(name string, options v1.GetOptions) (result *v1alpha1.Policy, err error) {
+ result = &v1alpha1.Policy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("policies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Policies that match those selectors.
+func (c *policies) List(opts v1.ListOptions) (result *v1alpha1.PolicyList, err error) {
+ result = &v1alpha1.PolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("policies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested policies.
+func (c *policies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("policies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any.
+func (c *policies) Create(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) {
+ result = &v1alpha1.Policy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("policies").
+ Body(policy).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any.
+func (c *policies) Update(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) {
+ result = &v1alpha1.Policy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("policies").
+ Name(policy.Name).
+ Body(policy).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the policy and deletes it. Returns an error if one occurs.
+func (c *policies) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("policies").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *policies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("policies").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched policy.
+func (c *policies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) {
+ result = &v1alpha1.Policy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("policies").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go
new file mode 100644
index 00000000000..242f213f3c3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ scheme "github.com/knative/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// DestinationRulesGetter has a method to return a DestinationRuleInterface.
+// A group's client should implement this interface.
+type DestinationRulesGetter interface {
+ DestinationRules(namespace string) DestinationRuleInterface
+}
+
+// DestinationRuleInterface has methods to work with DestinationRule resources.
+type DestinationRuleInterface interface {
+ Create(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error)
+ Update(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha3.DestinationRule, error)
+ List(opts v1.ListOptions) (*v1alpha3.DestinationRuleList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error)
+ DestinationRuleExpansion
+}
+
+// destinationRules implements DestinationRuleInterface
+type destinationRules struct {
+ client rest.Interface
+ ns string
+}
+
+// newDestinationRules returns a DestinationRules
+func newDestinationRules(c *NetworkingV1alpha3Client, namespace string) *destinationRules {
+ return &destinationRules{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any.
+func (c *destinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) {
+ result = &v1alpha3.DestinationRule{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of DestinationRules that match those selectors.
+func (c *destinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) {
+ result = &v1alpha3.DestinationRuleList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested destinationRules.
+func (c *destinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any.
+func (c *destinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) {
+ result = &v1alpha3.DestinationRule{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ Body(destinationRule).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any.
+func (c *destinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) {
+ result = &v1alpha3.DestinationRule{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ Name(destinationRule.Name).
+ Body(destinationRule).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs.
+func (c *destinationRules) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *destinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("destinationrules").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched destinationRule.
+func (c *destinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) {
+ result = &v1alpha3.DestinationRule{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("destinationrules").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go
new file mode 100644
index 00000000000..6046d1467fb
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha3
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go
new file mode 100644
index 00000000000..a00e5d7b21a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go
new file mode 100644
index 00000000000..e493908eb9e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_destinationrule.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDestinationRules implements DestinationRuleInterface
+type FakeDestinationRules struct {
+ Fake *FakeNetworkingV1alpha3
+ ns string
+}
+
+var destinationrulesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "destinationrules"}
+
+var destinationrulesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "DestinationRule"}
+
+// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any.
+func (c *FakeDestinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.DestinationRule), err
+}
+
+// List takes label and field selectors, and returns the list of DestinationRules that match those selectors.
+func (c *FakeDestinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(destinationrulesResource, destinationrulesKind, c.ns, opts), &v1alpha3.DestinationRuleList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha3.DestinationRuleList{ListMeta: obj.(*v1alpha3.DestinationRuleList).ListMeta}
+ for _, item := range obj.(*v1alpha3.DestinationRuleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested destinationRules.
+func (c *FakeDestinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(destinationrulesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any.
+func (c *FakeDestinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.DestinationRule), err
+}
+
+// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any.
+func (c *FakeDestinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(destinationrulesResource, c.ns, destinationRule), &v1alpha3.DestinationRule{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.DestinationRule), err
+}
+
+// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs.
+func (c *FakeDestinationRules) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(destinationrulesResource, c.ns, name), &v1alpha3.DestinationRule{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDestinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(destinationrulesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha3.DestinationRuleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched destinationRule.
+func (c *FakeDestinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(destinationrulesResource, c.ns, name, data, subresources...), &v1alpha3.DestinationRule{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.DestinationRule), err
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go
new file mode 100644
index 00000000000..2494149480b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_gateway.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeGateways implements GatewayInterface
+type FakeGateways struct {
+ Fake *FakeNetworkingV1alpha3
+ ns string
+}
+
+var gatewaysResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "gateways"}
+
+var gatewaysKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Gateway"}
+
+// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any.
+func (c *FakeGateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.Gateway), err
+}
+
+// List takes label and field selectors, and returns the list of Gateways that match those selectors.
+func (c *FakeGateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(gatewaysResource, gatewaysKind, c.ns, opts), &v1alpha3.GatewayList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha3.GatewayList{ListMeta: obj.(*v1alpha3.GatewayList).ListMeta}
+ for _, item := range obj.(*v1alpha3.GatewayList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested gateways.
+func (c *FakeGateways) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(gatewaysResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any.
+func (c *FakeGateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.Gateway), err
+}
+
+// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any.
+func (c *FakeGateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(gatewaysResource, c.ns, gateway), &v1alpha3.Gateway{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.Gateway), err
+}
+
+// Delete takes name of the gateway and deletes it. Returns an error if one occurs.
+func (c *FakeGateways) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(gatewaysResource, c.ns, name), &v1alpha3.Gateway{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeGateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(gatewaysResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha3.GatewayList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched gateway.
+func (c *FakeGateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(gatewaysResource, c.ns, name, data, subresources...), &v1alpha3.Gateway{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.Gateway), err
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go
new file mode 100644
index 00000000000..fddb1d4757a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_istio_client.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeNetworkingV1alpha3 struct {
+ *testing.Fake
+}
+
+func (c *FakeNetworkingV1alpha3) DestinationRules(namespace string) v1alpha3.DestinationRuleInterface {
+ return &FakeDestinationRules{c, namespace}
+}
+
+func (c *FakeNetworkingV1alpha3) Gateways(namespace string) v1alpha3.GatewayInterface {
+ return &FakeGateways{c, namespace}
+}
+
+func (c *FakeNetworkingV1alpha3) VirtualServices(namespace string) v1alpha3.VirtualServiceInterface {
+ return &FakeVirtualServices{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeNetworkingV1alpha3) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go
new file mode 100644
index 00000000000..98b25259ab7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake/fake_virtualservice.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVirtualServices implements VirtualServiceInterface
+type FakeVirtualServices struct {
+ Fake *FakeNetworkingV1alpha3
+ ns string
+}
+
+var virtualservicesResource = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "virtualservices"}
+
+var virtualservicesKind = schema.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "VirtualService"}
+
+// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any.
+func (c *FakeVirtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.VirtualService), err
+}
+
+// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
+func (c *FakeVirtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(virtualservicesResource, virtualservicesKind, c.ns, opts), &v1alpha3.VirtualServiceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha3.VirtualServiceList{ListMeta: obj.(*v1alpha3.VirtualServiceList).ListMeta}
+ for _, item := range obj.(*v1alpha3.VirtualServiceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested virtualServices.
+func (c *FakeVirtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(virtualservicesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any.
+func (c *FakeVirtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.VirtualService), err
+}
+
+// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any.
+func (c *FakeVirtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(virtualservicesResource, c.ns, virtualService), &v1alpha3.VirtualService{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.VirtualService), err
+}
+
+// Delete takes name of the virtualService and deletes it. Returns an error if one occurs.
+func (c *FakeVirtualServices) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(virtualservicesResource, c.ns, name), &v1alpha3.VirtualService{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVirtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(virtualservicesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha3.VirtualServiceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched virtualService.
+func (c *FakeVirtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(virtualservicesResource, c.ns, name, data, subresources...), &v1alpha3.VirtualService{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha3.VirtualService), err
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go
new file mode 100644
index 00000000000..151bfef4c0e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ scheme "github.com/knative/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// GatewaysGetter has a method to return a GatewayInterface.
+// A group's client should implement this interface.
+type GatewaysGetter interface {
+ Gateways(namespace string) GatewayInterface
+}
+
+// GatewayInterface has methods to work with Gateway resources.
+type GatewayInterface interface {
+ Create(*v1alpha3.Gateway) (*v1alpha3.Gateway, error)
+ Update(*v1alpha3.Gateway) (*v1alpha3.Gateway, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha3.Gateway, error)
+ List(opts v1.ListOptions) (*v1alpha3.GatewayList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error)
+ GatewayExpansion
+}
+
+// gateways implements GatewayInterface
+type gateways struct {
+ client rest.Interface
+ ns string
+}
+
+// newGateways returns a Gateways
+func newGateways(c *NetworkingV1alpha3Client, namespace string) *gateways {
+ return &gateways{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any.
+func (c *gateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) {
+ result = &v1alpha3.Gateway{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gateways").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Gateways that match those selectors.
+func (c *gateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) {
+ result = &v1alpha3.GatewayList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("gateways").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested gateways.
+func (c *gateways) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("gateways").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any.
+func (c *gateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) {
+ result = &v1alpha3.Gateway{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("gateways").
+ Body(gateway).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any.
+func (c *gateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) {
+ result = &v1alpha3.Gateway{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("gateways").
+ Name(gateway.Name).
+ Body(gateway).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the gateway and deletes it. Returns an error if one occurs.
+func (c *gateways) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gateways").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *gateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("gateways").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched gateway.
+func (c *gateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) {
+ result = &v1alpha3.Gateway{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("gateways").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go
new file mode 100644
index 00000000000..05e76516855
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha3
+
+type DestinationRuleExpansion interface{}
+
+type GatewayExpansion interface{}
+
+type VirtualServiceExpansion interface{}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go
new file mode 100644
index 00000000000..33bda021c3e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/pkg/client/clientset/versioned/scheme"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ rest "k8s.io/client-go/rest"
+)
+
+type NetworkingV1alpha3Interface interface {
+ RESTClient() rest.Interface
+ DestinationRulesGetter
+ GatewaysGetter
+ VirtualServicesGetter
+}
+
+// NetworkingV1alpha3Client is used to interact with features provided by the networking.istio.io group.
+type NetworkingV1alpha3Client struct {
+ restClient rest.Interface
+}
+
+func (c *NetworkingV1alpha3Client) DestinationRules(namespace string) DestinationRuleInterface {
+ return newDestinationRules(c, namespace)
+}
+
+func (c *NetworkingV1alpha3Client) Gateways(namespace string) GatewayInterface {
+ return newGateways(c, namespace)
+}
+
+func (c *NetworkingV1alpha3Client) VirtualServices(namespace string) VirtualServiceInterface {
+ return newVirtualServices(c, namespace)
+}
+
+// NewForConfig creates a new NetworkingV1alpha3Client for the given config.
+func NewForConfig(c *rest.Config) (*NetworkingV1alpha3Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &NetworkingV1alpha3Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new NetworkingV1alpha3Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha3Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new NetworkingV1alpha3Client for the given RESTClient.
+func New(c rest.Interface) *NetworkingV1alpha3Client {
+ return &NetworkingV1alpha3Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha3.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *NetworkingV1alpha3Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go
new file mode 100644
index 00000000000..11bbb624241
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ scheme "github.com/knative/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// VirtualServicesGetter has a method to return a VirtualServiceInterface.
+// A group's client should implement this interface.
+type VirtualServicesGetter interface {
+ VirtualServices(namespace string) VirtualServiceInterface
+}
+
+// VirtualServiceInterface has methods to work with VirtualService resources.
+type VirtualServiceInterface interface {
+ Create(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error)
+ Update(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha3.VirtualService, error)
+ List(opts v1.ListOptions) (*v1alpha3.VirtualServiceList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error)
+ VirtualServiceExpansion
+}
+
+// virtualServices implements VirtualServiceInterface
+type virtualServices struct {
+ client rest.Interface
+ ns string
+}
+
+// newVirtualServices returns a VirtualServices
+func newVirtualServices(c *NetworkingV1alpha3Client, namespace string) *virtualServices {
+ return &virtualServices{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any.
+func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) {
+ result = &v1alpha3.VirtualService{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of VirtualServices that match those selectors.
+func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) {
+ result = &v1alpha3.VirtualServiceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested virtualServices.
+func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any.
+func (c *virtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) {
+ result = &v1alpha3.VirtualService{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ Body(virtualService).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any.
+func (c *virtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) {
+ result = &v1alpha3.VirtualService{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ Name(virtualService.Name).
+ Body(virtualService).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the virtualService and deletes it. Returns an error if one occurs.
+func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("virtualservices").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched virtualService.
+func (c *virtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) {
+ result = &v1alpha3.VirtualService{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("virtualservices").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
new file mode 100644
index 00000000000..ec1ff75565a
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// DestinationRuleLister helps list DestinationRules.
+type DestinationRuleLister interface {
+ // List lists all DestinationRules in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error)
+ // DestinationRules returns an object that can list and get DestinationRules.
+ DestinationRules(namespace string) DestinationRuleNamespaceLister
+ DestinationRuleListerExpansion
+}
+
+// destinationRuleLister implements the DestinationRuleLister interface.
+type destinationRuleLister struct {
+ indexer cache.Indexer
+}
+
+// NewDestinationRuleLister returns a new DestinationRuleLister.
+func NewDestinationRuleLister(indexer cache.Indexer) DestinationRuleLister {
+ return &destinationRuleLister{indexer: indexer}
+}
+
+// List lists all DestinationRules in the indexer.
+func (s *destinationRuleLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.DestinationRule))
+ })
+ return ret, err
+}
+
+// DestinationRules returns an object that can list and get DestinationRules.
+func (s *destinationRuleLister) DestinationRules(namespace string) DestinationRuleNamespaceLister {
+ return destinationRuleNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// DestinationRuleNamespaceLister helps list and get DestinationRules.
+type DestinationRuleNamespaceLister interface {
+ // List lists all DestinationRules in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error)
+ // Get retrieves the DestinationRule from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha3.DestinationRule, error)
+ DestinationRuleNamespaceListerExpansion
+}
+
+// destinationRuleNamespaceLister implements the DestinationRuleNamespaceLister
+// interface.
+type destinationRuleNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all DestinationRules in the indexer for a given namespace.
+func (s destinationRuleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.DestinationRule))
+ })
+ return ret, err
+}
+
+// Get retrieves the DestinationRule from the indexer for a given namespace and name.
+func (s destinationRuleNamespaceLister) Get(name string) (*v1alpha3.DestinationRule, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha3.Resource("destinationrule"), name)
+ }
+ return obj.(*v1alpha3.DestinationRule), nil
+}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
new file mode 100644
index 00000000000..f3e2ec937f7
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha3
+
+// DestinationRuleListerExpansion allows custom methods to be added to
+// DestinationRuleLister.
+type DestinationRuleListerExpansion interface{}
+
+// DestinationRuleNamespaceListerExpansion allows custom methods to be added to
+// DestinationRuleNamespaceLister.
+type DestinationRuleNamespaceListerExpansion interface{}
+
+// GatewayListerExpansion allows custom methods to be added to
+// GatewayLister.
+type GatewayListerExpansion interface{}
+
+// GatewayNamespaceListerExpansion allows custom methods to be added to
+// GatewayNamespaceLister.
+type GatewayNamespaceListerExpansion interface{}
+
+// VirtualServiceListerExpansion allows custom methods to be added to
+// VirtualServiceLister.
+type VirtualServiceListerExpansion interface{}
+
+// VirtualServiceNamespaceListerExpansion allows custom methods to be added to
+// VirtualServiceNamespaceLister.
+type VirtualServiceNamespaceListerExpansion interface{}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
new file mode 100644
index 00000000000..62a78893a63
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// GatewayLister helps list Gateways.
+type GatewayLister interface {
+ // List lists all Gateways in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error)
+ // Gateways returns an object that can list and get Gateways.
+ Gateways(namespace string) GatewayNamespaceLister
+ GatewayListerExpansion
+}
+
+// gatewayLister implements the GatewayLister interface.
+type gatewayLister struct {
+ indexer cache.Indexer
+}
+
+// NewGatewayLister returns a new GatewayLister.
+func NewGatewayLister(indexer cache.Indexer) GatewayLister {
+ return &gatewayLister{indexer: indexer}
+}
+
+// List lists all Gateways in the indexer.
+func (s *gatewayLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.Gateway))
+ })
+ return ret, err
+}
+
+// Gateways returns an object that can list and get Gateways.
+func (s *gatewayLister) Gateways(namespace string) GatewayNamespaceLister {
+ return gatewayNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// GatewayNamespaceLister helps list and get Gateways.
+type GatewayNamespaceLister interface {
+ // List lists all Gateways in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error)
+ // Get retrieves the Gateway from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha3.Gateway, error)
+ GatewayNamespaceListerExpansion
+}
+
+// gatewayNamespaceLister implements the GatewayNamespaceLister
+// interface.
+type gatewayNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Gateways in the indexer for a given namespace.
+func (s gatewayNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.Gateway))
+ })
+ return ret, err
+}
+
+// Get retrieves the Gateway from the indexer for a given namespace and name.
+func (s gatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha3.Resource("gateway"), name)
+ }
+ return obj.(*v1alpha3.Gateway), nil
+}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
new file mode 100644
index 00000000000..3284cda81bd
--- /dev/null
+++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// VirtualServiceLister helps list VirtualServices.
+type VirtualServiceLister interface {
+ // List lists all VirtualServices in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error)
+ // VirtualServices returns an object that can list and get VirtualServices.
+ VirtualServices(namespace string) VirtualServiceNamespaceLister
+ VirtualServiceListerExpansion
+}
+
+// virtualServiceLister implements the VirtualServiceLister interface.
+type virtualServiceLister struct {
+ indexer cache.Indexer
+}
+
+// NewVirtualServiceLister returns a new VirtualServiceLister.
+func NewVirtualServiceLister(indexer cache.Indexer) VirtualServiceLister {
+ return &virtualServiceLister{indexer: indexer}
+}
+
+// List lists all VirtualServices in the indexer.
+func (s *virtualServiceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.VirtualService))
+ })
+ return ret, err
+}
+
+// VirtualServices returns an object that can list and get VirtualServices.
+func (s *virtualServiceLister) VirtualServices(namespace string) VirtualServiceNamespaceLister {
+ return virtualServiceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// VirtualServiceNamespaceLister helps list and get VirtualServices.
+type VirtualServiceNamespaceLister interface {
+ // List lists all VirtualServices in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error)
+ // Get retrieves the VirtualService from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha3.VirtualService, error)
+ VirtualServiceNamespaceListerExpansion
+}
+
+// virtualServiceNamespaceLister implements the VirtualServiceNamespaceLister
+// interface.
+type virtualServiceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all VirtualServices in the indexer for a given namespace.
+func (s virtualServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha3.VirtualService))
+ })
+ return ret, err
+}
+
+// Get retrieves the VirtualService from the indexer for a given namespace and name.
+func (s virtualServiceNamespaceLister) Get(name string) (*v1alpha3.VirtualService, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha3.Resource("virtualservice"), name)
+ }
+ return obj.(*v1alpha3.VirtualService), nil
+}
diff --git a/vendor/github.com/knative/pkg/controller/controller.go b/vendor/github.com/knative/pkg/controller/controller.go
new file mode 100644
index 00000000000..5131f3e70d9
--- /dev/null
+++ b/vendor/github.com/knative/pkg/controller/controller.go
@@ -0,0 +1,379 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/knative/pkg/kmeta"
+ "github.com/knative/pkg/logging"
+ "github.com/knative/pkg/logging/logkey"
+)
+
+const (
+ falseString = "false"
+ trueString = "true"
+)
+
+var (
+ // DefaultThreadsPerController is the number of threads to use
+ // when processing the controller's workqueue. Controller binaries
+ // may adjust this process-wide default. For finer control, invoke
+ // Run on the controller directly.
+ DefaultThreadsPerController = 2
+)
+
+// Reconciler is the interface that controller implementations are expected
+// to implement, so that the shared controller.Impl can drive work through it.
+type Reconciler interface {
+ Reconcile(ctx context.Context, key string) error
+}
+
+// PassNew makes it simple to create an UpdateFunc for use with
+// cache.ResourceEventHandlerFuncs that can delegate the same methods
+// as AddFunc/DeleteFunc but passing through only the second argument
+// (which is the "new" object).
+func PassNew(f func(interface{})) func(interface{}, interface{}) {
+ return func(first, second interface{}) {
+ f(second)
+ }
+}
+
+// Filter makes it simple to create FilterFunc's for use with
+// cache.FilteringResourceEventHandler that filter based on the
+// schema.GroupVersionKind of the controlling resources.
+func Filter(gvk schema.GroupVersionKind) func(obj interface{}) bool {
+ return func(obj interface{}) bool {
+ if object, ok := obj.(metav1.Object); ok {
+ owner := metav1.GetControllerOf(object)
+ return owner != nil &&
+ owner.APIVersion == gvk.GroupVersion().String() &&
+ owner.Kind == gvk.Kind
+ }
+ return false
+ }
+}
+
+// Impl is our core controller implementation. It handles queuing and feeding work
+// from the queue to an implementation of Reconciler.
+type Impl struct {
+ // Reconciler is the workhorse of this controller, it is fed the keys
+ // from the workqueue to process. Public for testing.
+ Reconciler Reconciler
+
+ // WorkQueue is a rate limited work queue. This is used to queue work to be
+ // processed instead of performing it as soon as a change happens. This
+ // means we can ensure we only process a fixed amount of resources at a
+ // time, and makes it easy to ensure we are never processing the same item
+ // simultaneously in two different workers.
+ WorkQueue workqueue.RateLimitingInterface
+
+ // Sugared logger is easier to use but is not as performant as the
+ // raw logger. In performance critical paths, call logger.Desugar()
+ // and use the returned raw logger instead. In addition to the
+ // performance benefits, raw logger also preserves type-safety at
+ // the expense of slightly greater verbosity.
+ logger *zap.SugaredLogger
+
+ // StatsReporter is used to send common controller metrics.
+ statsReporter StatsReporter
+}
+
+// NewImpl instantiates an instance of our controller that will feed work to the
+// provided Reconciler as it is enqueued.
+func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl {
+ return &Impl{
+ Reconciler: r,
+ WorkQueue: workqueue.NewNamedRateLimitingQueue(
+ workqueue.DefaultControllerRateLimiter(),
+ workQueueName,
+ ),
+ logger: logger,
+ statsReporter: reporter,
+ }
+}
+
+// Enqueue takes a resource, converts it into a namespace/name string,
+// and passes it to EnqueueKey.
+func (c *Impl) Enqueue(obj interface{}) {
+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+ if err != nil {
+ c.logger.Errorw("Enqueue", zap.Error(err))
+ return
+ }
+ c.EnqueueKey(key)
+}
+
+// EnqueueControllerOf takes a resource, identifies its controller resource,
+// converts it into a namespace/name string, and passes that to EnqueueKey.
+func (c *Impl) EnqueueControllerOf(obj interface{}) {
+ object, err := kmeta.DeletionHandlingAccessor(obj)
+ if err != nil {
+ c.logger.Error(err)
+ return
+ }
+
+ // If we can determine the controller ref of this object, then
+ // add that object to our workqueue.
+ if owner := metav1.GetControllerOf(object); owner != nil {
+ c.EnqueueKey(object.GetNamespace() + "/" + owner.Name)
+ }
+}
+
+// EnqueueLabelOfNamespaceScopedResource returns with an Enqueue func that
+// takes a resource, identifies its controller resource through given namespace
+// and name labels, converts it into a namespace/name string, and passes that
+// to EnqueueKey. The controller resource must be of namespace-scoped.
+func (c *Impl) EnqueueLabelOfNamespaceScopedResource(namespaceLabel, nameLabel string) func(obj interface{}) {
+ return func(obj interface{}) {
+ object, err := kmeta.DeletionHandlingAccessor(obj)
+ if err != nil {
+ c.logger.Error(err)
+ return
+ }
+
+ labels := object.GetLabels()
+ controllerKey, ok := labels[nameLabel]
+ if !ok {
+ c.logger.Debugf("Object %s/%s does not have a referring name label %s",
+ object.GetNamespace(), object.GetName(), nameLabel)
+ return
+ }
+
+ if namespaceLabel != "" {
+ controllerNamespace, ok := labels[namespaceLabel]
+ if !ok {
+ c.logger.Debugf("Object %s/%s does not have a referring namespace label %s",
+ object.GetNamespace(), object.GetName(), namespaceLabel)
+ return
+ }
+
+ c.EnqueueKey(fmt.Sprintf("%s/%s", controllerNamespace, controllerKey))
+ return
+ }
+
+ // Pass through namespace of the object itself if no namespace label specified.
+ // This is for the scenario that object and the parent resource are of same namespace,
+ // e.g. to enqueue the revision of an endpoint.
+ c.EnqueueKey(fmt.Sprintf("%s/%s", object.GetNamespace(), controllerKey))
+ }
+}
+
+// EnqueueLabelOfClusterScopedResource returns with an Enqueue func
+// that takes a resource, identifies its controller resource through
+// given name label, and passes it to EnqueueKey.
+// The controller resource must be of cluster-scoped.
+func (c *Impl) EnqueueLabelOfClusterScopedResource(nameLabel string) func(obj interface{}) {
+ return func(obj interface{}) {
+ object, err := kmeta.DeletionHandlingAccessor(obj)
+ if err != nil {
+ c.logger.Error(err)
+ return
+ }
+
+ labels := object.GetLabels()
+ controllerKey, ok := labels[nameLabel]
+ if !ok {
+ c.logger.Debugf("Object %s/%s does not have a referring name label %s",
+ object.GetNamespace(), object.GetName(), nameLabel)
+ return
+ }
+
+ c.EnqueueKey(controllerKey)
+ }
+}
+
+// EnqueueKey takes a namespace/name string and puts it onto the work queue.
+func (c *Impl) EnqueueKey(key string) {
+ c.WorkQueue.Add(key)
+}
+
+// Run starts the controller's worker threads, the number of which is threadiness.
+// It then blocks until stopCh is closed, at which point it shuts down its internal
+// work queue and waits for workers to finish processing their current work items.
+func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+ defer runtime.HandleCrash()
+ sg := sync.WaitGroup{}
+ defer sg.Wait()
+ defer c.WorkQueue.ShutDown()
+
+ // Launch workers to process resources that get enqueued to our workqueue.
+ logger := c.logger
+ logger.Info("Starting controller and workers")
+ for i := 0; i < threadiness; i++ {
+ sg.Add(1)
+ go func() {
+ defer sg.Done()
+ for c.processNextWorkItem() {
+ }
+ }()
+ }
+
+ logger.Info("Started workers")
+ <-stopCh
+ logger.Info("Shutting down workers")
+
+ return nil
+}
+
+// processNextWorkItem will read a single work item off the workqueue and
+// attempt to process it, by calling Reconcile on our Reconciler.
+func (c *Impl) processNextWorkItem() bool {
+ obj, shutdown := c.WorkQueue.Get()
+ if shutdown {
+ return false
+ }
+ key := obj.(string)
+
+ startTime := time.Now()
+ // Send the metrics for the current queue depth
+ c.statsReporter.ReportQueueDepth(int64(c.WorkQueue.Len()))
+
+ // We call Done here so the workqueue knows we have finished
+ // processing this item. We also must remember to call Forget if
+ // reconcile succeeds. If a transient error occurs, we do not call
+ // Forget and put the item back to the queue with an increased
+ // delay.
+ defer c.WorkQueue.Done(key)
+
+ var err error
+ defer func() {
+ status := trueString
+ if err != nil {
+ status = falseString
+ }
+ c.statsReporter.ReportReconcile(time.Since(startTime), key, status)
+ }()
+
+ // Embed the key into the logger and attach that to the context we pass
+ // to the Reconciler.
+ logger := c.logger.With(zap.String(logkey.Key, key))
+ ctx := logging.WithLogger(context.TODO(), logger)
+
+ // Run Reconcile, passing it the namespace/name string of the
+ // resource to be synced.
+ if err = c.Reconciler.Reconcile(ctx, key); err != nil {
+ c.handleErr(err, key)
+ logger.Infof("Reconcile failed. Time taken: %v.", time.Since(startTime))
+ return true
+ }
+
+ // Finally, if no error occurs we Forget this item so it does not
+ // have any delay when another change happens.
+ c.WorkQueue.Forget(key)
+ logger.Infof("Reconcile succeeded. Time taken: %v.", time.Since(startTime))
+
+ return true
+}
+
+func (c *Impl) handleErr(err error, key string) {
+ c.logger.Errorw("Reconcile error", zap.Error(err))
+
+	// Re-queue the key if it's a transient error.
+ if !IsPermanentError(err) {
+ c.WorkQueue.AddRateLimited(key)
+ return
+ }
+
+ c.WorkQueue.Forget(key)
+}
+
+// GlobalResync enqueues all objects from the passed SharedInformer
+func (c *Impl) GlobalResync(si cache.SharedInformer) {
+ for _, key := range si.GetStore().ListKeys() {
+ c.EnqueueKey(key)
+ }
+}
+
+// NewPermanentError returns a new instance of permanentError.
+// Users can wrap an error as permanentError with this in reconcile,
+// when they do not expect the key to get re-queued.
+func NewPermanentError(err error) error {
+ return permanentError{e: err}
+}
+
+// permanentError is an error that is considered not transient.
+// We should not re-queue keys when it returns with this error in reconcile.
+type permanentError struct {
+ e error
+}
+
+// IsPermanentError returns true if given error is permanentError
+func IsPermanentError(err error) bool {
+ switch err.(type) {
+ case permanentError:
+ return true
+ default:
+ return false
+ }
+}
+
+// Error implements the Error() interface of error.
+func (err permanentError) Error() string {
+ if err.e == nil {
+ return ""
+ }
+
+ return err.e.Error()
+}
+
+// Informer is the group of methods that a type must implement to be passed to
+// StartInformers.
+type Informer interface {
+ Run(<-chan struct{})
+ HasSynced() bool
+}
+
+// StartInformers kicks off all of the passed informers and then waits for all
+// of them to synchronize.
+func StartInformers(stopCh <-chan struct{}, informers ...Informer) error {
+ for _, informer := range informers {
+ informer := informer
+ go informer.Run(stopCh)
+ }
+
+ for i, informer := range informers {
+ if ok := cache.WaitForCacheSync(stopCh, informer.HasSynced); !ok {
+ return fmt.Errorf("Failed to wait for cache at index %d to sync", i)
+ }
+ }
+ return nil
+}
+
+// StartAll kicks off all of the passed controllers with DefaultThreadsPerController.
+func StartAll(stopCh <-chan struct{}, controllers ...*Impl) {
+ wg := sync.WaitGroup{}
+ // Start all of the controllers.
+ for _, ctrlr := range controllers {
+ wg.Add(1)
+ go func(c *Impl) {
+ defer wg.Done()
+ c.Run(DefaultThreadsPerController, stopCh)
+ }(ctrlr)
+ }
+ wg.Wait()
+}
diff --git a/vendor/github.com/knative/pkg/controller/helper.go b/vendor/github.com/knative/pkg/controller/helper.go
new file mode 100644
index 00000000000..887d715eaec
--- /dev/null
+++ b/vendor/github.com/knative/pkg/controller/helper.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/pkg/kmeta"
+)
+
+type Callback func(interface{})
+
+func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback {
+ apiVersion, kind := gvk.ToAPIVersionAndKind()
+
+ return func(untyped interface{}) {
+ typed, err := kmeta.DeletionHandlingAccessor(untyped)
+ if err != nil {
+ // TODO: We should consider logging here.
+ return
+ }
+		// We need to populate TypeMeta, but cannot trample the
+ // informer's copy.
+ // TODO(mattmoor): Avoid the copy if TypeMeta is set.
+ copy := typed.DeepCopyObject()
+
+ accessor, err := meta.TypeAccessor(copy)
+ if err != nil {
+ return
+ }
+ accessor.SetAPIVersion(apiVersion)
+ accessor.SetKind(kind)
+
+ // Pass in the mutated copy (accessor is not just a type cast)
+ f(copy)
+ }
+}
+
+// SendGlobalUpdates triggers an update event for all objects from the
+// passed SharedInformer.
+//
+// Since this is triggered not by a real update of these objects
+// themselves, we have no way of knowing the change to these objects
+// if any, so we call handler.OnUpdate(obj, obj) for all of them
+// regardless if they have changes or not.
+func SendGlobalUpdates(si cache.SharedInformer, handler cache.ResourceEventHandler) {
+ store := si.GetStore()
+ for _, obj := range store.List() {
+ handler.OnUpdate(obj, obj)
+ }
+}
diff --git a/vendor/github.com/knative/pkg/controller/stats_reporter.go b/vendor/github.com/knative/pkg/controller/stats_reporter.go
new file mode 100644
index 00000000000..2b0cc823182
--- /dev/null
+++ b/vendor/github.com/knative/pkg/controller/stats_reporter.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/knative/pkg/metrics"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+)
+
+var (
+ workQueueDepthStat = stats.Int64("work_queue_depth", "Depth of the work queue", stats.UnitNone)
+ reconcileCountStat = stats.Int64("reconcile_count", "Number of reconcile operations", stats.UnitNone)
+ reconcileLatencyStat = stats.Int64("reconcile_latency", "Latency of reconcile operations", stats.UnitMilliseconds)
+
+ // reconcileDistribution defines the bucket boundaries for the histogram of reconcile latency metric.
+ // Bucket boundaries are 10ms, 100ms, 1s, 10s, 30s and 60s.
+ reconcileDistribution = view.Distribution(10, 100, 1000, 10000, 30000, 60000)
+
+ // Create the tag keys that will be used to add tags to our measurements.
+ // Tag keys must conform to the restrictions described in
+ // go.opencensus.io/tag/validate.go. Currently those restrictions are:
+ // - length between 1 and 255 inclusive
+ // - characters are printable US-ASCII
+ reconcilerTagKey = mustNewTagKey("reconciler")
+ keyTagKey = mustNewTagKey("key")
+ successTagKey = mustNewTagKey("success")
+)
+
+func init() {
+ // Create views to see our measurements. This can return an error if
+ // a previously-registered view has the same name with a different value.
+ // View name defaults to the measure name if unspecified.
+ err := view.Register(
+ &view.View{
+ Description: "Depth of the work queue",
+ Measure: workQueueDepthStat,
+ Aggregation: view.LastValue(),
+ TagKeys: []tag.Key{reconcilerTagKey},
+ },
+ &view.View{
+ Description: "Number of reconcile operations",
+ Measure: reconcileCountStat,
+ Aggregation: view.Count(),
+ TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey},
+ },
+ &view.View{
+ Description: "Latency of reconcile operations",
+ Measure: reconcileLatencyStat,
+ Aggregation: reconcileDistribution,
+ TagKeys: []tag.Key{reconcilerTagKey, keyTagKey, successTagKey},
+ },
+ )
+ if err != nil {
+ panic(err)
+ }
+}
+
+// StatsReporter defines the interface for sending metrics
+type StatsReporter interface {
+ // ReportQueueDepth reports the queue depth metric
+ ReportQueueDepth(v int64) error
+
+ // ReportReconcile reports the count and latency metrics for a reconcile operation
+ ReportReconcile(duration time.Duration, key, success string) error
+}
+
+// Reporter holds cached metric objects to report metrics
+type reporter struct {
+ reconciler string
+ globalCtx context.Context
+}
+
+// NewStatsReporter creates a reporter that collects and reports metrics
+func NewStatsReporter(reconciler string) (StatsReporter, error) {
+ // Reconciler tag is static. Create a context containing that and cache it.
+ ctx, err := tag.New(
+ context.Background(),
+ tag.Insert(reconcilerTagKey, reconciler))
+ if err != nil {
+ return nil, err
+ }
+
+ return &reporter{reconciler: reconciler, globalCtx: ctx}, nil
+}
+
+// ReportQueueDepth reports the queue depth metric
+func (r *reporter) ReportQueueDepth(v int64) error {
+ if r.globalCtx == nil {
+ return errors.New("reporter is not initialized correctly")
+ }
+ metrics.Record(r.globalCtx, workQueueDepthStat.M(v))
+ return nil
+}
+
+// ReportReconcile reports the count and latency metrics for a reconcile operation
+func (r *reporter) ReportReconcile(duration time.Duration, key, success string) error {
+ ctx, err := tag.New(
+ context.Background(),
+ tag.Insert(reconcilerTagKey, r.reconciler),
+ tag.Insert(keyTagKey, key),
+ tag.Insert(successTagKey, success))
+ if err != nil {
+ return err
+ }
+
+ metrics.Record(ctx, reconcileCountStat.M(1))
+ metrics.Record(ctx, reconcileLatencyStat.M(int64(duration/time.Millisecond)))
+ return nil
+}
+
+func mustNewTagKey(s string) tag.Key {
+ tagKey, err := tag.NewKey(s)
+ if err != nil {
+ panic(err)
+ }
+ return tagKey
+}
diff --git a/vendor/github.com/knative/pkg/logging/testing/util.go b/vendor/github.com/knative/pkg/logging/testing/util.go
new file mode 100644
index 00000000000..05179648fb8
--- /dev/null
+++ b/vendor/github.com/knative/pkg/logging/testing/util.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zaptest"
+
+ "github.com/knative/pkg/logging"
+)
+
+var (
+	loggers = make(map[string]*zap.SugaredLogger) // cache of loggers keyed by test name; see ClearAll
+	m       sync.Mutex                            // guards loggers
+)
+
+// TestLogger gets a logger to use in unit and end to end tests. Loggers are
+// cached per test name so repeated calls within one test share an instance.
+func TestLogger(t *testing.T) *zap.SugaredLogger {
+	m.Lock()
+	defer m.Unlock()
+
+	logger, ok := loggers[t.Name()]
+
+	if ok {
+		return logger
+	}
+
+	opts := zaptest.WrapOptions(
+		zap.AddCaller(),
+		zap.Development(),
+	)
+
+	logger = zaptest.NewLogger(t, opts).Sugar().Named(t.Name())
+	loggers[t.Name()] = logger
+
+	return logger
+}
+
+// ClearAll removes all the testing loggers.
+// `go test -count=X` executes runs in the same process, thus the map
+// persists between the runs, but the `t` will no longer be valid and will
+// cause a panic deep inside testing code.
+func ClearAll() {
+	loggers = make(map[string]*zap.SugaredLogger) // NOTE(review): not guarded by m, unlike TestLogger — assumed to be called between runs, not concurrently
+}
+
+// TestContextWithLogger returns a context carrying the cached test logger for t.
+func TestContextWithLogger(t *testing.T) context.Context {
+	return logging.WithLogger(context.TODO(), TestLogger(t))
+}
diff --git a/vendor/github.com/knative/pkg/metrics/config.go b/vendor/github.com/knative/pkg/metrics/config.go
new file mode 100644
index 00000000000..e57aec821c6
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/config.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// metricsBackend specifies the backend to use for metrics.
+type metricsBackend string
+
+const (
+	// The following keys are used to configure metrics reporting.
+	// See https://github.com/knative/serving/blob/master/config/config-observability.yaml
+	// for details.
+	AllowStackdriverCustomMetricsKey = "metrics.allow-stackdriver-custom-metrics"
+	BackendDestinationKey            = "metrics.backend-destination"
+	ReportingPeriodKey               = "metrics.reporting-period-seconds"
+	StackdriverProjectIDKey          = "metrics.stackdriver-project-id"
+
+	// Stackdriver is used for Stackdriver backend
+	Stackdriver metricsBackend = "stackdriver"
+	// Prometheus is used for Prometheus backend
+	Prometheus metricsBackend = "prometheus"
+
+	defaultBackendEnvName = "DEFAULT_METRICS_BACKEND" // env var consulted when the config map has no backend setting
+
+	defaultPrometheusPort = 9090
+	maxPrometheusPort     = 65535 // upper bound for a valid, non-privileged scrape port
+	minPrometheusPort     = 1024  // below this requires elevated privileges
+)
+
+// ExporterOptions contains options for configuring the exporter.
+type ExporterOptions struct {
+	// Domain is the metrics domain. e.g. "knative.dev". Must be present.
+	Domain string
+
+	// Component is the name of the component that emits the metrics. e.g.
+	// "activator", "queue_proxy". Should contain only alphabetic characters and
+	// underscores. Must be present.
+	Component string
+
+	// PrometheusPort is the port to expose metrics if metrics backend is Prometheus.
+	// It should be between minPrometheusPort and maxPrometheusPort. 0 value means
+	// using the default 9090 value. It is ignored if metrics backend is not
+	// Prometheus.
+	PrometheusPort int
+
+	// ConfigMap is the data from config map config-observability. Must be present.
+	// See https://github.com/knative/serving/blob/master/config/config-observability.yaml
+	// for details.
+	ConfigMap map[string]string
+}
+
+// metricsConfig is the validated, internal form of ExporterOptions produced by
+// getMetricsConfig and consumed by the exporter constructors.
+type metricsConfig struct {
+	// The metrics domain. e.g. "serving.knative.dev" or "build.knative.dev".
+	domain string
+	// The component that emits the metrics. e.g. "activator", "autoscaler".
+	component string
+	// The metrics backend destination.
+	backendDestination metricsBackend
+	// reportingPeriod specifies the interval between reporting aggregated views.
+	// If duration is less than or equal to zero, it enables the default behavior.
+	reportingPeriod time.Duration
+
+	// ---- Prometheus specific below ----
+	// prometheusPort is the port where metrics are exposed in Prometheus
+	// format. It defaults to 9090.
+	prometheusPort int
+
+	// ---- Stackdriver specific below ----
+	// stackdriverProjectID is the stackdriver project ID where the stats data are
+	// uploaded to. This is not the GCP project ID.
+	stackdriverProjectID string
+	// allowStackdriverCustomMetrics indicates whether it is allowed to send metrics to
+	// Stackdriver using "global" resource type and custom metric type if the
+	// metrics are not supported by "knative_revision" resource type. Setting this
+	// flag to "true" could cause extra Stackdriver charge.
+	// If backendDestination is not Stackdriver, this is ignored.
+	allowStackdriverCustomMetrics bool
+	// True if backendDestination equals to "stackdriver". Store this in a variable
+	// to reduce string comparison operations.
+	isStackdriverBackend bool
+	// stackdriverMetricTypePrefix is the metric domain joined with the component, e.g.
+	// "knative.dev/serving/activator". Store this in a variable to reduce string
+	// join operations.
+	stackdriverMetricTypePrefix string
+	// stackdriverCustomMetricTypePrefix is "custom.googleapis.com/knative.dev" joined with
+	// the component, e.g. "custom.googleapis.com/knative.dev/serving/activator".
+	// Store this in a variable to reduce string join operations.
+	stackdriverCustomMetricTypePrefix string
+}
+
+func getMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metricsConfig, error) { // validates ops and its ConfigMap into a metricsConfig; NOTE(review): logger is currently unused
+	var mc metricsConfig
+
+	if ops.Domain == "" {
+		return nil, errors.New("metrics domain cannot be empty")
+	}
+	mc.domain = ops.Domain
+
+	if ops.Component == "" {
+		return nil, errors.New("metrics component name cannot be empty")
+	}
+	mc.component = ops.Component
+
+	if ops.ConfigMap == nil {
+		return nil, errors.New("metrics config map cannot be empty")
+	}
+	m := ops.ConfigMap
+	// Read backend setting from environment variable first
+	backend := os.Getenv(defaultBackendEnvName)
+	if backend == "" {
+		// Use Prometheus if DEFAULT_METRICS_BACKEND does not exist or is empty
+		backend = string(Prometheus)
+	}
+	// Override backend if it is set in the config map.
+	if backendFromConfig, ok := m[BackendDestinationKey]; ok {
+		backend = backendFromConfig
+	}
+	lb := metricsBackend(strings.ToLower(backend)) // backend matching is case-insensitive
+	switch lb {
+	case Stackdriver, Prometheus:
+		mc.backendDestination = lb
+	default:
+		return nil, fmt.Errorf("unsupported metrics backend value %q", backend)
+	}
+
+	if mc.backendDestination == Prometheus {
+		pp := ops.PrometheusPort
+		if pp == 0 {
+			pp = defaultPrometheusPort
+		}
+		if pp < minPrometheusPort || pp > maxPrometheusPort {
+			return nil, fmt.Errorf("invalid port %v, should between %v and %v", pp, minPrometheusPort, maxPrometheusPort)
+		}
+		mc.prometheusPort = pp
+	}
+
+	// If stackdriverProjectIDKey is not provided for stackdriver backend destination, OpenCensus will try to
+	// use the application default credentials. If that is not available, Opencensus would fail to create the
+	// metrics exporter.
+	if mc.backendDestination == Stackdriver {
+		mc.stackdriverProjectID = m[StackdriverProjectIDKey]
+		mc.isStackdriverBackend = true
+		mc.stackdriverMetricTypePrefix = path.Join(mc.domain, mc.component)
+		mc.stackdriverCustomMetricTypePrefix = path.Join(customMetricTypePrefix, mc.component)
+		if ascmStr, ok := m[AllowStackdriverCustomMetricsKey]; ok && ascmStr != "" {
+			ascmBool, err := strconv.ParseBool(ascmStr)
+			if err != nil {
+				return nil, fmt.Errorf("invalid %s value %q", AllowStackdriverCustomMetricsKey, ascmStr)
+			}
+			mc.allowStackdriverCustomMetrics = ascmBool
+		}
+	}
+
+	// If reporting period is specified, use the value from the configuration.
+	// If not, set a default value based on the selected backend.
+	// Each exporter makes different promises about what the lowest supported
+	// reporting period is. For Stackdriver, this value is 1 minute.
+	// For Prometheus, we will use a lower value since the exporter doesn't
+	// push anything but just responds to pull requests, and shorter durations
+	// do not really hurt the performance and we rely on the scraping configuration.
+	if repStr, ok := m[ReportingPeriodKey]; ok && repStr != "" {
+		repInt, err := strconv.Atoi(repStr)
+		if err != nil {
+			return nil, fmt.Errorf("invalid %s value %q", ReportingPeriodKey, repStr)
+		}
+		mc.reportingPeriod = time.Duration(repInt) * time.Second
+	} else if mc.backendDestination == Stackdriver {
+		mc.reportingPeriod = 60 * time.Second
+	} else if mc.backendDestination == Prometheus {
+		mc.reportingPeriod = 5 * time.Second
+	}
+
+	return &mc, nil
+}
+
+// UpdateExporterFromConfigMap returns a helper func that can be used to update the exporter
+// when a config map is updated. Errors from UpdateExporter are logged there and
+// intentionally dropped here, since config-map watch callbacks return nothing.
+// DEPRECATED. Use UpdateExporter instead.
+func UpdateExporterFromConfigMap(domain string, component string, logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) {
+	return func(configMap *corev1.ConfigMap) {
+		UpdateExporter(ExporterOptions{
+			Domain:    domain,
+			Component: component,
+			ConfigMap: configMap.Data,
+		}, logger)
+	}
+}
+
+// UpdateExporter updates the exporter based on the given ExporterOptions.
+// It validates the options, swaps in a new exporter only when required (see
+// isNewExporterRequired), and always refreshes the cached config on success.
+func UpdateExporter(ops ExporterOptions, logger *zap.SugaredLogger) error {
+	newConfig, err := getMetricsConfig(ops, logger)
+	if err != nil {
+		if ce := getCurMetricsExporter(); ce == nil {
+			// Fail the process if there doesn't exist an exporter.
+			logger.Errorw("Failed to get a valid metrics config", zap.Error(err))
+		} else {
+			logger.Errorw("Failed to get a valid metrics config; Skip updating the metrics exporter", zap.Error(err))
+		}
+		return err
+	}
+
+	if isNewExporterRequired(newConfig) {
+		e, err := newMetricsExporter(newConfig, logger)
+		if err != nil {
+			logger.Errorf("Failed to update a new metrics exporter based on metric config %v. error: %v", newConfig, err)
+			return err
+		}
+		existingConfig := getCurMetricsConfig()
+		setCurMetricsExporter(e)
+		logger.Infof("Successfully updated the metrics exporter; old config: %v; new config %v", existingConfig, newConfig)
+	}
+
+	setCurMetricsConfig(newConfig) // also re-applies the reporting period
+	return nil
+}
+
+// isNewExporterRequired reports whether the active exporter must be replaced to
+// honor newConfig: on first configuration, on a backend change, or on a
+// Stackdriver project ID change while using the Stackdriver backend.
+func isNewExporterRequired(newConfig *metricsConfig) bool {
+	cc := getCurMetricsConfig()
+	switch {
+	case cc == nil, newConfig.backendDestination != cc.backendDestination:
+		return true
+	case newConfig.backendDestination == Stackdriver:
+		return newConfig.stackdriverProjectID != cc.stackdriverProjectID
+	}
+	return false
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go b/vendor/github.com/knative/pkg/metrics/doc.go
similarity index 72%
rename from vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go
rename to vendor/github.com/knative/pkg/metrics/doc.go
index a14dba3b734..631bb5966cf 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/build_template_interface.go
+++ b/vendor/github.com/knative/pkg/metrics/doc.go
@@ -1,12 +1,9 @@
/*
Copyright 2018 The Knative Authors
-
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
http://www.apache.org/licenses/LICENSE-2.0
-
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -14,10 +11,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
-
-// BuildTemplateInterface is implemented by BuildTemplate and ClusterBuildTemplate
-type BuildTemplateInterface interface {
- TemplateSpec() BuildTemplateSpec
- Copy() BuildTemplateInterface
-}
+// Package metrics provides Knative utilities for exporting metrics to Stackdriver
+// backend or Prometheus backend based on config-observability settings.
+package metrics
diff --git a/vendor/github.com/knative/pkg/metrics/exporter.go b/vendor/github.com/knative/pkg/metrics/exporter.go
new file mode 100644
index 00000000000..238f400f06c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/exporter.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2018 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "sync"
+
+ "go.opencensus.io/stats/view"
+ "go.uber.org/zap"
+)
+
+var (
+	curMetricsExporter view.Exporter  // currently registered exporter, if any
+	curMetricsConfig   *metricsConfig // config matching curMetricsExporter
+	metricsMux         sync.Mutex     // guards both variables above
+)
+
+// newMetricsExporter creates a metrics exporter for the config's backend,
+// first stopping any Prometheus server and unregistering the old exporter.
+func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
+	// If there is a Prometheus Exporter server running, stop it.
+	resetCurPromSrv()
+	ce := getCurMetricsExporter()
+	if ce != nil {
+		// UnregisterExporter is idempotent and it can be called multiple times for the same exporter
+		// without side effects.
+		view.UnregisterExporter(ce)
+	}
+	var err error
+	var e view.Exporter
+	switch config.backendDestination {
+	case Stackdriver:
+		e, err = newStackdriverExporter(config, logger)
+	case Prometheus:
+		e, err = newPrometheusExporter(config, logger)
+	default:
+		// Lowercase + %q for consistency with getMetricsConfig's error string.
+		err = fmt.Errorf("unsupported metrics backend %q", config.backendDestination)
+	}
+	if err != nil {
+		return nil, err
+	}
+	return e, nil
+}
+
+// getCurMetricsExporter returns the currently registered exporter (may be nil).
+func getCurMetricsExporter() view.Exporter {
+	metricsMux.Lock()
+	defer metricsMux.Unlock()
+	return curMetricsExporter
+}
+
+// setCurMetricsExporter registers e with OpenCensus and caches it as current.
+func setCurMetricsExporter(e view.Exporter) {
+	metricsMux.Lock()
+	defer metricsMux.Unlock()
+	view.RegisterExporter(e)
+	curMetricsExporter = e
+}
+
+// getCurMetricsConfig returns the cached metrics config (may be nil).
+func getCurMetricsConfig() *metricsConfig {
+	metricsMux.Lock()
+	defer metricsMux.Unlock()
+	return curMetricsConfig
+}
+
+// setCurMetricsConfig caches c and applies its reporting period to OpenCensus.
+func setCurMetricsConfig(c *metricsConfig) {
+	metricsMux.Lock()
+	defer metricsMux.Unlock()
+	if c != nil {
+		view.SetReportingPeriod(c.reportingPeriod)
+	} else {
+		// Setting to 0 enables the default behavior.
+		view.SetReportingPeriod(0)
+	}
+	curMetricsConfig = c
+}
diff --git a/vendor/github.com/knative/pkg/metrics/gcp_metadata.go b/vendor/github.com/knative/pkg/metrics/gcp_metadata.go
new file mode 100644
index 00000000000..ed64fb73362
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/gcp_metadata.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "cloud.google.com/go/compute/metadata"
+ "github.com/knative/pkg/metrics/metricskey"
+)
+
+// retrieveGCPMetadata best-effort queries the GCE metadata service for the
+// project, zone and cluster name; any field that cannot be fetched is left as
+// metricskey.ValueUnknown (e.g. when not running on GCP), so errors are
+// deliberately ignored.
+func retrieveGCPMetadata() *gcpMetadata {
+	gm := gcpMetadata{
+		project:  metricskey.ValueUnknown,
+		location: metricskey.ValueUnknown,
+		cluster:  metricskey.ValueUnknown,
+	}
+	project, err := metadata.NumericProjectID()
+	if err == nil && project != "" {
+		gm.project = project
+	}
+	location, err := metadata.Zone()
+	if err == nil && location != "" {
+		gm.location = location
+	}
+	cluster, err := metadata.InstanceAttributeValue("cluster-name")
+	if err == nil && cluster != "" {
+		gm.cluster = cluster
+	}
+	return &gm
+}
diff --git a/vendor/github.com/knative/pkg/metrics/metricskey/constants.go b/vendor/github.com/knative/pkg/metrics/metricskey/constants.go
new file mode 100644
index 00000000000..f941f222ae5
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/metricskey/constants.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2018 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metricskey
+
+import "k8s.io/apimachinery/pkg/util/sets"
+
+const (
+	// ResourceTypeKnativeRevision is the Stackdriver resource type for Knative revision
+	ResourceTypeKnativeRevision = "knative_revision"
+
+	// LabelProject is the label for project (e.g. GCP GAIA ID, AWS project name)
+	LabelProject = "project_id"
+
+	// LabelLocation is the label for location (e.g. GCE zone, AWS region) where the service is deployed
+	LabelLocation = "location"
+
+	// LabelClusterName is the label for immutable name of the cluster
+	LabelClusterName = "cluster_name"
+
+	// LabelNamespaceName is the label for immutable name of the namespace that the service is deployed
+	LabelNamespaceName = "namespace_name"
+
+	// LabelServiceName is the label for the deployed service name
+	LabelServiceName = "service_name"
+
+	// LabelRouteName is the label for immutable name of the route that receives the request
+	LabelRouteName = "route_name"
+
+	// LabelConfigurationName is the label for the configuration which created the monitored revision
+	LabelConfigurationName = "configuration_name"
+
+	// LabelRevisionName is the label for the monitored revision
+	LabelRevisionName = "revision_name"
+
+	// ValueUnknown is the default value if the field is unknown, e.g. project will be unknown if Knative
+	// is not running on GKE.
+	ValueUnknown = "unknown"
+)
+
+var (
+	// KnativeRevisionLabels stores the set of resource labels for resource type knative_revision.
+	// LabelRouteName is added as an extra label since it is optional, so it is not in this map.
+	KnativeRevisionLabels = sets.NewString(
+		LabelProject,
+		LabelLocation,
+		LabelClusterName,
+		LabelNamespaceName,
+		LabelServiceName,
+		LabelConfigurationName,
+		LabelRevisionName,
+	)
+
+	// KnativeRevisionMetrics stores the set of metric types which are supported
+	// by resource type knative_revision; anything else falls back to the
+	// "global" resource / custom metric path.
+	KnativeRevisionMetrics = sets.NewString(
+		"knative.dev/serving/activator/request_count",
+		"knative.dev/serving/activator/request_latencies",
+		"knative.dev/serving/autoscaler/desired_pods",
+		"knative.dev/serving/autoscaler/requested_pods",
+		"knative.dev/serving/autoscaler/actual_pods",
+		"knative.dev/serving/autoscaler/stable_request_concurrency",
+		"knative.dev/serving/autoscaler/panic_request_concurrency",
+		"knative.dev/serving/autoscaler/target_concurrency_per_pod",
+		"knative.dev/serving/autoscaler/panic_mode",
+		"knative.dev/serving/revision/request_count",
+		"knative.dev/serving/revision/request_latencies",
+	)
+)
diff --git a/vendor/github.com/knative/pkg/metrics/monitored_resources.go b/vendor/github.com/knative/pkg/metrics/monitored_resources.go
new file mode 100644
index 00000000000..295fb778f3c
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/monitored_resources.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "github.com/knative/pkg/metrics/metricskey"
+)
+
+// gcpMetadata holds values fetched from the GCE metadata service; fields are
+// ValueUnknown when unavailable (see retrieveGCPMetadata).
+type gcpMetadata struct {
+	project  string
+	location string
+	cluster  string
+}
+
+// KnativeRevision is a Stackdriver monitored resource of type
+// "knative_revision"; fields map 1:1 to the metricskey.Label* resource labels.
+type KnativeRevision struct {
+	Project           string
+	Location          string
+	ClusterName       string
+	NamespaceName     string
+	ServiceName       string
+	ConfigurationName string
+	RevisionName      string
+}
+
+// MonitoredResource returns the Stackdriver resource type and label map for kr.
+func (kr *KnativeRevision) MonitoredResource() (resType string, labels map[string]string) {
+	labels = map[string]string{
+		metricskey.LabelProject:           kr.Project,
+		metricskey.LabelLocation:          kr.Location,
+		metricskey.LabelClusterName:       kr.ClusterName,
+		metricskey.LabelNamespaceName:     kr.NamespaceName,
+		metricskey.LabelServiceName:       kr.ServiceName,
+		metricskey.LabelConfigurationName: kr.ConfigurationName,
+		metricskey.LabelRevisionName:      kr.RevisionName,
+	}
+	return metricskey.ResourceTypeKnativeRevision, labels // use the declared constant, not a duplicate literal
+}
+
+type Global struct{} // Global is the catch-all "global" Stackdriver monitored resource
+
+// MonitoredResource returns the "global" resource type with no labels.
+func (g *Global) MonitoredResource() (resType string, labels map[string]string) {
+	return "global", nil
+}
diff --git a/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go b/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go
new file mode 100644
index 00000000000..c3c0d55d07b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/prometheus_exporter.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "go.opencensus.io/exporter/prometheus"
+ "go.opencensus.io/stats/view"
+ "go.uber.org/zap"
+)
+
+var (
+	curPromSrv    *http.Server // currently running Prometheus scrape server, if any
+	curPromSrvMux sync.Mutex   // guards curPromSrv
+)
+
+// newPrometheusExporter creates a Prometheus view exporter and starts an HTTP
+// server exposing /metrics on config.prometheusPort for scraping.
+func newPrometheusExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
+	e, err := prometheus.NewExporter(prometheus.Options{Namespace: config.component})
+	if err != nil {
+		logger.Errorw("Failed to create the Prometheus exporter.", zap.Error(err))
+		return nil, err
+	}
+	logger.Infof("Created Opencensus Prometheus exporter with config: %v. Start the server for Prometheus exporter.", config)
+	// Start the server for Prometheus scraping
+	go func() {
+		srv := startNewPromSrv(e, config.prometheusPort)
+		srv.ListenAndServe() // NOTE(review): error is silently dropped — consider logging non-ErrServerClosed failures
+	}()
+	return e, nil
+}
+
+// getCurPromSrv returns the running Prometheus server (may be nil).
+func getCurPromSrv() *http.Server {
+	curPromSrvMux.Lock()
+	defer curPromSrvMux.Unlock()
+	return curPromSrv
+}
+
+// resetCurPromSrv closes the running Prometheus server, if any, and clears it.
+func resetCurPromSrv() {
+	curPromSrvMux.Lock()
+	defer curPromSrvMux.Unlock()
+	if curPromSrv != nil {
+		curPromSrv.Close()
+		curPromSrv = nil
+	}
+}
+
+// startNewPromSrv replaces any existing Prometheus server with a new one that
+// serves e on /metrics at the given port, and returns it (caller serves it).
+func startNewPromSrv(e *prometheus.Exporter, port int) *http.Server {
+	sm := http.NewServeMux()
+	sm.Handle("/metrics", e)
+	curPromSrvMux.Lock()
+	defer curPromSrvMux.Unlock()
+	if curPromSrv != nil { // close the previous server before replacing it
+		curPromSrv.Close()
+	}
+	curPromSrv = &http.Server{
+		Addr:    fmt.Sprintf(":%v", port),
+		Handler: sm,
+	}
+	return curPromSrv
+}
diff --git a/vendor/github.com/knative/pkg/metrics/record.go b/vendor/github.com/knative/pkg/metrics/record.go
new file mode 100644
index 00000000000..98a007cfea3
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/record.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "context"
+ "path"
+
+ "github.com/knative/pkg/metrics/metricskey"
+ "go.opencensus.io/stats"
+)
+
+// Record decides whether to record one measurement via OpenCensus based on the
+// following conditions:
+// 1) No package level metrics config. In this case it just proxies to OpenCensus
+//    based on the assumption that users expect the metrics to be recorded when
+//    they call this function. Users must ensure metrics config are set before
+//    using this function to get expected behavior.
+// 2) The backend is not Stackdriver.
+// 3) The backend is Stackdriver and it is allowed to use custom metrics.
+// 4) The backend is Stackdriver and the metric is a "knative_revision" built-in metric.
+func Record(ctx context.Context, ms stats.Measurement) {
+	mc := getCurMetricsConfig()
+
+	// Condition 1)
+	if mc == nil {
+		stats.Record(ctx, ms)
+		return
+	}
+
+	// Condition 2) and 3)
+	if !mc.isStackdriverBackend || mc.allowStackdriverCustomMetrics {
+		stats.Record(ctx, ms)
+		return
+	}
+
+	// Condition 4)
+	metricType := path.Join(mc.stackdriverMetricTypePrefix, ms.Measure().Name())
+	if metricskey.KnativeRevisionMetrics.Has(metricType) {
+		stats.Record(ctx, ms)
+	} // otherwise the measurement is intentionally dropped
+}
diff --git a/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go b/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go
new file mode 100644
index 00000000000..60bf1d5f34b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/metrics/stackdriver_exporter.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2019 The Knative Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "path"
+
+ "contrib.go.opencensus.io/exporter/stackdriver"
+ "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
+ "github.com/knative/pkg/metrics/metricskey"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "go.uber.org/zap"
+)
+
+// customMetricTypePrefix is the metric type prefix for metrics not supported by
+// resource type knative_revision.
+// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
+const customMetricTypePrefix = "custom.googleapis.com/knative.dev"
+
+var (
+	// gcpMetadataFunc is the function used to fetch GCP metadata.
+	// In production usage, this is always set to function retrieveGCPMetadata.
+	// In unit tests this is set to a fake one to avoid calling GCP metadata
+	// service.
+	gcpMetadataFunc func() *gcpMetadata
+
+	// newStackdriverExporterFunc is the function used to create a new stackdriver
+	// exporter.
+	// In production usage, this is always set to function newOpencensusSDExporter.
+	// In unit tests this is set to a fake one to avoid calling the actual Google API
+	// service.
+	newStackdriverExporterFunc func(stackdriver.Options) (view.Exporter, error)
+)
+
+// init wires the production implementations of the test seams declared above.
+func init() {
+	// Set gcpMetadataFunc to call GCP metadata service.
+	gcpMetadataFunc = retrieveGCPMetadata
+
+	newStackdriverExporterFunc = newOpencensusSDExporter
+}
+
+// newOpencensusSDExporter is the production newStackdriverExporterFunc.
+func newOpencensusSDExporter(o stackdriver.Options) (view.Exporter, error) {
+	return stackdriver.NewExporter(o)
+}
+
+// newStackdriverExporter creates a Stackdriver view exporter from config,
+// wiring GCP metadata and the metric-type/monitored-resource mapping funcs.
+func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
+	gm := gcpMetadataFunc() // test seam; production is retrieveGCPMetadata
+	mtf := getMetricTypeFunc(config.stackdriverMetricTypePrefix, config.stackdriverCustomMetricTypePrefix)
+	e, err := newStackdriverExporterFunc(stackdriver.Options{
+		ProjectID:            config.stackdriverProjectID,
+		GetMetricDisplayName: mtf, // Use metric type for display name for custom metrics. No impact on built-in metrics.
+		GetMetricType:        mtf,
+		GetMonitoredResource: getMonitoredResourceFunc(config.stackdriverMetricTypePrefix, gm),
+		DefaultMonitoringLabels: &stackdriver.Labels{},
+	})
+	if err != nil {
+		logger.Errorw("Failed to create the Stackdriver exporter: ", zap.Error(err))
+		return nil, err
+	}
+	logger.Infof("Created Opencensus Stackdriver exporter with config %v", config)
+	return e, nil
+}
+
+// getMonitoredResourceFunc returns a resolver that maps a view to the
+// knative_revision monitored resource when the metric type is built-in, and to
+// the "global" resource otherwise.
+func getMonitoredResourceFunc(metricTypePrefix string, gm *gcpMetadata) func(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
+	return func(view *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
+		metricType := path.Join(metricTypePrefix, view.Measure.Name())
+		if metricskey.KnativeRevisionMetrics.Has(metricType) {
+			return getKnativeRevisionMonitoredResource(view, tags, gm)
+		}
+		// Unsupported metric by knative_revision, use "global" resource type.
+		return getGlobalMonitoredResource(view, tags)
+	}
+}
+
+// getKnativeRevisionMonitoredResource builds a KnativeRevision resource from
+// GCP metadata plus the resource-label tags, and returns the remaining tags
+// (those that are not resource labels) alongside it.
+func getKnativeRevisionMonitoredResource(
+	v *view.View, tags []tag.Tag, gm *gcpMetadata) ([]tag.Tag, monitoredresource.Interface) {
+	tagsMap := getTagsMap(tags)
+	kr := &KnativeRevision{
+		// The first three resource labels are from metadata.
+		Project:     gm.project,
+		Location:    gm.location,
+		ClusterName: gm.cluster,
+		// The remaining resource labels are from metrics labels.
+		NamespaceName:     valueOrUnknown(metricskey.LabelNamespaceName, tagsMap),
+		ServiceName:       valueOrUnknown(metricskey.LabelServiceName, tagsMap),
+		ConfigurationName: valueOrUnknown(metricskey.LabelConfigurationName, tagsMap),
+		RevisionName:      valueOrUnknown(metricskey.LabelRevisionName, tagsMap),
+	}
+
+	var newTags []tag.Tag
+	for _, t := range tags {
+		// Keep the metrics labels that are not resource labels
+		if !metricskey.KnativeRevisionLabels.Has(t.Key.Name()) {
+			newTags = append(newTags, t)
+		}
+	}
+
+	return newTags, kr
+}
+
+// getTagsMap converts tags to a name→value map; pre-sized to avoid rehashing.
+func getTagsMap(tags []tag.Tag) map[string]string {
+	tagsMap := make(map[string]string, len(tags))
+	for _, t := range tags {
+		tagsMap[t.Key.Name()] = t.Value
+	}
+	return tagsMap
+}
+
+// valueOrUnknown returns tagsMap[key], or metricskey.ValueUnknown when absent.
+func valueOrUnknown(key string, tagsMap map[string]string) string {
+	if value, ok := tagsMap[key]; ok {
+		return value
+	}
+	return metricskey.ValueUnknown
+}
+
+// getGlobalMonitoredResource maps a view to the "global" resource, keeping all tags.
+func getGlobalMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, monitoredresource.Interface) {
+	return tags, &Global{}
+}
+
+// getMetricTypeFunc returns a mapper from a view to its Stackdriver metric
+// type: the built-in knative_revision type when supported, otherwise the
+// custom.googleapis.com-prefixed custom metric type.
+func getMetricTypeFunc(metricTypePrefix, customMetricTypePrefix string) func(view *view.View) string {
+	return func(view *view.View) string {
+		metricType := path.Join(metricTypePrefix, view.Measure.Name())
+		if metricskey.KnativeRevisionMetrics.Has(metricType) {
+			return metricType
+		}
+		// Unsupported metric by knative_revision, use custom domain.
+		return path.Join(customMetricTypePrefix, view.Measure.Name())
+	}
+}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/actions.go b/vendor/github.com/knative/pkg/reconciler/testing/actions.go
new file mode 100644
index 00000000000..7dc967b929b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/actions.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+
+ clientgotesting "k8s.io/client-go/testing"
+)
+
// Actions stores list of Actions recorded by the reactors.
// Each field holds the actions of one verb, in the order they were
// received by the fake clients.
type Actions struct {
	Gets              []clientgotesting.GetAction
	Creates           []clientgotesting.CreateAction
	Updates           []clientgotesting.UpdateAction
	Deletes           []clientgotesting.DeleteAction
	DeleteCollections []clientgotesting.DeleteCollectionAction
	Patches           []clientgotesting.PatchAction
}
+
// ActionRecorder contains list of K8s request actions.
// It is satisfied by, e.g., client-go's testing Fake, whose Actions
// method returns every request it has recorded.
type ActionRecorder interface {
	Actions() []clientgotesting.Action
}
+
// ActionRecorderList is a list of ActionRecorder objects, allowing the
// actions of several fake clients to be gathered in one pass.
type ActionRecorderList []ActionRecorder
+
+// ActionsByVerb fills in Actions objects, sorting the actions
+// by verb.
+func (l ActionRecorderList) ActionsByVerb() (Actions, error) {
+ var a Actions
+
+ for _, recorder := range l {
+ for _, action := range recorder.Actions() {
+ switch action.GetVerb() {
+ case "get":
+ a.Gets = append(a.Gets,
+ action.(clientgotesting.GetAction))
+ case "create":
+ a.Creates = append(a.Creates,
+ action.(clientgotesting.CreateAction))
+ case "update":
+ a.Updates = append(a.Updates,
+ action.(clientgotesting.UpdateAction))
+ case "delete":
+ a.Deletes = append(a.Deletes,
+ action.(clientgotesting.DeleteAction))
+ case "delete-collection":
+ a.DeleteCollections = append(a.DeleteCollections,
+ action.(clientgotesting.DeleteCollectionAction))
+ case "patch":
+ a.Patches = append(a.Patches,
+ action.(clientgotesting.PatchAction))
+ case "list", "watch": // avoid 'unexpected verb list/watch' error
+ default:
+ return a, fmt.Errorf("unexpected verb %v: %+v", action.GetVerb(), action)
+ }
+ }
+ }
+ return a, nil
+}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/clock.go b/vendor/github.com/knative/pkg/reconciler/testing/clock.go
new file mode 100644
index 00000000000..44ba77cdb60
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/clock.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "time"
+)
+
// FakeClock is a test clock whose Now always reports the same,
// caller-chosen instant.
type FakeClock struct {
	// Time is the instant returned by every call to Now.
	Time time.Time
}

// Now returns the configured instant instead of the wall-clock time.
func (fc FakeClock) Now() time.Time {
	return fc.Time
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/events.go b/vendor/github.com/knative/pkg/reconciler/testing/events.go
new file mode 100644
index 00000000000..498b04f7e6b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/events.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+
+ "k8s.io/client-go/tools/record"
+)
+
// EventList exports all events during reconciliation through fake event recorder
// with event channel with buffer of given size.
type EventList struct {
	// Recorder is the fake recorder whose buffered Events channel is
	// drained (and closed) by Events().
	Recorder *record.FakeRecorder
}
+
+// Events iterates over events received from channel in fake event recorder and returns all.
+func (l EventList) Events() []string {
+ close(l.Recorder.Events)
+ events := []string{}
+ for e := range l.Recorder.Events {
+ events = append(events, e)
+ }
+ return events
+}
+
// Eventf renders an event string exactly the way record.FakeRecorder
// does: "<eventType> <reason> <formatted message>".
func Eventf(eventType, reason, messageFmt string, args ...interface{}) string {
	format := eventType + " " + reason + " " + messageFmt
	return fmt.Sprintf(format, args...)
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go b/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go
new file mode 100644
index 00000000000..52ac4495174
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/generate_name_reactor.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgotesting "k8s.io/client-go/testing"
+)
+
// GenerateNameReactor will simulate the k8s API server
// and generate a name for resources whose metadata.generateName
// property is set. This happens only for CreateAction types.
//
// This generator is deterministic (unlike k8s) and uses a global
// counter to help make test names predictable.
type GenerateNameReactor struct {
	// count is incremented atomically for every generated name and is
	// embedded, zero-padded to five digits, into that name.
	count int64
}
+
+// Handles contains all the logic to generate the name and mutates
+// the create action object
+//
+// This is a hack as 'React' is passed a DeepCopy of the action hence
+// this is the only opportunity to 'mutate' the action in the
+// ReactionChain and have to continue executing additional reactors
+//
+// We should push changes upstream to client-go to help us with
+// mocking
+func (r *GenerateNameReactor) Handles(action clientgotesting.Action) bool {
+ create, ok := action.(clientgotesting.CreateAction)
+
+ if !ok {
+ return false
+ }
+
+ objMeta, err := meta.Accessor(create.GetObject())
+
+ if err != nil {
+ return false
+ }
+
+ if objMeta.GetName() != "" {
+ return false
+ }
+
+ if objMeta.GetGenerateName() == "" {
+ return false
+ }
+
+ val := atomic.AddInt64(&r.count, 1)
+
+ objMeta.SetName(fmt.Sprintf("%s%05d", objMeta.GetGenerateName(), val))
+
+ return false
+}
+
// React is noop-function: the actual work is done as a side effect of
// Handles, and returning handled=false lets the remaining reactors run.
func (r *GenerateNameReactor) React(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
	return false, nil, nil
}
+
// Compile-time check that GenerateNameReactor satisfies the Reactor interface.
var _ clientgotesting.Reactor = (*GenerateNameReactor)(nil)

// PrependGenerateNameReactor will instrument a client-go testing Fake
// with a reactor that simulates 'generateName' functionality.
// It is prepended so the name is assigned before any other reactor runs.
func PrependGenerateNameReactor(f *clientgotesting.Fake) {
	f.ReactionChain = append([]clientgotesting.Reactor{&GenerateNameReactor{}}, f.ReactionChain...)
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/hooks.go b/vendor/github.com/knative/pkg/reconciler/testing/hooks.go
new file mode 100644
index 00000000000..cde3d7d21e9
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/hooks.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package testing includes utilities for testing controllers.
+package testing
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ kubetesting "k8s.io/client-go/testing"
+)
+
// HookResult is the return value of hook functions. It tells
// WaitForHooks whether the hook needs to fire again before the wait can
// finish.
type HookResult bool

const (
	// HookComplete indicates the hook function completed, and WaitForHooks should
	// not wait for it.
	HookComplete HookResult = true
	// HookIncomplete indicates the hook function is incomplete, and WaitForHooks
	// should wait for it to complete.
	HookIncomplete HookResult = false
)
+
/*
CreateHookFunc is a function for handling a Create hook. Its runtime.Object
parameter will be the Kubernetes resource created. The resource can be cast
to its actual type like this:

	pod := obj.(*v1.Pod)

A return value of true (HookComplete) marks the hook as completed.
Returning false (HookIncomplete) allows the hook to run again when the
next resource of the requested type is created.
*/
type CreateHookFunc func(runtime.Object) HookResult
+
/*
UpdateHookFunc is a function for handling an update hook. Its runtime.Object
parameter will be the Kubernetes resource updated. The resource can be cast
to its actual type like this:

	pod := obj.(*v1.Pod)

A return value of true (HookComplete) marks the hook as completed.
Returning false (HookIncomplete) allows the hook to run again when the
next resource of the requested type is updated.
*/
type UpdateHookFunc func(runtime.Object) HookResult
+
/*
DeleteHookFunc is a function for handling a delete hook. Its name parameter
will be the name of the resource deleted. The resource itself is not
available to the reactor.
*/
type DeleteHookFunc func(string) HookResult
+
/*
Hooks is a utility struct that simplifies controller testing with fake
clients. A Hooks struct allows attaching hook functions to actions (create,
update, delete) on a specified resource type within a fake client and ensuring
that all hooks complete in a timely manner.
*/
type Hooks struct {
	// completionCh receives the index of a hook each time that hook
	// reports HookComplete.
	completionCh chan int32
	// completionIndex is the index assigned to the most recently
	// registered hook; -1 means none are registered (or all completed).
	completionIndex int32

	// Denotes whether or not the registered hooks should no longer be called
	// because they have already been waited upon.
	// This uses a Mutex over a channel to guarantee that after WaitForHooks
	// returns no hooked functions will be called.
	closed bool
	mutex  sync.RWMutex
}
+
+// NewHooks returns a Hooks struct that can be used to attach hooks to one or
+// more fake clients and wait for all hooks to complete.
+// TODO(grantr): Allow validating that a hook never fires
+func NewHooks() *Hooks {
+ return &Hooks{
+ completionCh: make(chan int32, 100),
+ completionIndex: -1,
+ }
+}
+
+// OnCreate attaches a create hook to the given Fake. The hook function is
+// executed every time a resource of the given type is created.
+func (h *Hooks) OnCreate(fake *kubetesting.Fake, resource string, rf CreateHookFunc) {
+ index := atomic.AddInt32(&h.completionIndex, 1)
+ fake.PrependReactor("create", resource, func(a kubetesting.Action) (bool, runtime.Object, error) {
+ obj := a.(kubetesting.CreateActionImpl).Object
+
+ h.mutex.RLock()
+ defer h.mutex.RUnlock()
+ if !h.closed && rf(obj) == HookComplete {
+ h.completionCh <- index
+ }
+ return false, nil, nil
+ })
+}
+
+// OnUpdate attaches an update hook to the given Fake. The hook function is
+// executed every time a resource of the given type is updated.
+func (h *Hooks) OnUpdate(fake *kubetesting.Fake, resource string, rf UpdateHookFunc) {
+ index := atomic.AddInt32(&h.completionIndex, 1)
+ fake.PrependReactor("update", resource, func(a kubetesting.Action) (bool, runtime.Object, error) {
+ obj := a.(kubetesting.UpdateActionImpl).Object
+
+ h.mutex.RLock()
+ defer h.mutex.RUnlock()
+ if !h.closed && rf(obj) == HookComplete {
+ h.completionCh <- index
+ }
+ return false, nil, nil
+ })
+}
+
+// OnDelete attaches a delete hook to the given Fake. The hook function is
+// executed every time a resource of the given type is deleted.
+func (h *Hooks) OnDelete(fake *kubetesting.Fake, resource string, rf DeleteHookFunc) {
+ index := atomic.AddInt32(&h.completionIndex, 1)
+ fake.PrependReactor("delete", resource, func(a kubetesting.Action) (bool, runtime.Object, error) {
+ name := a.(kubetesting.DeleteActionImpl).Name
+
+ h.mutex.RLock()
+ defer h.mutex.RUnlock()
+ if !h.closed && rf(name) == HookComplete {
+ h.completionCh <- index
+ }
+ return false, nil, nil
+ })
+}
+
// WaitForHooks waits until all attached hooks have returned true at least once.
// If the given timeout expires before that happens, an error is returned.
// The registered actions will no longer be executed after WaitForHooks has
// returned.
func (h *Hooks) WaitForHooks(timeout time.Duration) error {
	// On exit, mark the struct closed under the write lock, guaranteeing
	// that no hook function runs after this method returns.
	defer func() {
		h.mutex.Lock()
		defer h.mutex.Unlock()
		h.closed = true
	}()

	ci := int(atomic.LoadInt32(&h.completionIndex))
	if ci == -1 {
		// Either no hooks were registered, or a previous call already
		// observed them all complete.
		return nil
	}

	// Convert index to count.
	ci++
	timer := time.After(timeout)
	hookCompletions := map[int32]HookResult{}
	for {
		select {
		case i := <-h.completionCh:
			// Count distinct hook indices; a hook completing twice
			// only counts once.
			hookCompletions[i] = HookComplete
			if len(hookCompletions) == ci {
				// All hooks finished; reset so a later call returns nil.
				atomic.StoreInt32(&h.completionIndex, -1)
				return nil
			}
		case <-timer:
			return errors.New("timed out waiting for hooks to complete")
		}
	}
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/reactions.go b/vendor/github.com/knative/pkg/reconciler/testing/reactions.go
new file mode 100644
index 00000000000..9ce0e40440e
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/reactions.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgotesting "k8s.io/client-go/testing"
+
+ "github.com/knative/pkg/apis"
+)
+
+// InduceFailure is used in conjunction with TableTest's WithReactors field.
+// Tests that want to induce a failure in a row of a TableTest would add:
+// WithReactors: []clientgotesting.ReactionFunc{
+// // Makes calls to create revisions return an error.
+// InduceFailure("create", "revisions"),
+// },
+func InduceFailure(verb, resource string) clientgotesting.ReactionFunc {
+ return func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
+ if !action.Matches(verb, resource) {
+ return false, nil, nil
+ }
+ return true, nil, fmt.Errorf("inducing failure for %s %s", action.GetVerb(), action.GetResource().Resource)
+ }
+}
+
// ValidateCreates is a reactor that runs Validate on the object of every
// create action that implements apis.Validatable. A validation failure
// short-circuits the action with the validation error; otherwise the
// action passes through unhandled.
func ValidateCreates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
	got := action.(clientgotesting.CreateAction).GetObject()
	obj, ok := got.(apis.Validatable)
	if !ok {
		// Objects that don't support validation are left alone.
		return false, nil, nil
	}
	if err := obj.Validate(ctx); err != nil {
		return true, nil, err
	}
	return false, nil, nil
}
+
// ValidateUpdates is a reactor that runs Validate on the object of every
// update action that implements apis.Validatable. A validation failure
// short-circuits the action with the validation error; otherwise the
// action passes through unhandled.
func ValidateUpdates(ctx context.Context, action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
	got := action.(clientgotesting.UpdateAction).GetObject()
	obj, ok := got.(apis.Validatable)
	if !ok {
		// Objects that don't support validation are left alone.
		return false, nil, nil
	}
	if err := obj.Validate(ctx); err != nil {
		return true, nil, err
	}
	return false, nil, nil
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/sorter.go b/vendor/github.com/knative/pkg/reconciler/testing/sorter.go
new file mode 100644
index 00000000000..27061b69d39
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/sorter.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package testing
+
+import (
+ "fmt"
+ "reflect"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ util_runtime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/tools/cache"
+)
+
+func NewObjectSorter(scheme *runtime.Scheme) ObjectSorter {
+ cache := make(map[reflect.Type]cache.Indexer)
+
+ for _, v := range scheme.AllKnownTypes() {
+ cache[v] = emptyIndexer()
+ }
+
+ ls := ObjectSorter{
+ cache: cache,
+ }
+
+ return ls
+}
+
// ObjectSorter buckets runtime objects into per-type indexers so tests
// can hand each fake informer/lister only the objects of its own type.
type ObjectSorter struct {
	// cache maps a concrete (non-pointer) Go type to the indexer
	// holding the objects of that type.
	cache map[reflect.Type]cache.Indexer
}
+
+func (o *ObjectSorter) AddObjects(objs ...runtime.Object) {
+ for _, obj := range objs {
+ t := reflect.TypeOf(obj).Elem()
+ indexer, ok := o.cache[t]
+ if !ok {
+ panic(fmt.Sprintf("Unrecognized type %T", obj))
+ }
+ indexer.Add(obj)
+ }
+}
+
+func (o *ObjectSorter) ObjectsForScheme(scheme *runtime.Scheme) []runtime.Object {
+ var objs []runtime.Object
+
+ for _, t := range scheme.AllKnownTypes() {
+ indexer := o.cache[t]
+ for _, item := range indexer.List() {
+ objs = append(objs, item.(runtime.Object))
+ }
+ }
+
+ return objs
+}
+
+func (o *ObjectSorter) ObjectsForSchemeFunc(funcs ...func(scheme *runtime.Scheme) error) []runtime.Object {
+ scheme := runtime.NewScheme()
+
+ for _, addToScheme := range funcs {
+ util_runtime.Must(addToScheme(scheme))
+ }
+
+ return o.ObjectsForScheme(scheme)
+}
+
+func (o *ObjectSorter) IndexerForObjectType(obj runtime.Object) cache.Indexer {
+ objType := reflect.TypeOf(obj).Elem()
+
+ indexer, ok := o.cache[objType]
+
+ if !ok {
+ panic(fmt.Sprintf("indexer for type %v doesn't exist", objType.Name()))
+ }
+
+ return indexer
+}
+
// emptyIndexer builds a fresh indexer keyed by namespace/name with no
// additional indexes, the shape informer caches expect.
func emptyIndexer() cache.Indexer {
	return cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/stats.go b/vendor/github.com/knative/pkg/reconciler/testing/stats.go
new file mode 100644
index 00000000000..1d389a15d56
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/stats.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "fmt"
+ "time"
+)
+
// FakeStatsReporter is a fake implementation of StatsReporter that
// simply counts ReportServiceReady calls per namespace/service pair.
type FakeStatsReporter struct {
	// servicesReady maps "namespace/service" to the number of times the
	// service was reported ready; lazily initialized on first report.
	servicesReady map[string]int
}

// ReportServiceReady records one ready report for the given service.
// The duration argument is accepted for interface compatibility but
// not recorded.
func (r *FakeStatsReporter) ReportServiceReady(namespace, service string, d time.Duration) error {
	if r.servicesReady == nil {
		r.servicesReady = make(map[string]int)
	}
	r.servicesReady[fmt.Sprintf("%s/%s", namespace, service)]++
	return nil
}

// GetServiceReadyStats returns the per-service ready counts recorded so far.
func (r *FakeStatsReporter) GetServiceReadyStats() map[string]int {
	return r.servicesReady
}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/table.go b/vendor/github.com/knative/pkg/reconciler/testing/table.go
new file mode 100644
index 00000000000..dcd24c66054
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/table.go
@@ -0,0 +1,367 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/kmeta"
+ _ "github.com/knative/pkg/system/testing" // Setup system.Namespace()
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgotesting "k8s.io/client-go/testing"
+ "k8s.io/client-go/tools/cache"
+)
+
// TableRow holds a single row of our table test.
type TableRow struct {
	// Name is a descriptive name for this test suitable as a first argument to t.Run()
	Name string

	// Ctx is the context to pass to Reconcile. Defaults to context.Background()
	Ctx context.Context

	// Objects holds the state of the world at the onset of reconciliation.
	Objects []runtime.Object

	// Key is the parameter to reconciliation.
	// This has the form "namespace/name".
	Key string

	// WantErr holds whether we should expect the reconciliation to result in an error.
	WantErr bool

	// WantCreates holds the ordered list of Create calls we expect during reconciliation.
	WantCreates []metav1.Object

	// WantUpdates holds the ordered list of Update calls we expect during reconciliation.
	WantUpdates []clientgotesting.UpdateActionImpl

	// WantStatusUpdates holds the ordered list of Update calls, with `status` subresource set,
	// that we expect during reconciliation.
	WantStatusUpdates []clientgotesting.UpdateActionImpl

	// WantDeletes holds the ordered list of Delete calls we expect during reconciliation.
	WantDeletes []clientgotesting.DeleteActionImpl

	// WantDeleteCollections holds the ordered list of DeleteCollection calls we expect during reconciliation.
	WantDeleteCollections []clientgotesting.DeleteCollectionActionImpl

	// WantPatches holds the ordered list of Patch calls we expect during reconciliation.
	WantPatches []clientgotesting.PatchActionImpl

	// WantEvents holds the ordered list of events we expect during reconciliation.
	WantEvents []string

	// WantServiceReadyStats holds the ServiceReady stats we expect during reconciliation.
	WantServiceReadyStats map[string]int

	// WithReactors is a set of functions that are installed as Reactors for the execution
	// of this row of the table-driven-test.
	WithReactors []clientgotesting.ReactionFunc

	// SkipNamespaceValidation skips checking that actions target the
	// namespace derived from Key. For cluster-scoped resources like
	// ClusterIngress, it does not have to be in the same namespace with
	// its child resources.
	SkipNamespaceValidation bool
}
+
+func objKey(o runtime.Object) string {
+ on := o.(kmeta.Accessor)
+ // namespace + name is not unique, and the tests don't populate k8s kind
+ // information, so use GoLang's type name as part of the key.
+ return path.Join(reflect.TypeOf(o).String(), on.GetNamespace(), on.GetName())
+}
+
// Factory returns a Reconciler.Interface to perform reconciliation in table test,
// ActionRecorderList/EventList to capture k8s actions/events produced during reconciliation
// and FakeStatsReporter to capture stats. It is invoked once per table
// row with that row's state.
type Factory func(*testing.T, *TableRow) (controller.Reconciler, ActionRecorderList, EventList, *FakeStatsReporter)
+
// Test executes the single table test: it builds the reconciler via the
// factory, runs Reconcile on r.Key, then verifies every recorded action,
// event, and stat against the row's Want* expectations, in order.
func (r *TableRow) Test(t *testing.T, factory Factory) {
	t.Helper()
	c, recorderList, eventList, statsReporter := factory(t, r)

	// Set context to not be nil.
	ctx := r.Ctx
	if ctx == nil {
		ctx = context.Background()
	}

	// Run the Reconcile we're testing.
	if err := c.Reconcile(ctx, r.Key); (err != nil) != r.WantErr {
		t.Errorf("Reconcile() error = %v, WantErr %v", err, r.WantErr)
	}

	// Actions are expected to target the namespace encoded in the key
	// (unless SkipNamespaceValidation is set).
	expectedNamespace, _, _ := cache.SplitMetaNamespaceKey(r.Key)

	actions, err := recorderList.ActionsByVerb()
	if err != nil {
		t.Errorf("Error capturing actions by verb: %q", err)
	}

	// Previous state is used to diff resource expected state for update requests that were missed.
	objPrevState := map[string]runtime.Object{}
	for _, o := range r.Objects {
		objPrevState[objKey(o)] = o
	}

	// Verify expected creates, in order.
	for i, want := range r.WantCreates {
		if i >= len(actions.Creates) {
			t.Errorf("Missing create: %#v", want)
			continue
		}
		got := actions.Creates[i]
		obj := got.GetObject()
		objPrevState[objKey(obj)] = obj

		if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace {
			t.Errorf("Unexpected action[%d]: %#v", i, got)
		}

		if diff := cmp.Diff(want, obj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" {
			t.Errorf("Unexpected create (-want, +got): %s", diff)
		}
	}
	if got, want := len(actions.Creates), len(r.WantCreates); got > want {
		for _, extra := range actions.Creates[want:] {
			t.Errorf("Extra create: %#v", extra)
		}
	}

	// Verify expected (non-subresource) updates, in order. A missing
	// update is reported as a diff against the object's last known state.
	updates := filterUpdatesWithSubresource("", actions.Updates)
	for i, want := range r.WantUpdates {
		if i >= len(updates) {
			wo := want.GetObject()
			key := objKey(wo)
			oldObj, ok := objPrevState[key]
			if !ok {
				t.Errorf("Object %s was never created: want: %#v", key, wo)
				continue
			}
			t.Errorf("Missing update for %s (-want, +prevState): %s", key,
				cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()))
			continue
		}

		if want.GetSubresource() != "" {
			t.Errorf("Expectation was invalid - it should not include a subresource: %#v", want)
		}

		got := updates[i].GetObject()

		// Update the object state.
		objPrevState[objKey(got)] = got

		if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" {
			t.Errorf("Unexpected update (-want, +got): %s", diff)
		}
	}
	if got, want := len(updates), len(r.WantUpdates); got > want {
		for _, extra := range updates[want:] {
			t.Errorf("Extra update: %#v", extra)
		}
	}

	// Verify expected status-subresource updates, in order.
	// TODO(#2843): refactor.
	statusUpdates := filterUpdatesWithSubresource("status", actions.Updates)
	for i, want := range r.WantStatusUpdates {
		if i >= len(statusUpdates) {
			wo := want.GetObject()
			key := objKey(wo)
			oldObj, ok := objPrevState[key]
			if !ok {
				t.Errorf("Object %s was never created: want: %#v", key, wo)
				continue
			}
			t.Errorf("Missing status update for %s (-want, +prevState): %s", key,
				cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()))
			continue
		}

		got := statusUpdates[i].GetObject()

		// Update the object state.
		objPrevState[objKey(got)] = got

		if diff := cmp.Diff(want.GetObject(), got, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" {
			t.Errorf("Unexpected status update (-want, +got): %s\nFull: %v", diff, got)
		}
	}
	if got, want := len(statusUpdates), len(r.WantStatusUpdates); got > want {
		for _, extra := range statusUpdates[want:] {
			wo := extra.GetObject()
			key := objKey(wo)
			oldObj, ok := objPrevState[key]
			if !ok {
				t.Errorf("Object %s was never created: want: %#v", key, wo)
				continue
			}
			t.Errorf("Extra status update for %s (-extra, +prevState): %s", key,
				cmp.Diff(wo, oldObj, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty()))
		}
	}

	// Any update with a subresource other than "" or "status" is unexpected.
	if len(statusUpdates)+len(updates) != len(actions.Updates) {
		var unexpected []clientgotesting.UpdateAction

		for _, update := range actions.Updates {
			if update.GetSubresource() != "status" && update.GetSubresource() != "" {
				unexpected = append(unexpected, update)
			}
		}

		t.Errorf("Unexpected subresource updates occurred %#v", unexpected)
	}

	// Verify expected deletes, in order (matched by name and namespace).
	for i, want := range r.WantDeletes {
		if i >= len(actions.Deletes) {
			t.Errorf("Missing delete: %#v", want)
			continue
		}
		got := actions.Deletes[i]
		if got.GetName() != want.GetName() {
			t.Errorf("Unexpected delete[%d]: %#v", i, got)
		}
		if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace {
			t.Errorf("Unexpected delete[%d]: %#v", i, got)
		}
	}
	if got, want := len(actions.Deletes), len(r.WantDeletes); got > want {
		for _, extra := range actions.Deletes[want:] {
			t.Errorf("Extra delete: %#v", extra)
		}
	}

	// Verify expected delete-collections, in order (matched by label
	// restrictions and namespace; field selectors are unsupported).
	for i, want := range r.WantDeleteCollections {
		if i >= len(actions.DeleteCollections) {
			t.Errorf("Missing delete-collection: %#v", want)
			continue
		}
		got := actions.DeleteCollections[i]
		if got, want := got.GetListRestrictions().Labels, want.GetListRestrictions().Labels; (got != nil) != (want != nil) || got.String() != want.String() {
			t.Errorf("Unexpected delete-collection[%d].Labels = %v, wanted %v", i, got, want)
		}
		// TODO(mattmoor): Add this if/when we need support.
		if got := got.GetListRestrictions().Fields; got.String() != "" {
			t.Errorf("Unexpected delete-collection[%d].Fields = %v, wanted ''", i, got)
		}
		if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace {
			t.Errorf("Unexpected delete-collection[%d]: %#v, wanted %s", i, got, expectedNamespace)
		}
	}
	if got, want := len(actions.DeleteCollections), len(r.WantDeleteCollections); got > want {
		for _, extra := range actions.DeleteCollections[want:] {
			t.Errorf("Extra delete-collection: %#v", extra)
		}
	}

	// Verify expected patches, in order (matched by name, namespace, and
	// raw patch bytes).
	for i, want := range r.WantPatches {
		if i >= len(actions.Patches) {
			t.Errorf("Missing patch: %#v; raw: %s", want, string(want.GetPatch()))
			continue
		}

		got := actions.Patches[i]
		if got.GetName() != want.GetName() {
			t.Errorf("Unexpected patch[%d]: %#v", i, got)
		}
		if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace {
			t.Errorf("Unexpected patch[%d]: %#v", i, got)
		}
		if diff := cmp.Diff(string(want.GetPatch()), string(got.GetPatch())); diff != "" {
			t.Errorf("Unexpected patch(-want, +got): %s", diff)
		}
	}
	if got, want := len(actions.Patches), len(r.WantPatches); got > want {
		for _, extra := range actions.Patches[want:] {
			t.Errorf("Extra patch: %#v; raw: %s", extra, string(extra.GetPatch()))
		}
	}

	// Verify expected events, in order. Note that eventList.Events()
	// closes the recorder's channel, so this happens exactly once.
	gotEvents := eventList.Events()
	for i, want := range r.WantEvents {
		if i >= len(gotEvents) {
			t.Errorf("Missing event: %s", want)
			continue
		}

		if diff := cmp.Diff(want, gotEvents[i]); diff != "" {
			t.Errorf("unexpected event(-want, +got): %s", diff)
		}
	}
	if got, want := len(gotEvents), len(r.WantEvents); got > want {
		for _, extra := range gotEvents[want:] {
			t.Errorf("Extra event: %s", extra)
		}
	}

	// Finally, compare the reported service-ready stats.
	gotStats := statsReporter.GetServiceReadyStats()
	if diff := cmp.Diff(r.WantServiceReadyStats, gotStats); diff != "" {
		t.Errorf("Unexpected service ready stats (-want, +got): %s", diff)
	}
}
+
+func filterUpdatesWithSubresource(
+ subresource string,
+ actions []clientgotesting.UpdateAction) (result []clientgotesting.UpdateAction) {
+ for _, action := range actions {
+ if action.GetSubresource() == subresource {
+ result = append(result, action)
+ }
+ }
+ return
+}
+
// TableTest represents a list of TableRow tests instances, run together
// as one suite via its Test method.
type TableTest []TableRow
+
+// Test executes the whole suite of the table tests.
+func (tt TableTest) Test(t *testing.T, factory Factory) {
+ t.Helper()
+ for _, test := range tt {
+ // Record the original objects in table.
+ originObjects := []runtime.Object{}
+ for _, obj := range test.Objects {
+ originObjects = append(originObjects, obj.DeepCopyObject())
+ }
+ t.Run(test.Name, func(t *testing.T) {
+ t.Helper()
+ test.Test(t, factory)
+ })
+ // Validate cached objects do not get soiled after controller loops
+ if diff := cmp.Diff(originObjects, test.Objects, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" {
+ t.Errorf("Unexpected objects in test %s (-want, +got): %v", test.Name, diff)
+ }
+ }
+}
+
var (
	// ignoreLastTransitionTime suppresses diffs on condition
	// LastTransitionTime fields, which change on every reconcile.
	ignoreLastTransitionTime = cmp.FilterPath(func(p cmp.Path) bool {
		return strings.HasSuffix(p.String(), "LastTransitionTime.Inner.Time")
	}, cmp.Ignore())

	// safeDeployDiff keeps cmp from descending into resource.Quantity's
	// unexported fields, which cmp would otherwise refuse to compare.
	safeDeployDiff = cmpopts.IgnoreUnexported(resource.Quantity{})
)
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/tracker.go b/vendor/github.com/knative/pkg/reconciler/testing/tracker.go
new file mode 100644
index 00000000000..893ba86aae8
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/tracker.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/knative/pkg/tracker"
+)
+
+// NullTracker is a no-op implementation of tracker.Interface for use in tests.
+type NullTracker struct{}
+
+var _ tracker.Interface = (*NullTracker)(nil)
+
+// OnChanged implements tracker.Interface; it does nothing.
+func (*NullTracker) OnChanged(interface{}) {}
+
+// Track implements tracker.Interface; it records nothing and always succeeds.
+func (*NullTracker) Track(corev1.ObjectReference, interface{}) error { return nil }
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/util.go b/vendor/github.com/knative/pkg/reconciler/testing/util.go
new file mode 100644
index 00000000000..14689693fd0
--- /dev/null
+++ b/vendor/github.com/knative/pkg/reconciler/testing/util.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package testing includes utilities for testing controllers.
+package testing
+
+import (
+ "regexp"
+ "testing"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/tools/cache"
+)
+
+// KeyOrDie returns the string key of the Kubernetes object or panics if a key
+// cannot be generated. The key follows cache.MetaNamespaceKeyFunc's
+// "<namespace>/<name>" convention ("<name>" for cluster-scoped objects).
+func KeyOrDie(obj interface{}) string {
+ key, err := cache.MetaNamespaceKeyFunc(obj)
+ if err != nil {
+ panic(err)
+ }
+ return key
+}
+
+// ExpectNormalEventDelivery returns a hook function that can be passed to a
+// Hooks.OnCreate() call to verify that an event of type Normal was created
+// matching the given regular expression. For this expectation to be effective
+// the test must also call Hooks.WaitForHooks().
+func ExpectNormalEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc {
+ t.Helper()
+ // Compile eagerly so a bad pattern fails the test immediately rather than
+ // inside the hook.
+ wantRegexp, err := regexp.Compile(messageRegexp)
+ if err != nil {
+ t.Fatalf("Invalid regular expression: %v", err)
+ }
+ return func(obj runtime.Object) HookResult {
+ t.Helper()
+ event := obj.(*corev1.Event)
+ if !wantRegexp.MatchString(event.Message) {
+ // Not the event we are waiting for; keep the hook open.
+ return HookIncomplete
+ }
+ t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message)
+ if got, want := event.Type, corev1.EventTypeNormal; got != want {
+ t.Errorf("unexpected event Type: %q expected: %q", got, want)
+ }
+ return HookComplete
+ }
+}
+
+// ExpectWarningEventDelivery returns a hook function that can be passed to a
+// Hooks.OnCreate() call to verify that an event of type Warning was created
+// matching the given regular expression. For this expectation to be effective
+// the test must also call Hooks.WaitForHooks().
+func ExpectWarningEventDelivery(t *testing.T, messageRegexp string) CreateHookFunc {
+ t.Helper()
+ // Compile eagerly so a bad pattern fails the test immediately rather than
+ // inside the hook.
+ wantRegexp, err := regexp.Compile(messageRegexp)
+ if err != nil {
+ t.Fatalf("Invalid regular expression: %v", err)
+ }
+ return func(obj runtime.Object) HookResult {
+ t.Helper()
+ event := obj.(*corev1.Event)
+ if !wantRegexp.MatchString(event.Message) {
+ // Not the event we are waiting for; keep the hook open.
+ return HookIncomplete
+ }
+ t.Logf("Got an event message matching %q: %q", wantRegexp, event.Message)
+ if got, want := event.Type, corev1.EventTypeWarning; got != want {
+ t.Errorf("unexpected event Type: %q expected: %q", got, want)
+ }
+ return HookComplete
+ }
+}
diff --git a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go b/vendor/github.com/knative/pkg/tracker/doc.go
similarity index 65%
rename from vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go
rename to vendor/github.com/knative/pkg/tracker/doc.go
index fc2287a3ebf..a54e6affe6e 100644
--- a/vendor/github.com/knative/build/pkg/apis/build/v1alpha1/cluster_build_template_validation.go
+++ b/vendor/github.com/knative/pkg/tracker/doc.go
@@ -14,15 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
-
-import (
- "context"
-
- "github.com/knative/pkg/apis"
-)
-
-// Validate ClusterBuildTemplate
-func (b *ClusterBuildTemplate) Validate(ctx context.Context) *apis.FieldError {
- return validateObjectMetadata(b.GetObjectMeta()).ViaField("metadata").Also(b.Spec.Validate(ctx).ViaField("spec"))
-}
+// Package tracker defines a utility to enable Reconcilers to trigger
+// reconciliations when objects that are cross-referenced change, so
+// that the level-based reconciliation can react to the change. The
+// prototypical cross-reference in Kubernetes is corev1.ObjectReference.
+package tracker
diff --git a/vendor/github.com/knative/pkg/tracker/enqueue.go b/vendor/github.com/knative/pkg/tracker/enqueue.go
new file mode 100644
index 00000000000..ed225c0c4fc
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracker/enqueue.go
@@ -0,0 +1,169 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracker
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/pkg/kmeta"
+)
+
+// New returns an implementation of Interface that lets a Reconciler
+// register a particular resource as watching an ObjectReference for
+// a particular lease duration. This watch must be refreshed
+// periodically (e.g. by a controller resync) or it will expire.
+//
+// When OnChanged is called by the informer for a particular
+// GroupVersionKind, the provided callback is called with the "key"
+// of each object actively watching the changed object.
+//
+// The internal mapping is allocated lazily on first Track, so the
+// zero-initialized fields here are sufficient.
+func New(callback func(string), lease time.Duration) Interface {
+ return &impl{
+ leaseDuration: lease,
+ cb: callback,
+ }
+}
+
+// impl is the lease-based tracker returned by New.
+type impl struct {
+ // m guards mapping; Track and OnChanged both hold it for their full
+ // duration, including callback invocations.
+ m sync.Mutex
+ // mapping maps from an object reference to the set of
+ // keys for objects watching it.
+ mapping map[corev1.ObjectReference]set
+
+ // The amount of time that an object may watch another
+ // before having to renew the lease.
+ leaseDuration time.Duration
+
+ // cb is invoked with each watcher's key when a tracked object changes.
+ cb func(string)
+}
+
+// Check that impl implements Interface.
+var _ Interface = (*impl)(nil)
+
+// set is a map from keys to expirations
+type set map[string]time.Time
+
+// Track implements Interface. It validates ref, derives obj's cache key, and
+// records (or renews) a lease under which obj watches ref. Newly covered or
+// expired keys trigger an immediate callback (see comment below).
+func (i *impl) Track(ref corev1.ObjectReference, obj interface{}) error {
+ // Validate each ObjectReference field up front; each validator returns a
+ // (possibly empty) list of messages.
+ invalidFields := map[string][]string{
+ "APIVersion": validation.IsQualifiedName(ref.APIVersion),
+ "Kind": validation.IsCIdentifier(ref.Kind),
+ "Namespace": validation.IsDNS1123Label(ref.Namespace),
+ "Name": validation.IsDNS1123Subdomain(ref.Name),
+ }
+ fieldErrors := []string{}
+ for k, v := range invalidFields {
+ for _, msg := range v {
+ fieldErrors = append(fieldErrors, fmt.Sprintf("%s: %s", k, msg))
+ }
+ }
+ if len(fieldErrors) > 0 {
+ // Sort for a deterministic message, since map iteration order is random.
+ sort.Strings(fieldErrors)
+ return fmt.Errorf("Invalid ObjectReference:\n%s", strings.Join(fieldErrors, "\n"))
+ }
+
+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+ if err != nil {
+ return err
+ }
+
+ i.m.Lock()
+ defer i.m.Unlock()
+ // Lazily allocate the top-level map on first use.
+ if i.mapping == nil {
+ i.mapping = make(map[corev1.ObjectReference]set)
+ }
+
+ l, ok := i.mapping[ref]
+ if !ok {
+ l = set{}
+ }
+ if expiry, ok := l[key]; !ok || isExpired(expiry) {
+ // When covering an uncovered key, immediately call the
+ // registered callback to ensure that the following pattern
+ // doesn't create problems:
+ // foo, err := lister.Get(key)
+ // // Later...
+ // err := tracker.Track(fooRef, parent)
+ // In this example, "Later" represents a window where "foo" may
+ // have changed or been created while the Track is not active.
+ // The simplest way of eliminating such a window is to call the
+ // callback to "catch up" immediately following new
+ // registrations.
+ i.cb(key)
+ }
+ // Overwrite the key with a new expiration.
+ l[key] = time.Now().Add(i.leaseDuration)
+
+ i.mapping[ref] = l
+ return nil
+}
+
+// objectReference builds the corev1.ObjectReference under which watchers of
+// item are indexed in impl.mapping.
+func objectReference(item kmeta.Accessor) corev1.ObjectReference {
+ gvk := item.GroupVersionKind()
+ apiVersion, kind := gvk.ToAPIVersionAndKind()
+ return corev1.ObjectReference{
+ APIVersion: apiVersion,
+ Kind: kind,
+ Namespace: item.GetNamespace(),
+ Name: item.GetName(),
+ }
+}
+
+// isExpired reports whether a lease expiration time has already passed.
+func isExpired(expiry time.Time) bool {
+ return time.Now().After(expiry)
+}
+
+// OnChanged implements Interface. It looks up the watchers of the changed
+// object, invokes the callback for each unexpired lease, prunes expired
+// leases, and drops the watcher set entirely once it is empty.
+func (i *impl) OnChanged(obj interface{}) {
+ item, err := kmeta.DeletionHandlingAccessor(obj)
+ if err != nil {
+ // TODO(mattmoor): We should consider logging here.
+ return
+ }
+
+ or := objectReference(item)
+
+ // TODO(mattmoor): Consider locking the mapping (global) for a
+ // smaller scope and leveraging a per-set lock to guard its access.
+ i.m.Lock()
+ defer i.m.Unlock()
+ s, ok := i.mapping[or]
+ if !ok {
+ // Nobody is tracking this object.
+ // TODO(mattmoor): We should consider logging here.
+ return
+ }
+
+ // Deleting during range is safe in Go for the key being iterated.
+ for key, expiry := range s {
+ // If the expiration has lapsed, then delete the key.
+ if isExpired(expiry) {
+ delete(s, key)
+ continue
+ }
+ i.cb(key)
+ }
+
+ // Garbage-collect the set once all of its leases have expired.
+ if len(s) == 0 {
+ delete(i.mapping, or)
+ }
+}
diff --git a/vendor/github.com/knative/pkg/tracker/interface.go b/vendor/github.com/knative/pkg/tracker/interface.go
new file mode 100644
index 00000000000..6481a839d1b
--- /dev/null
+++ b/vendor/github.com/knative/pkg/tracker/interface.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracker
+
+import (
+ corev1 "k8s.io/api/core/v1"
+)
+
+// Interface defines the interface through which an object can register
+// that it is tracking another object by reference.
+type Interface interface {
+ // Track tells us that "obj" is tracking changes to the
+ // referenced object. It returns an error if the reference or
+ // obj cannot be keyed.
+ Track(ref corev1.ObjectReference, obj interface{}) error
+
+ // OnChanged is a callback to register with the InformerFactory
+ // so that we are notified for appropriate object changes.
+ OnChanged(obj interface{})
+}
diff --git a/vendor/go.uber.org/zap/internal/ztest/doc.go b/vendor/go.uber.org/zap/internal/ztest/doc.go
new file mode 100644
index 00000000000..cd4b98cbcb6
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/ztest/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package ztest provides low-level helpers for testing log output. These
+// utilities are helpful in zap's own unit tests, but any assertions using
+// them are strongly coupled to a single encoding.
+package ztest // import "go.uber.org/zap/internal/ztest"
diff --git a/vendor/go.uber.org/zap/internal/ztest/timeout.go b/vendor/go.uber.org/zap/internal/ztest/timeout.go
new file mode 100644
index 00000000000..f7d58f31657
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/ztest/timeout.go
@@ -0,0 +1,59 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ztest
+
+import (
+ "log"
+ "os"
+ "strconv"
+ "time"
+)
+
+// _timeoutScale is the multiplier applied by Timeout; 1.0 means no scaling.
+var _timeoutScale = 1.0
+
+// Timeout scales the provided duration by $TEST_TIMEOUT_SCALE.
+func Timeout(base time.Duration) time.Duration {
+ return time.Duration(float64(base) * _timeoutScale)
+}
+
+// Sleep scales the sleep duration by $TEST_TIMEOUT_SCALE.
+func Sleep(base time.Duration) {
+ time.Sleep(Timeout(base))
+}
+
+// Initialize checks the environment and alters the timeout scale accordingly.
+// It returns a function to undo the scaling. It panics if factor is not a
+// parseable float.
+func Initialize(factor string) func() {
+ original := _timeoutScale
+ fv, err := strconv.ParseFloat(factor, 64)
+ if err != nil {
+ panic(err)
+ }
+ _timeoutScale = fv
+ return func() { _timeoutScale = original }
+}
+
+// init picks up $TEST_TIMEOUT_SCALE from the environment, if set, and logs
+// the resulting multiplier.
+func init() {
+ if v := os.Getenv("TEST_TIMEOUT_SCALE"); v != "" {
+ Initialize(v)
+ log.Printf("Scaling timeouts by %vx.\n", _timeoutScale)
+ }
+}
diff --git a/vendor/go.uber.org/zap/internal/ztest/writer.go b/vendor/go.uber.org/zap/internal/ztest/writer.go
new file mode 100644
index 00000000000..9fdd5805e2c
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/ztest/writer.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ztest
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "strings"
+)
+
+// A Syncer is a spy for the Sync portion of zapcore.WriteSyncer.
+type Syncer struct {
+ err error
+ called bool
+}
+
+// SetError sets the error that the Sync method will return.
+func (s *Syncer) SetError(err error) {
+ s.err = err
+}
+
+// Sync records that it was called, then returns the user-supplied error (if
+// any).
+func (s *Syncer) Sync() error {
+ s.called = true
+ return s.err
+}
+
+// Called reports whether the Sync method was called.
+func (s *Syncer) Called() bool {
+ return s.called
+}
+
+// A Discarder sends all writes to ioutil.Discard.
+type Discarder struct{ Syncer }
+
+// Write implements io.Writer.
+func (d *Discarder) Write(b []byte) (int, error) {
+ return ioutil.Discard.Write(b)
+}
+
+// FailWriter is a WriteSyncer that always returns an error on writes.
+type FailWriter struct{ Syncer }
+
+// Write implements io.Writer. It claims the full length was written while
+// returning an error.
+func (w FailWriter) Write(b []byte) (int, error) {
+ return len(b), errors.New("failed")
+}
+
+// ShortWriter is a WriteSyncer whose write method never fails, but
+// nevertheless reports writing one byte fewer than the length of the input
+// (a "short write").
+type ShortWriter struct{ Syncer }
+
+// Write implements io.Writer.
+func (w ShortWriter) Write(b []byte) (int, error) {
+ return len(b) - 1, nil
+}
+
+// Buffer is an implementation of zapcore.WriteSyncer that sends all writes to
+// a bytes.Buffer. It has convenience methods to split the accumulated buffer
+// on newlines.
+type Buffer struct {
+ bytes.Buffer
+ Syncer
+}
+
+// Lines returns the current buffer contents, split on newlines. The trailing
+// element after the final newline is dropped.
+func (b *Buffer) Lines() []string {
+ output := strings.Split(b.String(), "\n")
+ return output[:len(output)-1]
+}
+
+// Stripped returns the current buffer contents with the last trailing newline
+// stripped.
+func (b *Buffer) Stripped() string {
+ return strings.TrimRight(b.String(), "\n")
+}
diff --git a/vendor/go.uber.org/zap/zaptest/doc.go b/vendor/go.uber.org/zap/zaptest/doc.go
new file mode 100644
index 00000000000..b377859c4a7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zaptest/doc.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zaptest provides a variety of helpers for testing log output.
+package zaptest // import "go.uber.org/zap/zaptest"
diff --git a/vendor/go.uber.org/zap/zaptest/logger.go b/vendor/go.uber.org/zap/zaptest/logger.go
new file mode 100644
index 00000000000..1e2451c264c
--- /dev/null
+++ b/vendor/go.uber.org/zap/zaptest/logger.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zaptest
+
+import (
+ "bytes"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// LoggerOption configures the test logger built by NewLogger.
+type LoggerOption interface {
+ applyLoggerOption(*loggerOptions)
+}
+
+// loggerOptions collects the settings applied by LoggerOption values.
+type loggerOptions struct {
+ Level zapcore.LevelEnabler
+ zapOptions []zap.Option
+}
+
+// loggerOptionFunc adapts a plain function to the LoggerOption interface.
+type loggerOptionFunc func(*loggerOptions)
+
+func (f loggerOptionFunc) applyLoggerOption(opts *loggerOptions) {
+ f(opts)
+}
+
+// Level controls which messages are logged by a test Logger built by
+// NewLogger.
+func Level(enab zapcore.LevelEnabler) LoggerOption {
+ return loggerOptionFunc(func(opts *loggerOptions) {
+ opts.Level = enab
+ })
+}
+
+// WrapOptions adds zap.Option's to a test Logger built by NewLogger.
+// Note: the options replace, not append to, any previously supplied ones.
+func WrapOptions(zapOpts ...zap.Option) LoggerOption {
+ return loggerOptionFunc(func(opts *loggerOptions) {
+ opts.zapOptions = zapOpts
+ })
+}
+
+// NewLogger builds a new Logger that logs all messages to the given
+// testing.TB.
+//
+// logger := zaptest.NewLogger(t)
+//
+// Use this with a *testing.T or *testing.B to get logs which get printed only
+// if a test fails or if you ran go test -v.
+//
+// The returned logger defaults to logging debug level messages and above.
+// This may be changed by passing a zaptest.Level during construction.
+//
+// logger := zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel))
+//
+// You may also pass zap.Option's to customize test logger.
+//
+// logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller()))
+func NewLogger(t TestingT, opts ...LoggerOption) *zap.Logger {
+ cfg := loggerOptions{
+ Level: zapcore.DebugLevel,
+ }
+ // Apply caller-supplied options over the defaults.
+ for _, o := range opts {
+ o.applyLoggerOption(&cfg)
+ }
+
+ writer := newTestingWriter(t)
+ zapOptions := []zap.Option{
+ // Send zap errors to the same writer and mark the test as failed if
+ // that happens.
+ zap.ErrorOutput(writer.WithMarkFailed(true)),
+ }
+ // User options come after ours so they can override ErrorOutput.
+ zapOptions = append(zapOptions, cfg.zapOptions...)
+
+ return zap.New(
+ zapcore.NewCore(
+ zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
+ writer,
+ cfg.Level,
+ ),
+ zapOptions...,
+ )
+}
+
+// testingWriter is a WriteSyncer that writes to the given testing.TB.
+type testingWriter struct {
+ t TestingT
+
+ // If true, the test will be marked as failed if this testingWriter is
+ // ever used.
+ markFailed bool
+}
+
+// newTestingWriter builds a testingWriter that logs to t without failing it.
+func newTestingWriter(t TestingT) testingWriter {
+ return testingWriter{t: t}
+}
+
+// WithMarkFailed returns a copy of this testingWriter with markFailed set to
+// the provided value. The receiver is a value, so the original is unchanged.
+func (w testingWriter) WithMarkFailed(v bool) testingWriter {
+ w.markFailed = v
+ return w
+}
+
+// Write implements io.Writer by forwarding p to t.Logf, optionally failing
+// the test. It always reports the full input length as written.
+func (w testingWriter) Write(p []byte) (n int, err error) {
+ n = len(p)
+
+ // Strip trailing newline because t.Log always adds one.
+ p = bytes.TrimRight(p, "\n")
+
+ // Note: t.Log is safe for concurrent use.
+ w.t.Logf("%s", p)
+ if w.markFailed {
+ w.t.Fail()
+ }
+
+ return n, nil
+}
+
+// Sync implements zapcore.WriteSyncer; there is nothing to flush.
+func (w testingWriter) Sync() error {
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/zaptest/testingt.go b/vendor/go.uber.org/zap/zaptest/testingt.go
new file mode 100644
index 00000000000..792463be301
--- /dev/null
+++ b/vendor/go.uber.org/zap/zaptest/testingt.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zaptest
+
+// TestingT is a subset of the API provided by all *testing.T and *testing.B
+// objects.
+type TestingT interface {
+ // Logf logs the given message without failing the test.
+ Logf(string, ...interface{})
+
+ // Errorf logs the given message and marks the test as failed.
+ Errorf(string, ...interface{})
+
+ // Fail marks the test as failed.
+ Fail()
+
+ // Failed returns true if the test has been marked as failed.
+ Failed() bool
+
+ // Name returns the name of the test.
+ Name() string
+
+ // FailNow marks the test as failed and stops execution of that test.
+ FailNow()
+}
+
+// Note: We currently only rely on Logf. We are including Errorf and FailNow
+// in the interface in anticipation of future need since we can't extend the
+// interface without a breaking change.
diff --git a/vendor/go.uber.org/zap/zaptest/timeout.go b/vendor/go.uber.org/zap/zaptest/timeout.go
new file mode 100644
index 00000000000..f0be4441651
--- /dev/null
+++ b/vendor/go.uber.org/zap/zaptest/timeout.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zaptest
+
+import (
+ "time"
+
+ "go.uber.org/zap/internal/ztest"
+)
+
+// Timeout scales the provided duration by $TEST_TIMEOUT_SCALE. It simply
+// forwards to the internal ztest package.
+//
+// Deprecated: This function is intended for internal testing and shouldn't be
+// used outside zap itself. It was introduced before Go supported internal
+// packages.
+func Timeout(base time.Duration) time.Duration {
+ return ztest.Timeout(base)
+}
+
+// Sleep scales the sleep duration by $TEST_TIMEOUT_SCALE. It simply forwards
+// to the internal ztest package.
+//
+// Deprecated: This function is intended for internal testing and shouldn't be
+// used outside zap itself. It was introduced before Go supported internal
+// packages.
+func Sleep(base time.Duration) {
+ ztest.Sleep(base)
+}
diff --git a/vendor/go.uber.org/zap/zaptest/writer.go b/vendor/go.uber.org/zap/zaptest/writer.go
new file mode 100644
index 00000000000..0701630e16b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zaptest/writer.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zaptest
+
+import "go.uber.org/zap/internal/ztest"
+
+// Type aliases re-exporting the internal ztest write-spy helpers so existing
+// zaptest users keep working.
+type (
+ // A Syncer is a spy for the Sync portion of zapcore.WriteSyncer.
+ Syncer = ztest.Syncer
+
+ // A Discarder sends all writes to ioutil.Discard.
+ Discarder = ztest.Discarder
+
+ // FailWriter is a WriteSyncer that always returns an error on writes.
+ FailWriter = ztest.FailWriter
+
+ // ShortWriter is a WriteSyncer whose write method never returns an error,
+ // but always reports that it wrote one byte less than the input slice's
+ // length (thus, a "short write").
+ ShortWriter = ztest.ShortWriter
+
+ // Buffer is an implementation of zapcore.WriteSyncer that sends all writes to
+ // a bytes.Buffer. It has convenience methods to split the accumulated buffer
+ // on newlines.
+ Buffer = ztest.Buffer
+)
diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
new file mode 100644
index 00000000000..ab510a8fff6
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go
@@ -0,0 +1,632 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/distribution.proto
+
+package distribution
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Distribution contains summary statistics for a population of values and,
+// optionally, a histogram representing the distribution of those values across
+// a specified set of histogram buckets.
+//
+// The summary statistics are the count, mean, sum of the squared deviation from
+// the mean, the minimum, and the maximum of the set of population of values.
+//
+// The histogram is based on a sequence of buckets and gives a count of values
+// that fall into each bucket. The boundaries of the buckets are given either
+// explicitly or by specifying parameters for a method of computing them
+// (buckets of fixed width or buckets of exponentially increasing width).
+//
+// Although it is not forbidden, it is generally a bad idea to include
+// non-finite values (infinities or NaNs) in the population of values, as this
+// will render the `mean` and `sum_of_squared_deviation` fields meaningless.
+type Distribution struct {
+ // The number of values in the population. Must be non-negative.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The arithmetic mean of the values in the population. If `count` is zero
+ // then this field must be zero.
+ Mean float64 `protobuf:"fixed64,2,opt,name=mean,proto3" json:"mean,omitempty"`
+ // The sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If `count` is zero then this field must be zero.
+ SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
+ // If specified, contains the range of the population values. The field
+ // must not be present if the `count` is zero.
+ Range *Distribution_Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"`
+ // Defines the histogram bucket boundaries.
+ BucketOptions *Distribution_BucketOptions `protobuf:"bytes,6,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
+ // If `bucket_options` is given, then the sum of the values in `bucket_counts`
+ // must equal the value in `count`. If `bucket_options` is not given, no
+ // `bucket_counts` fields may be given.
+ //
+ // Bucket counts are given in order under the numbering scheme described
+ // above (the underflow bucket has number 0; the finite buckets, if any,
+ // have numbers 1 through N-2; the overflow bucket has number N-1).
+ //
+ // The size of `bucket_counts` must be no greater than N as defined in
+ // `bucket_options`.
+ //
+ // Any suffix of trailing zero bucket_count fields may be omitted.
+ BucketCounts []int64 `protobuf:"varint,7,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution) Reset() { *m = Distribution{} }
+func (m *Distribution) String() string { return proto.CompactTextString(m) }
+func (*Distribution) ProtoMessage() {}
+func (*Distribution) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0}
+}
+
+func (m *Distribution) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution.Unmarshal(m, b)
+}
+func (m *Distribution) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution.Marshal(b, m, deterministic)
+}
+func (m *Distribution) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution.Merge(m, src)
+}
+func (m *Distribution) XXX_Size() int {
+ return xxx_messageInfo_Distribution.Size(m)
+}
+func (m *Distribution) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution proto.InternalMessageInfo
+
+func (m *Distribution) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+func (m *Distribution) GetMean() float64 {
+ if m != nil {
+ return m.Mean
+ }
+ return 0
+}
+
+func (m *Distribution) GetSumOfSquaredDeviation() float64 {
+ if m != nil {
+ return m.SumOfSquaredDeviation
+ }
+ return 0
+}
+
+func (m *Distribution) GetRange() *Distribution_Range {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *Distribution) GetBucketOptions() *Distribution_BucketOptions {
+ if m != nil {
+ return m.BucketOptions
+ }
+ return nil
+}
+
+func (m *Distribution) GetBucketCounts() []int64 {
+ if m != nil {
+ return m.BucketCounts
+ }
+ return nil
+}
+
+// The range of the population values.
+type Distribution_Range struct {
+ // The minimum of the population values.
+ Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"`
+ // The maximum of the population values.
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_Range) Reset() { *m = Distribution_Range{} }
+func (m *Distribution_Range) String() string { return proto.CompactTextString(m) }
+func (*Distribution_Range) ProtoMessage() {}
+func (*Distribution_Range) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0, 0}
+}
+
+func (m *Distribution_Range) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_Range.Unmarshal(m, b)
+}
+func (m *Distribution_Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_Range.Marshal(b, m, deterministic)
+}
+func (m *Distribution_Range) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_Range.Merge(m, src)
+}
+func (m *Distribution_Range) XXX_Size() int {
+ return xxx_messageInfo_Distribution_Range.Size(m)
+}
+func (m *Distribution_Range) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_Range.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_Range proto.InternalMessageInfo
+
+func (m *Distribution_Range) GetMin() float64 {
+ if m != nil {
+ return m.Min
+ }
+ return 0
+}
+
+func (m *Distribution_Range) GetMax() float64 {
+ if m != nil {
+ return m.Max
+ }
+ return 0
+}
+
+// A Distribution may optionally contain a histogram of the values in the
+// population. The histogram is given in `bucket_counts` as counts of values
+// that fall into one of a sequence of non-overlapping buckets. The sequence
+// of buckets is described by `bucket_options`.
+//
+// A bucket specifies an inclusive lower bound and exclusive upper bound for
+// the values that are counted for that bucket. The upper bound of a bucket
+// is strictly greater than the lower bound.
+//
+// The sequence of N buckets for a Distribution consists of an underflow
+// bucket (number 0), zero or more finite buckets (number 1 through N - 2) and
+// an overflow bucket (number N - 1). The buckets are contiguous: the lower
+// bound of bucket i (i > 0) is the same as the upper bound of bucket i - 1.
+// The buckets span the whole range of finite values: lower bound of the
+// underflow bucket is -infinity and the upper bound of the overflow bucket is
+// +infinity. The finite buckets are so-called because both bounds are
+// finite.
+//
+// `BucketOptions` describes bucket boundaries in one of three ways. Two
+// describe the boundaries by giving parameters for a formula to generate
+// boundaries and one gives the bucket boundaries explicitly.
+//
+// If `bucket_boundaries` is not given, then no `bucket_counts` may be given.
+type Distribution_BucketOptions struct {
+ // Exactly one of these three fields must be set.
+ //
+ // Types that are valid to be assigned to Options:
+ // *Distribution_BucketOptions_LinearBuckets
+ // *Distribution_BucketOptions_ExponentialBuckets
+ // *Distribution_BucketOptions_ExplicitBuckets
+ Options isDistribution_BucketOptions_Options `protobuf_oneof:"options"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions) Reset() { *m = Distribution_BucketOptions{} }
+func (m *Distribution_BucketOptions) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions) ProtoMessage() {}
+func (*Distribution_BucketOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0, 1}
+}
+
+func (m *Distribution_BucketOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions.Marshal(b, m, deterministic)
+}
+func (m *Distribution_BucketOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions.Merge(m, src)
+}
+func (m *Distribution_BucketOptions) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions.Size(m)
+}
+func (m *Distribution_BucketOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions proto.InternalMessageInfo
+
+type isDistribution_BucketOptions_Options interface {
+ isDistribution_BucketOptions_Options()
+}
+
+type Distribution_BucketOptions_LinearBuckets struct {
+ LinearBuckets *Distribution_BucketOptions_Linear `protobuf:"bytes,1,opt,name=linear_buckets,json=linearBuckets,proto3,oneof"`
+}
+
+type Distribution_BucketOptions_ExponentialBuckets struct {
+ ExponentialBuckets *Distribution_BucketOptions_Exponential `protobuf:"bytes,2,opt,name=exponential_buckets,json=exponentialBuckets,proto3,oneof"`
+}
+
+type Distribution_BucketOptions_ExplicitBuckets struct {
+ ExplicitBuckets *Distribution_BucketOptions_Explicit `protobuf:"bytes,3,opt,name=explicit_buckets,json=explicitBuckets,proto3,oneof"`
+}
+
+func (*Distribution_BucketOptions_LinearBuckets) isDistribution_BucketOptions_Options() {}
+
+func (*Distribution_BucketOptions_ExponentialBuckets) isDistribution_BucketOptions_Options() {}
+
+func (*Distribution_BucketOptions_ExplicitBuckets) isDistribution_BucketOptions_Options() {}
+
+func (m *Distribution_BucketOptions) GetOptions() isDistribution_BucketOptions_Options {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *Distribution_BucketOptions) GetLinearBuckets() *Distribution_BucketOptions_Linear {
+ if x, ok := m.GetOptions().(*Distribution_BucketOptions_LinearBuckets); ok {
+ return x.LinearBuckets
+ }
+ return nil
+}
+
+func (m *Distribution_BucketOptions) GetExponentialBuckets() *Distribution_BucketOptions_Exponential {
+ if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExponentialBuckets); ok {
+ return x.ExponentialBuckets
+ }
+ return nil
+}
+
+func (m *Distribution_BucketOptions) GetExplicitBuckets() *Distribution_BucketOptions_Explicit {
+ if x, ok := m.GetOptions().(*Distribution_BucketOptions_ExplicitBuckets); ok {
+ return x.ExplicitBuckets
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Distribution_BucketOptions) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Distribution_BucketOptions_OneofMarshaler, _Distribution_BucketOptions_OneofUnmarshaler, _Distribution_BucketOptions_OneofSizer, []interface{}{
+ (*Distribution_BucketOptions_LinearBuckets)(nil),
+ (*Distribution_BucketOptions_ExponentialBuckets)(nil),
+ (*Distribution_BucketOptions_ExplicitBuckets)(nil),
+ }
+}
+
+func _Distribution_BucketOptions_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Distribution_BucketOptions)
+ // options
+ switch x := m.Options.(type) {
+ case *Distribution_BucketOptions_LinearBuckets:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.LinearBuckets); err != nil {
+ return err
+ }
+ case *Distribution_BucketOptions_ExponentialBuckets:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ExponentialBuckets); err != nil {
+ return err
+ }
+ case *Distribution_BucketOptions_ExplicitBuckets:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ExplicitBuckets); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Distribution_BucketOptions.Options has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Distribution_BucketOptions_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Distribution_BucketOptions)
+ switch tag {
+ case 1: // options.linear_buckets
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Distribution_BucketOptions_Linear)
+ err := b.DecodeMessage(msg)
+ m.Options = &Distribution_BucketOptions_LinearBuckets{msg}
+ return true, err
+ case 2: // options.exponential_buckets
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Distribution_BucketOptions_Exponential)
+ err := b.DecodeMessage(msg)
+ m.Options = &Distribution_BucketOptions_ExponentialBuckets{msg}
+ return true, err
+ case 3: // options.explicit_buckets
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Distribution_BucketOptions_Explicit)
+ err := b.DecodeMessage(msg)
+ m.Options = &Distribution_BucketOptions_ExplicitBuckets{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Distribution_BucketOptions_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Distribution_BucketOptions)
+ // options
+ switch x := m.Options.(type) {
+ case *Distribution_BucketOptions_LinearBuckets:
+ s := proto.Size(x.LinearBuckets)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Distribution_BucketOptions_ExponentialBuckets:
+ s := proto.Size(x.ExponentialBuckets)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Distribution_BucketOptions_ExplicitBuckets:
+ s := proto.Size(x.ExplicitBuckets)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Specify a sequence of buckets that all have the same width (except
+// overflow and underflow). Each bucket represents a constant absolute
+// uncertainty on the specific value in the bucket.
+//
+// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for
+// bucket `i`:
+//
+// Upper bound (0 <= i < N-1): offset + (width * i).
+// Lower bound (1 <= i < N): offset + (width * (i - 1)).
+type Distribution_BucketOptions_Linear struct {
+ // Must be greater than 0.
+ NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"`
+ // Must be greater than 0.
+ Width float64 `protobuf:"fixed64,2,opt,name=width,proto3" json:"width,omitempty"`
+ // Lower bound of the first bucket.
+ Offset float64 `protobuf:"fixed64,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Linear) Reset() { *m = Distribution_BucketOptions_Linear{} }
+func (m *Distribution_BucketOptions_Linear) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Linear) ProtoMessage() {}
+func (*Distribution_BucketOptions_Linear) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 0}
+}
+
+func (m *Distribution_BucketOptions_Linear) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions_Linear.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Linear) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions_Linear.Marshal(b, m, deterministic)
+}
+func (m *Distribution_BucketOptions_Linear) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions_Linear.Merge(m, src)
+}
+func (m *Distribution_BucketOptions_Linear) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions_Linear.Size(m)
+}
+func (m *Distribution_BucketOptions_Linear) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions_Linear.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Linear proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Linear) GetNumFiniteBuckets() int32 {
+ if m != nil {
+ return m.NumFiniteBuckets
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Linear) GetWidth() float64 {
+ if m != nil {
+ return m.Width
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Linear) GetOffset() float64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+// Specify a sequence of buckets that have a width that is proportional to
+// the value of the lower bound. Each bucket represents a constant relative
+// uncertainty on a specific value in the bucket.
+//
+// Defines `num_finite_buckets + 2` (= N) buckets with these boundaries for
+// bucket i:
+//
+// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i).
+// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)).
+type Distribution_BucketOptions_Exponential struct {
+ // Must be greater than 0.
+ NumFiniteBuckets int32 `protobuf:"varint,1,opt,name=num_finite_buckets,json=numFiniteBuckets,proto3" json:"num_finite_buckets,omitempty"`
+ // Must be greater than 1.
+ GrowthFactor float64 `protobuf:"fixed64,2,opt,name=growth_factor,json=growthFactor,proto3" json:"growth_factor,omitempty"`
+ // Must be greater than 0.
+ Scale float64 `protobuf:"fixed64,3,opt,name=scale,proto3" json:"scale,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Exponential) Reset() {
+ *m = Distribution_BucketOptions_Exponential{}
+}
+func (m *Distribution_BucketOptions_Exponential) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Exponential) ProtoMessage() {}
+func (*Distribution_BucketOptions_Exponential) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 1}
+}
+
+func (m *Distribution_BucketOptions_Exponential) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Marshal(b, m, deterministic)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions_Exponential.Merge(m, src)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions_Exponential.Size(m)
+}
+func (m *Distribution_BucketOptions_Exponential) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions_Exponential.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Exponential proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Exponential) GetNumFiniteBuckets() int32 {
+ if m != nil {
+ return m.NumFiniteBuckets
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetGrowthFactor() float64 {
+ if m != nil {
+ return m.GrowthFactor
+ }
+ return 0
+}
+
+func (m *Distribution_BucketOptions_Exponential) GetScale() float64 {
+ if m != nil {
+ return m.Scale
+ }
+ return 0
+}
+
+// A set of buckets with arbitrary widths.
+//
+// Defines `size(bounds) + 1` (= N) buckets with these boundaries for
+// bucket i:
+//
+// Upper bound (0 <= i < N-1): bounds[i]
+// Lower bound (1 <= i < N); bounds[i - 1]
+//
+// There must be at least one element in `bounds`. If `bounds` has only one
+// element, there are no finite buckets, and that single element is the
+// common boundary of the overflow and underflow buckets.
+type Distribution_BucketOptions_Explicit struct {
+ // The values must be monotonically increasing.
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Distribution_BucketOptions_Explicit) Reset() { *m = Distribution_BucketOptions_Explicit{} }
+func (m *Distribution_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
+func (*Distribution_BucketOptions_Explicit) ProtoMessage() {}
+func (*Distribution_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0835ee0fd90bf943, []int{0, 1, 2}
+}
+
+func (m *Distribution_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Unmarshal(m, b)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Marshal(b, m, deterministic)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Distribution_BucketOptions_Explicit.Merge(m, src)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_Size() int {
+ return xxx_messageInfo_Distribution_BucketOptions_Explicit.Size(m)
+}
+func (m *Distribution_BucketOptions_Explicit) XXX_DiscardUnknown() {
+ xxx_messageInfo_Distribution_BucketOptions_Explicit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Distribution_BucketOptions_Explicit proto.InternalMessageInfo
+
+func (m *Distribution_BucketOptions_Explicit) GetBounds() []float64 {
+ if m != nil {
+ return m.Bounds
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Distribution)(nil), "google.api.Distribution")
+ proto.RegisterType((*Distribution_Range)(nil), "google.api.Distribution.Range")
+ proto.RegisterType((*Distribution_BucketOptions)(nil), "google.api.Distribution.BucketOptions")
+ proto.RegisterType((*Distribution_BucketOptions_Linear)(nil), "google.api.Distribution.BucketOptions.Linear")
+ proto.RegisterType((*Distribution_BucketOptions_Exponential)(nil), "google.api.Distribution.BucketOptions.Exponential")
+ proto.RegisterType((*Distribution_BucketOptions_Explicit)(nil), "google.api.Distribution.BucketOptions.Explicit")
+}
+
+func init() { proto.RegisterFile("google/api/distribution.proto", fileDescriptor_0835ee0fd90bf943) }
+
+var fileDescriptor_0835ee0fd90bf943 = []byte{
+ // 522 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x5d, 0x6b, 0xd4, 0x40,
+ 0x14, 0xdd, 0x34, 0xfb, 0xa1, 0x77, 0x3f, 0x5c, 0xc7, 0x2a, 0x21, 0xa8, 0x2c, 0x2d, 0xc8, 0x82,
+ 0x9a, 0x85, 0x55, 0xf0, 0xc1, 0xb7, 0x6d, 0x2d, 0xfb, 0xa0, 0xb4, 0x8c, 0xe0, 0x83, 0x08, 0x61,
+ 0x36, 0x99, 0xa4, 0xa3, 0xc9, 0x4c, 0xcc, 0x4c, 0xda, 0xfd, 0x01, 0xfe, 0x29, 0xff, 0x9d, 0xe4,
+ 0x4e, 0xb6, 0x4d, 0x11, 0x61, 0x7d, 0x9b, 0x73, 0xef, 0x99, 0x73, 0xce, 0xbd, 0x64, 0x02, 0xcf,
+ 0x52, 0xa5, 0xd2, 0x8c, 0x2f, 0x58, 0x21, 0x16, 0xb1, 0xd0, 0xa6, 0x14, 0x9b, 0xca, 0x08, 0x25,
+ 0x83, 0xa2, 0x54, 0x46, 0x11, 0xb0, 0xed, 0x80, 0x15, 0xc2, 0x7f, 0xda, 0xa2, 0x32, 0x29, 0x95,
+ 0x61, 0x35, 0x51, 0x5b, 0xe6, 0xd1, 0xaf, 0x01, 0x8c, 0x4e, 0x5b, 0x02, 0xe4, 0x10, 0x7a, 0x91,
+ 0xaa, 0xa4, 0xf1, 0x9c, 0x99, 0x33, 0x77, 0xa9, 0x05, 0x84, 0x40, 0x37, 0xe7, 0x4c, 0x7a, 0x07,
+ 0x33, 0x67, 0xee, 0x50, 0x3c, 0x93, 0x77, 0xe0, 0xe9, 0x2a, 0x0f, 0x55, 0x12, 0xea, 0x9f, 0x15,
+ 0x2b, 0x79, 0x1c, 0xc6, 0xfc, 0x4a, 0xa0, 0xba, 0xe7, 0x22, 0xef, 0xb1, 0xae, 0xf2, 0xf3, 0xe4,
+ 0xb3, 0xed, 0x9e, 0xee, 0x9a, 0xe4, 0x2d, 0xf4, 0x4a, 0x26, 0x53, 0xee, 0x75, 0x67, 0xce, 0x7c,
+ 0xb8, 0x7c, 0x1e, 0xdc, 0xa6, 0x0d, 0xda, 0x59, 0x02, 0x5a, 0xb3, 0xa8, 0x25, 0x93, 0x4f, 0x30,
+ 0xd9, 0x54, 0xd1, 0x0f, 0x6e, 0x42, 0x55, 0xe0, 0x04, 0x5e, 0x1f, 0xaf, 0xbf, 0xf8, 0xe7, 0xf5,
+ 0x15, 0xd2, 0xcf, 0x2d, 0x9b, 0x8e, 0x37, 0x6d, 0x48, 0x8e, 0xa1, 0x29, 0x84, 0x38, 0xa1, 0xf6,
+ 0x06, 0x33, 0x77, 0xee, 0xd2, 0x91, 0x2d, 0x9e, 0x60, 0xcd, 0x7f, 0x09, 0x3d, 0xcc, 0x40, 0xa6,
+ 0xe0, 0xe6, 0x42, 0xe2, 0x4e, 0x1c, 0x5a, 0x1f, 0xb1, 0xc2, 0xb6, 0xcd, 0x42, 0xea, 0xa3, 0xff,
+ 0xbb, 0x0b, 0xe3, 0x3b, 0x96, 0xe4, 0x0b, 0x4c, 0x32, 0x21, 0x39, 0x2b, 0x43, 0xab, 0xaa, 0x51,
+ 0x60, 0xb8, 0x7c, 0xbd, 0x5f, 0xe4, 0xe0, 0x23, 0x5e, 0x5e, 0x77, 0xe8, 0xd8, 0xca, 0xd8, 0xae,
+ 0x26, 0x1c, 0x1e, 0xf1, 0x6d, 0xa1, 0x24, 0x97, 0x46, 0xb0, 0xec, 0x46, 0xfc, 0x00, 0xc5, 0x97,
+ 0x7b, 0x8a, 0x7f, 0xb8, 0x55, 0x58, 0x77, 0x28, 0x69, 0x09, 0xee, 0x6c, 0xbe, 0xc1, 0x94, 0x6f,
+ 0x8b, 0x4c, 0x44, 0xc2, 0xdc, 0x78, 0xb8, 0xe8, 0xb1, 0xd8, 0xdf, 0x03, 0xaf, 0xaf, 0x3b, 0xf4,
+ 0xc1, 0x4e, 0xaa, 0x51, 0xf7, 0x63, 0xe8, 0xdb, 0xf9, 0xc8, 0x2b, 0x20, 0xb2, 0xca, 0xc3, 0x44,
+ 0x48, 0x61, 0xf8, 0x9d, 0x55, 0xf5, 0xe8, 0x54, 0x56, 0xf9, 0x19, 0x36, 0x76, 0xa9, 0x0e, 0xa1,
+ 0x77, 0x2d, 0x62, 0x73, 0xd9, 0xac, 0xde, 0x02, 0xf2, 0x04, 0xfa, 0x2a, 0x49, 0x34, 0x37, 0xcd,
+ 0xa7, 0xd7, 0x20, 0xff, 0x0a, 0x86, 0xad, 0x41, 0xff, 0xd3, 0xea, 0x18, 0xc6, 0x69, 0xa9, 0xae,
+ 0xcd, 0x65, 0x98, 0xb0, 0xc8, 0xa8, 0xb2, 0xb1, 0x1c, 0xd9, 0xe2, 0x19, 0xd6, 0xea, 0x3c, 0x3a,
+ 0x62, 0x19, 0x6f, 0x8c, 0x2d, 0xf0, 0x8f, 0xe0, 0xde, 0x6e, 0xf8, 0x3a, 0xdb, 0x46, 0x55, 0x32,
+ 0xae, 0x8d, 0xdc, 0x3a, 0x9b, 0x45, 0xab, 0xfb, 0x30, 0x68, 0x3e, 0xe5, 0xd5, 0x77, 0x98, 0x44,
+ 0x2a, 0x6f, 0x6d, 0x75, 0xf5, 0xb0, 0xbd, 0xd6, 0x8b, 0xfa, 0xad, 0x5e, 0x38, 0x5f, 0x4f, 0x1a,
+ 0x42, 0xaa, 0x32, 0x26, 0xd3, 0x40, 0x95, 0xe9, 0x22, 0xe5, 0x12, 0x5f, 0xf2, 0xc2, 0xb6, 0x58,
+ 0x21, 0xf4, 0x5f, 0x7f, 0x85, 0xf7, 0x6d, 0xb0, 0xe9, 0x23, 0xff, 0xcd, 0x9f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x62, 0xb4, 0xef, 0x6b, 0x44, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
new file mode 100644
index 00000000000..82a31400db5
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/label/label.pb.go
@@ -0,0 +1,139 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/label.proto
+
+package label
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Value types that can be used as label values.
+type LabelDescriptor_ValueType int32
+
+const (
+ // A variable-length string. This is the default.
+ LabelDescriptor_STRING LabelDescriptor_ValueType = 0
+ // Boolean; true or false.
+ LabelDescriptor_BOOL LabelDescriptor_ValueType = 1
+ // A 64-bit signed integer.
+ LabelDescriptor_INT64 LabelDescriptor_ValueType = 2
+)
+
+var LabelDescriptor_ValueType_name = map[int32]string{
+ 0: "STRING",
+ 1: "BOOL",
+ 2: "INT64",
+}
+
+var LabelDescriptor_ValueType_value = map[string]int32{
+ "STRING": 0,
+ "BOOL": 1,
+ "INT64": 2,
+}
+
+func (x LabelDescriptor_ValueType) String() string {
+ return proto.EnumName(LabelDescriptor_ValueType_name, int32(x))
+}
+
+func (LabelDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_f372a463e25ba151, []int{0, 0}
+}
+
+// A description of a label.
+type LabelDescriptor struct {
+ // The label key.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The type of data that can be assigned to the label.
+ ValueType LabelDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.LabelDescriptor_ValueType" json:"value_type,omitempty"`
+ // A human-readable description for the label.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelDescriptor) Reset() { *m = LabelDescriptor{} }
+func (m *LabelDescriptor) String() string { return proto.CompactTextString(m) }
+func (*LabelDescriptor) ProtoMessage() {}
+func (*LabelDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f372a463e25ba151, []int{0}
+}
+
+func (m *LabelDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelDescriptor.Unmarshal(m, b)
+}
+func (m *LabelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelDescriptor.Marshal(b, m, deterministic)
+}
+func (m *LabelDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelDescriptor.Merge(m, src)
+}
+func (m *LabelDescriptor) XXX_Size() int {
+ return xxx_messageInfo_LabelDescriptor.Size(m)
+}
+func (m *LabelDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelDescriptor proto.InternalMessageInfo
+
+func (m *LabelDescriptor) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *LabelDescriptor) GetValueType() LabelDescriptor_ValueType {
+ if m != nil {
+ return m.ValueType
+ }
+ return LabelDescriptor_STRING
+}
+
+func (m *LabelDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("google.api.LabelDescriptor_ValueType", LabelDescriptor_ValueType_name, LabelDescriptor_ValueType_value)
+ proto.RegisterType((*LabelDescriptor)(nil), "google.api.LabelDescriptor")
+}
+
+func init() { proto.RegisterFile("google/api/label.proto", fileDescriptor_f372a463e25ba151) }
+
+var fileDescriptor_f372a463e25ba151 = []byte{
+ // 252 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0xd1, 0x2b, 0x28, 0xca,
+ 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x88, 0xeb, 0x25, 0x16, 0x64, 0x2a, 0xed, 0x64, 0xe4, 0xe2, 0xf7,
+ 0x01, 0xc9, 0xb9, 0xa4, 0x16, 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0x09, 0x09, 0x70, 0x31,
+ 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0x98, 0x42, 0x2e, 0x5c, 0x5c,
+ 0x65, 0x89, 0x39, 0xa5, 0xa9, 0xf1, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x7c,
+ 0x46, 0xaa, 0x7a, 0x08, 0x63, 0xf4, 0xd0, 0x8c, 0xd0, 0x0b, 0x03, 0xa9, 0x0e, 0xa9, 0x2c, 0x48,
+ 0x0d, 0xe2, 0x2c, 0x83, 0x31, 0x85, 0x14, 0xb8, 0xb8, 0x53, 0xa0, 0x4a, 0x32, 0xf3, 0xf3, 0x24,
+ 0x98, 0xc1, 0xe6, 0x23, 0x0b, 0x29, 0xe9, 0x70, 0x71, 0xc2, 0x75, 0x0a, 0x71, 0x71, 0xb1, 0x05,
+ 0x87, 0x04, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x08, 0x71, 0x70, 0xb1, 0x38, 0xf9, 0xfb, 0xfb, 0x08,
+ 0x30, 0x0a, 0x71, 0x72, 0xb1, 0x7a, 0xfa, 0x85, 0x98, 0x99, 0x08, 0x30, 0x39, 0xc5, 0x73, 0xf1,
+ 0x25, 0xe7, 0xe7, 0x22, 0x39, 0xc3, 0x89, 0x0b, 0xec, 0x8e, 0x00, 0x90, 0x2f, 0x03, 0x18, 0xa3,
+ 0x4c, 0xa1, 0x32, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa, 0xe9, 0xa9,
+ 0x79, 0xe0, 0x30, 0xd0, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16, 0x23, 0x82, 0xc7, 0x1a, 0x4c, 0xfe,
+ 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xe2, 0xee, 0x18, 0xe0, 0x99, 0xc4, 0x06, 0x56, 0x6b, 0x0c, 0x08,
+ 0x00, 0x00, 0xff, 0xff, 0x57, 0x04, 0xaa, 0x1f, 0x49, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
new file mode 100644
index 00000000000..800e512cc9b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go
@@ -0,0 +1,397 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/metric.proto
+
+package metric
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ label "google.golang.org/genproto/googleapis/api/label"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The kind of measurement. It describes how the data is reported.
+type MetricDescriptor_MetricKind int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_METRIC_KIND_UNSPECIFIED MetricDescriptor_MetricKind = 0
+ // An instantaneous measurement of a value.
+ MetricDescriptor_GAUGE MetricDescriptor_MetricKind = 1
+ // The change in a value during a time interval.
+ MetricDescriptor_DELTA MetricDescriptor_MetricKind = 2
+ // A value accumulated over a time interval. Cumulative
+ // measurements in a time series should have the same start time
+ // and increasing end times, until an event resets the cumulative
+ // value to zero and sets a new start time for the following
+ // points.
+ MetricDescriptor_CUMULATIVE MetricDescriptor_MetricKind = 3
+)
+
+var MetricDescriptor_MetricKind_name = map[int32]string{
+ 0: "METRIC_KIND_UNSPECIFIED",
+ 1: "GAUGE",
+ 2: "DELTA",
+ 3: "CUMULATIVE",
+}
+
+var MetricDescriptor_MetricKind_value = map[string]int32{
+ "METRIC_KIND_UNSPECIFIED": 0,
+ "GAUGE": 1,
+ "DELTA": 2,
+ "CUMULATIVE": 3,
+}
+
+func (x MetricDescriptor_MetricKind) String() string {
+ return proto.EnumName(MetricDescriptor_MetricKind_name, int32(x))
+}
+
+func (MetricDescriptor_MetricKind) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_927eaac1a24f8abb, []int{0, 0}
+}
+
+// The value type of a metric.
+type MetricDescriptor_ValueType int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_VALUE_TYPE_UNSPECIFIED MetricDescriptor_ValueType = 0
+ // The value is a boolean.
+ // This value type can be used only if the metric kind is `GAUGE`.
+ MetricDescriptor_BOOL MetricDescriptor_ValueType = 1
+ // The value is a signed 64-bit integer.
+ MetricDescriptor_INT64 MetricDescriptor_ValueType = 2
+ // The value is a double precision floating point number.
+ MetricDescriptor_DOUBLE MetricDescriptor_ValueType = 3
+ // The value is a text string.
+ // This value type can be used only if the metric kind is `GAUGE`.
+ MetricDescriptor_STRING MetricDescriptor_ValueType = 4
+ // The value is a [`Distribution`][google.api.Distribution].
+ MetricDescriptor_DISTRIBUTION MetricDescriptor_ValueType = 5
+ // The value is money.
+ MetricDescriptor_MONEY MetricDescriptor_ValueType = 6
+)
+
+var MetricDescriptor_ValueType_name = map[int32]string{
+ 0: "VALUE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "DOUBLE",
+ 4: "STRING",
+ 5: "DISTRIBUTION",
+ 6: "MONEY",
+}
+
+var MetricDescriptor_ValueType_value = map[string]int32{
+ "VALUE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "DOUBLE": 3,
+ "STRING": 4,
+ "DISTRIBUTION": 5,
+ "MONEY": 6,
+}
+
+func (x MetricDescriptor_ValueType) String() string {
+ return proto.EnumName(MetricDescriptor_ValueType_name, int32(x))
+}
+
+func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_927eaac1a24f8abb, []int{0, 1}
+}
+
+// Defines a metric type and its schema. Once a metric descriptor is created,
+// deleting or altering it stops data collection and makes the metric type's
+// existing data unusable.
+type MetricDescriptor struct {
+ // The resource name of the metric descriptor.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The metric type, including its DNS name prefix. The type is not
+ // URL-encoded. All user-defined custom metric types have the DNS name
+ // `custom.googleapis.com`. Metric types should use a natural hierarchical
+ // grouping. For example:
+ //
+ // "custom.googleapis.com/invoice/paid/amount"
+ // "appengine.googleapis.com/http/server/response_latencies"
+ Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"`
+ // The set of labels that can be used to describe a specific
+ // instance of this metric type. For example, the
+ // `appengine.googleapis.com/http/server/response_latencies` metric
+ // type has a label for the HTTP response code, `response_code`, so
+ // you can look at latencies for successful responses or just
+ // for responses that failed.
+ Labels []*label.LabelDescriptor `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"`
+ // Whether the metric records instantaneous values, changes to a value, etc.
+ // Some combinations of `metric_kind` and `value_type` might not be supported.
+ MetricKind MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // Whether the measurement is an integer, a floating-point number, etc.
+ // Some combinations of `metric_kind` and `value_type` might not be supported.
+ ValueType MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The unit in which the metric value is reported. It is only applicable
+ // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The
+ // supported units are a subset of [The Unified Code for Units of
+ // Measure](http://unitsofmeasure.org/ucum.html) standard:
+ //
+ // **Basic units (UNIT)**
+ //
+ // * `bit` bit
+ // * `By` byte
+ // * `s` second
+ // * `min` minute
+ // * `h` hour
+ // * `d` day
+ //
+ // **Prefixes (PREFIX)**
+ //
+ // * `k` kilo (10**3)
+ // * `M` mega (10**6)
+ // * `G` giga (10**9)
+ // * `T` tera (10**12)
+ // * `P` peta (10**15)
+ // * `E` exa (10**18)
+ // * `Z` zetta (10**21)
+ // * `Y` yotta (10**24)
+ // * `m` milli (10**-3)
+ // * `u` micro (10**-6)
+ // * `n` nano (10**-9)
+ // * `p` pico (10**-12)
+ // * `f` femto (10**-15)
+ // * `a` atto (10**-18)
+ // * `z` zepto (10**-21)
+ // * `y` yocto (10**-24)
+ // * `Ki` kibi (2**10)
+ // * `Mi` mebi (2**20)
+ // * `Gi` gibi (2**30)
+ // * `Ti` tebi (2**40)
+ //
+ // **Grammar**
+ //
+ // The grammar also includes these connectors:
+ //
+ // * `/` division (as an infix operator, e.g. `1/s`).
+ // * `.` multiplication (as an infix operator, e.g. `GBy.d`)
+ //
+ // The grammar for a unit is as follows:
+ //
+ // Expression = Component { "." Component } { "/" Component } ;
+ //
+ // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ]
+ // | Annotation
+ // | "1"
+ // ;
+ //
+ // Annotation = "{" NAME "}" ;
+ //
+ // Notes:
+ //
+ // * `Annotation` is just a comment if it follows a `UNIT` and is
+ // equivalent to `1` if it is used alone. For examples,
+ // `{requests}/s == 1/s`, `By{transmitted}/s == By/s`.
+ // * `NAME` is a sequence of non-blank printable ASCII characters not
+ // containing '{' or '}'.
+ // * `1` represents dimensionless value 1, such as in `1/s`.
+ // * `%` represents dimensionless value 1/100, and annotates values giving
+ // a percentage.
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+ // A detailed description of the metric, which can be used in documentation.
+ Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ // A concise name for the metric, which can be displayed in user interfaces.
+ // Use sentence case without an ending period, for example "Request count".
+ // This field is optional but it is recommended to be set for any metrics
+ // associated with user-visible concepts, such as Quota.
+ DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
+func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
+func (*MetricDescriptor) ProtoMessage() {}
+func (*MetricDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_927eaac1a24f8abb, []int{0}
+}
+
+func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b)
+}
+func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic)
+}
+func (m *MetricDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricDescriptor.Merge(m, src)
+}
+func (m *MetricDescriptor) XXX_Size() int {
+ return xxx_messageInfo_MetricDescriptor.Size(m)
+}
+func (m *MetricDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo
+
+func (m *MetricDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetLabels() []*label.LabelDescriptor {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *MetricDescriptor) GetMetricKind() MetricDescriptor_MetricKind {
+ if m != nil {
+ return m.MetricKind
+ }
+ return MetricDescriptor_METRIC_KIND_UNSPECIFIED
+}
+
+func (m *MetricDescriptor) GetValueType() MetricDescriptor_ValueType {
+ if m != nil {
+ return m.ValueType
+ }
+ return MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+}
+
+func (m *MetricDescriptor) GetUnit() string {
+ if m != nil {
+ return m.Unit
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+// A specific metric, identified by specifying values for all of the
+// labels of a [`MetricDescriptor`][google.api.MetricDescriptor].
+type Metric struct {
+ // An existing metric type, see [google.api.MetricDescriptor][google.api.MetricDescriptor].
+ // For example, `custom.googleapis.com/invoice/paid/amount`.
+ Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"`
+ // The set of label values that uniquely identify this metric. All
+ // labels listed in the `MetricDescriptor` must be assigned values.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_927eaac1a24f8abb, []int{1}
+}
+
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (m *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(m, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+func (m *Metric) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *Metric) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.api.MetricDescriptor_MetricKind", MetricDescriptor_MetricKind_name, MetricDescriptor_MetricKind_value)
+ proto.RegisterEnum("google.api.MetricDescriptor_ValueType", MetricDescriptor_ValueType_name, MetricDescriptor_ValueType_value)
+ proto.RegisterType((*MetricDescriptor)(nil), "google.api.MetricDescriptor")
+ proto.RegisterType((*Metric)(nil), "google.api.Metric")
+ proto.RegisterMapType((map[string]string)(nil), "google.api.Metric.LabelsEntry")
+}
+
+func init() { proto.RegisterFile("google/api/metric.proto", fileDescriptor_927eaac1a24f8abb) }
+
+var fileDescriptor_927eaac1a24f8abb = []byte{
+ // 506 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x4d, 0x6f, 0xda, 0x40,
+ 0x10, 0xad, 0x3f, 0x70, 0xc3, 0x10, 0xa1, 0xd5, 0xaa, 0x4a, 0x2c, 0x22, 0x55, 0x94, 0x43, 0xcb,
+ 0x09, 0xa4, 0xa4, 0x4a, 0xbf, 0x4e, 0x80, 0xb7, 0xd4, 0x8a, 0xb1, 0x91, 0x63, 0x23, 0xa5, 0x17,
+ 0xcb, 0x81, 0x95, 0x65, 0xc5, 0xd8, 0xae, 0x71, 0x22, 0xf9, 0x57, 0xf4, 0x17, 0xf4, 0xd2, 0x5f,
+ 0x5a, 0xed, 0xae, 0x03, 0x16, 0x95, 0x72, 0xe2, 0xed, 0x9b, 0x37, 0x6f, 0x67, 0x96, 0x67, 0x38,
+ 0x8f, 0xb2, 0x2c, 0x4a, 0xe8, 0x38, 0xcc, 0xe3, 0xf1, 0x96, 0x96, 0x45, 0xbc, 0x1e, 0xe5, 0x45,
+ 0x56, 0x66, 0x18, 0x44, 0x61, 0x14, 0xe6, 0x71, 0xef, 0xac, 0x21, 0x4a, 0xc2, 0x7b, 0x9a, 0x08,
+ 0xcd, 0xe0, 0x8f, 0x0a, 0x68, 0xc1, 0x9b, 0x0c, 0xba, 0x5b, 0x17, 0x71, 0x5e, 0x66, 0x05, 0xc6,
+ 0xa0, 0xa6, 0xe1, 0x96, 0xea, 0x52, 0x5f, 0x1a, 0xb6, 0x5d, 0x8e, 0x19, 0x57, 0x56, 0x39, 0xd5,
+ 0x4f, 0x04, 0xc7, 0x30, 0xbe, 0x02, 0x8d, 0x7b, 0xed, 0x74, 0xb9, 0xaf, 0x0c, 0x3b, 0x97, 0x17,
+ 0xa3, 0xc3, 0x8d, 0x23, 0x8b, 0x55, 0x0e, 0xa6, 0x6e, 0x2d, 0xc5, 0x3f, 0xa0, 0x23, 0xa6, 0x0c,
+ 0x1e, 0xe2, 0x74, 0xa3, 0x2b, 0x7d, 0x69, 0xd8, 0xbd, 0xfc, 0xd0, 0xec, 0x3c, 0x9e, 0xa7, 0x26,
+ 0x6e, 0xe2, 0x74, 0xe3, 0xc2, 0x76, 0x8f, 0x31, 0x01, 0x78, 0x0a, 0x93, 0x47, 0x1a, 0xf0, 0xc1,
+ 0x54, 0x6e, 0xf4, 0xfe, 0x45, 0xa3, 0x15, 0x93, 0x7b, 0x55, 0x4e, 0xdd, 0xf6, 0xd3, 0x33, 0x64,
+ 0x9b, 0x3d, 0xa6, 0x71, 0xa9, 0xb7, 0xc4, 0x66, 0x0c, 0xe3, 0x3e, 0x74, 0x36, 0x75, 0x5b, 0x9c,
+ 0xa5, 0xba, 0xc6, 0x4b, 0x4d, 0x0a, 0xbf, 0x83, 0xd3, 0x4d, 0xbc, 0xcb, 0x93, 0xb0, 0x0a, 0xf8,
+ 0x5b, 0xbd, 0xae, 0x25, 0x82, 0xb3, 0xc3, 0x2d, 0x1d, 0x38, 0x00, 0x87, 0xc9, 0xf1, 0x05, 0x9c,
+ 0x2f, 0x88, 0xe7, 0x9a, 0xb3, 0xe0, 0xc6, 0xb4, 0x8d, 0xc0, 0xb7, 0x6f, 0x97, 0x64, 0x66, 0x7e,
+ 0x37, 0x89, 0x81, 0x5e, 0xe1, 0x36, 0xb4, 0xe6, 0x13, 0x7f, 0x4e, 0x90, 0xc4, 0xa0, 0x41, 0x2c,
+ 0x6f, 0x82, 0x64, 0xdc, 0x05, 0x98, 0xf9, 0x0b, 0xdf, 0x9a, 0x78, 0xe6, 0x8a, 0x20, 0x65, 0xf0,
+ 0x0b, 0xda, 0xfb, 0x0d, 0x70, 0x0f, 0xce, 0x56, 0x13, 0xcb, 0x27, 0x81, 0x77, 0xb7, 0x24, 0x47,
+ 0x76, 0x27, 0xa0, 0x4e, 0x1d, 0xc7, 0x12, 0x6e, 0xa6, 0xed, 0x5d, 0x7f, 0x44, 0x32, 0x06, 0xd0,
+ 0x0c, 0xc7, 0x9f, 0x5a, 0x04, 0x29, 0x0c, 0xdf, 0x7a, 0xae, 0x69, 0xcf, 0x91, 0x8a, 0x11, 0x9c,
+ 0x1a, 0x26, 0x3b, 0x4d, 0x7d, 0xcf, 0x74, 0x6c, 0xd4, 0x62, 0x4d, 0x0b, 0xc7, 0x26, 0x77, 0x48,
+ 0x1b, 0xfc, 0x96, 0x40, 0x13, 0x4b, 0xec, 0x13, 0xa0, 0x34, 0x12, 0x70, 0x7d, 0x94, 0x80, 0xb7,
+ 0xff, 0x3f, 0xbf, 0x08, 0xc2, 0x8e, 0xa4, 0x65, 0x51, 0x3d, 0x87, 0xa0, 0xf7, 0x05, 0x3a, 0x0d,
+ 0x1a, 0x23, 0x50, 0x1e, 0x68, 0x55, 0xe7, 0x8d, 0x41, 0xfc, 0x06, 0x5a, 0xfc, 0x1f, 0xd2, 0x65,
+ 0xce, 0x89, 0xc3, 0x57, 0xf9, 0xb3, 0x34, 0x0d, 0xa0, 0xbb, 0xce, 0xb6, 0x8d, 0x7b, 0xa6, 0x1d,
+ 0x71, 0xd1, 0x92, 0x05, 0x7a, 0x29, 0xfd, 0xfc, 0x54, 0x97, 0xa2, 0x2c, 0x09, 0xd3, 0x68, 0x94,
+ 0x15, 0xd1, 0x38, 0xa2, 0x29, 0x8f, 0xfb, 0x58, 0x94, 0xc2, 0x3c, 0xde, 0x35, 0x3e, 0x97, 0x6f,
+ 0xe2, 0xe7, 0xaf, 0xac, 0xce, 0x27, 0x4b, 0xf3, 0x5e, 0xe3, 0xd2, 0xab, 0x7f, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0x18, 0x04, 0x05, 0x82, 0x58, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
new file mode 100644
index 00000000000..aede16b1413
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go
@@ -0,0 +1,294 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/monitored_resource.proto
+
+package monitoredres
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ _struct "github.com/golang/protobuf/ptypes/struct"
+ label "google.golang.org/genproto/googleapis/api/label"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a
+// type name and a set of labels. For example, the monitored resource
+// descriptor for Google Compute Engine VM instances has a type of
+// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
+// `"zone"` to identify particular VM instances.
+//
+// Different APIs can support different monitored resource types. APIs generally
+// provide a `list` method that returns the monitored resource descriptors used
+// by the API.
+type MonitoredResourceDescriptor struct {
+ // Optional. The resource name of the monitored resource descriptor:
+ // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
+ // {type} is the value of the `type` field in this object and
+ // {project_id} is a project ID that provides API-specific context for
+ // accessing the type. APIs that do not use project information can use the
+ // resource name format `"monitoredResourceDescriptors/{type}"`.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The monitored resource type. For example, the type
+ // `"cloudsql_database"` represents databases in Google Cloud SQL.
+ // The maximum length of this value is 256 characters.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Optional. A concise name for the monitored resource type that might be
+ // displayed in user interfaces. It should be a Title Cased Noun Phrase,
+ // without any article or other determiners. For example,
+ // `"Google Cloud SQL Database"`.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Optional. A detailed description of the monitored resource type that might
+ // be used in documentation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // Required. A set of labels used to describe instances of this monitored
+ // resource type. For example, an individual Google Cloud SQL database is
+ // identified by values for the labels `"database_id"` and `"zone"`.
+ Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResourceDescriptor) Reset() { *m = MonitoredResourceDescriptor{} }
+func (m *MonitoredResourceDescriptor) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResourceDescriptor) ProtoMessage() {}
+func (*MonitoredResourceDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cd8bd738b08f2bf, []int{0}
+}
+
+func (m *MonitoredResourceDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Unmarshal(m, b)
+}
+func (m *MonitoredResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Marshal(b, m, deterministic)
+}
+func (m *MonitoredResourceDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResourceDescriptor.Merge(m, src)
+}
+func (m *MonitoredResourceDescriptor) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResourceDescriptor.Size(m)
+}
+func (m *MonitoredResourceDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResourceDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResourceDescriptor proto.InternalMessageInfo
+
+func (m *MonitoredResourceDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MonitoredResourceDescriptor) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *MonitoredResourceDescriptor) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *MonitoredResourceDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *MonitoredResourceDescriptor) GetLabels() []*label.LabelDescriptor {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+// An object representing a resource that can be used for monitoring, logging,
+// billing, or other purposes. Examples include virtual machine instances,
+// databases, and storage devices such as disks. The `type` field identifies a
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's
+// schema. Information in the `labels` field identifies the actual resource and
+// its attributes according to the schema. For example, a particular Compute
+// Engine VM instance could be represented by the following object, because the
+// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels
+// `"instance_id"` and `"zone"`:
+//
+// { "type": "gce_instance",
+// "labels": { "instance_id": "12345678901234",
+// "zone": "us-central1-a" }}
+type MonitoredResource struct {
+ // Required. The monitored resource type. This field must match
+ // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For
+ // example, the type of a Compute Engine VM instance is `gce_instance`.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Required. Values for all of the labels listed in the associated monitored
+ // resource descriptor. For example, Compute Engine VM instances use the
+ // labels `"project_id"`, `"instance_id"`, and `"zone"`.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResource) Reset() { *m = MonitoredResource{} }
+func (m *MonitoredResource) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResource) ProtoMessage() {}
+func (*MonitoredResource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cd8bd738b08f2bf, []int{1}
+}
+
+func (m *MonitoredResource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResource.Unmarshal(m, b)
+}
+func (m *MonitoredResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResource.Marshal(b, m, deterministic)
+}
+func (m *MonitoredResource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResource.Merge(m, src)
+}
+func (m *MonitoredResource) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResource.Size(m)
+}
+func (m *MonitoredResource) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResource proto.InternalMessageInfo
+
+func (m *MonitoredResource) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *MonitoredResource) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object.
+// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to
+// uniquely identify a monitored resource instance. There is some other useful
+// auxiliary metadata. Google Stackdriver Monitoring & Logging uses an ingestion
+// pipeline to extract metadata for cloud resources of all types , and stores
+// the metadata in this message.
+type MonitoredResourceMetadata struct {
+ // Output only. Values for predefined system metadata labels.
+ // System labels are a kind of metadata extracted by Google Stackdriver.
+ // Stackdriver determines what system labels are useful and how to obtain
+ // their values. Some examples: "machine_image", "vpc", "subnet_id",
+ // "security_group", "name", etc.
+ // System label values can be only strings, Boolean values, or a list of
+ // strings. For example:
+ //
+ // { "name": "my-test-instance",
+ // "security_group": ["a", "b", "c"],
+ // "spot_instance": false }
+ SystemLabels *_struct.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"`
+ // Output only. A map of user-defined metadata labels.
+ UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MonitoredResourceMetadata) Reset() { *m = MonitoredResourceMetadata{} }
+func (m *MonitoredResourceMetadata) String() string { return proto.CompactTextString(m) }
+func (*MonitoredResourceMetadata) ProtoMessage() {}
+func (*MonitoredResourceMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6cd8bd738b08f2bf, []int{2}
+}
+
+func (m *MonitoredResourceMetadata) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MonitoredResourceMetadata.Unmarshal(m, b)
+}
+func (m *MonitoredResourceMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MonitoredResourceMetadata.Marshal(b, m, deterministic)
+}
+func (m *MonitoredResourceMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MonitoredResourceMetadata.Merge(m, src)
+}
+func (m *MonitoredResourceMetadata) XXX_Size() int {
+ return xxx_messageInfo_MonitoredResourceMetadata.Size(m)
+}
+func (m *MonitoredResourceMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_MonitoredResourceMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MonitoredResourceMetadata proto.InternalMessageInfo
+
+func (m *MonitoredResourceMetadata) GetSystemLabels() *_struct.Struct {
+ if m != nil {
+ return m.SystemLabels
+ }
+ return nil
+}
+
+func (m *MonitoredResourceMetadata) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*MonitoredResourceDescriptor)(nil), "google.api.MonitoredResourceDescriptor")
+ proto.RegisterType((*MonitoredResource)(nil), "google.api.MonitoredResource")
+ proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResource.LabelsEntry")
+ proto.RegisterType((*MonitoredResourceMetadata)(nil), "google.api.MonitoredResourceMetadata")
+ proto.RegisterMapType((map[string]string)(nil), "google.api.MonitoredResourceMetadata.UserLabelsEntry")
+}
+
+func init() {
+ proto.RegisterFile("google/api/monitored_resource.proto", fileDescriptor_6cd8bd738b08f2bf)
+}
+
+var fileDescriptor_6cd8bd738b08f2bf = []byte{
+ // 415 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0xab, 0xd3, 0x40,
+ 0x14, 0x65, 0xd2, 0x0f, 0xf0, 0xa6, 0x7e, 0x0d, 0x52, 0x63, 0xea, 0xa2, 0xd6, 0x4d, 0xdd, 0x24,
+ 0xd0, 0x22, 0xf8, 0xb9, 0x68, 0x55, 0x44, 0xb0, 0x52, 0x22, 0xba, 0x70, 0x13, 0xa6, 0xc9, 0x18,
+ 0x82, 0x49, 0x26, 0xcc, 0x4c, 0x84, 0xfc, 0x1d, 0xc1, 0xdf, 0xe1, 0x5f, 0x72, 0xe9, 0x52, 0x32,
+ 0x33, 0x69, 0xd3, 0x97, 0xc7, 0x83, 0xb7, 0xbb, 0xf7, 0xdc, 0x73, 0xcf, 0x3d, 0x27, 0x43, 0xe0,
+ 0x71, 0xc2, 0x58, 0x92, 0x51, 0x9f, 0x94, 0xa9, 0x9f, 0xb3, 0x22, 0x95, 0x8c, 0xd3, 0x38, 0xe4,
+ 0x54, 0xb0, 0x8a, 0x47, 0xd4, 0x2b, 0x39, 0x93, 0x0c, 0x83, 0x26, 0x79, 0xa4, 0x4c, 0xdd, 0x69,
+ 0x67, 0x21, 0x23, 0x07, 0x9a, 0x69, 0x8e, 0xfb, 0xd0, 0xe0, 0xaa, 0x3b, 0x54, 0xdf, 0x7d, 0x21,
+ 0x79, 0x15, 0x49, 0x3d, 0x5d, 0xfc, 0x41, 0x30, 0xdb, 0xb5, 0xf2, 0x81, 0x51, 0x7f, 0x4b, 0x45,
+ 0xc4, 0xd3, 0x52, 0x32, 0x8e, 0x31, 0x0c, 0x0b, 0x92, 0x53, 0x67, 0x34, 0x47, 0xcb, 0x1b, 0x81,
+ 0xaa, 0x1b, 0x4c, 0xd6, 0x25, 0x75, 0x90, 0xc6, 0x9a, 0x1a, 0x3f, 0x82, 0x49, 0x9c, 0x8a, 0x32,
+ 0x23, 0x75, 0xa8, 0xf8, 0x96, 0x9a, 0xd9, 0x06, 0xfb, 0xd4, 0xac, 0xcd, 0xc1, 0x8e, 0x8d, 0x70,
+ 0xca, 0x0a, 0x67, 0x60, 0x18, 0x27, 0x08, 0xaf, 0x61, 0xac, 0x9c, 0x0b, 0x67, 0x38, 0x1f, 0x2c,
+ 0xed, 0xd5, 0xcc, 0x3b, 0xe5, 0xf3, 0x3e, 0x36, 0x93, 0x93, 0xb3, 0xc0, 0x50, 0x17, 0xbf, 0x11,
+ 0xdc, 0xed, 0x25, 0xb8, 0xd4, 0xe3, 0xe6, 0x28, 0x6f, 0x29, 0xf9, 0x27, 0x5d, 0xf9, 0x9e, 0x84,
+ 0x3e, 0x28, 0xde, 0x15, 0x92, 0xd7, 0xed, 0x31, 0xf7, 0x39, 0xd8, 0x1d, 0x18, 0xdf, 0x81, 0xc1,
+ 0x0f, 0x5a, 0x9b, 0x23, 0x4d, 0x89, 0xef, 0xc1, 0xe8, 0x27, 0xc9, 0xaa, 0xf6, 0x03, 0xe8, 0xe6,
+ 0x85, 0xf5, 0x0c, 0x2d, 0xfe, 0x22, 0x78, 0xd0, 0x3b, 0xb2, 0xa3, 0x92, 0xc4, 0x44, 0x12, 0xfc,
+ 0x0a, 0x6e, 0x8a, 0x5a, 0x48, 0x9a, 0x87, 0xc6, 0x62, 0xa3, 0x69, 0xaf, 0xee, 0xb7, 0x16, 0xdb,
+ 0xd7, 0xf3, 0x3e, 0xab, 0xd7, 0x0b, 0x26, 0x9a, 0xad, 0xcd, 0xe0, 0xaf, 0x60, 0x57, 0x82, 0xf2,
+ 0xf0, 0x2c, 0xde, 0xd3, 0x2b, 0xe3, 0xb5, 0x97, 0xbd, 0x2f, 0x82, 0xf2, 0x6e, 0x54, 0xa8, 0x8e,
+ 0x80, 0xfb, 0x1a, 0x6e, 0x5f, 0x18, 0x5f, 0x27, 0xf2, 0xb6, 0x86, 0x5b, 0x11, 0xcb, 0x3b, 0x36,
+ 0xb6, 0xd3, 0x9e, 0x8f, 0x7d, 0x13, 0x6c, 0x8f, 0xbe, 0xbd, 0x31, 0xac, 0x84, 0x65, 0xa4, 0x48,
+ 0x3c, 0xc6, 0x13, 0x3f, 0xa1, 0x85, 0x8a, 0xed, 0xeb, 0x11, 0x29, 0x53, 0x71, 0xfe, 0x3b, 0x70,
+ 0x2a, 0x5e, 0x76, 0x9b, 0x7f, 0x08, 0xfd, 0xb2, 0x86, 0xef, 0x37, 0xfb, 0x0f, 0x87, 0xb1, 0xda,
+ 0x5c, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x10, 0x16, 0x7c, 0xe9, 0x47, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
new file mode 100644
index 00000000000..dd2d831ef91
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/trace.pb.go
@@ -0,0 +1,1411 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/devtools/cloudtrace/v2/trace.proto
+
+package cloudtrace
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Indicates whether the message was sent or received.
+type Span_TimeEvent_MessageEvent_Type int32
+
+const (
+ // Unknown event type.
+ Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
+ // Indicates a sent message.
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
+ // Indicates a received message.
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
+)
+
+var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "SENT",
+ 2: "RECEIVED",
+}
+
+var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "SENT": 1,
+ "RECEIVED": 2,
+}
+
+func (x Span_TimeEvent_MessageEvent_Type) String() string {
+ return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x))
+}
+
+func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1, 0}
+}
+
+// The relationship of the current span relative to the linked span: child,
+// parent, or unspecified.
+type Span_Link_Type int32
+
+const (
+ // The relationship of the two spans is unknown.
+ Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
+ // The linked span is a child of the current span.
+ Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
+ // The linked span is a parent of the current span.
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
+)
+
+var Span_Link_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "CHILD_LINKED_SPAN",
+ 2: "PARENT_LINKED_SPAN",
+}
+
+var Span_Link_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "CHILD_LINKED_SPAN": 1,
+ "PARENT_LINKED_SPAN": 2,
+}
+
+func (x Span_Link_Type) String() string {
+ return proto.EnumName(Span_Link_Type_name, int32(x))
+}
+
+func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 3, 0}
+}
+
+// A span represents a single operation within a trace. Spans can be
+// nested to form a trace tree. Often, a trace contains a root span
+// that describes the end-to-end latency, and one or more subspans for
+// its sub-operations. A trace can also contain multiple root spans,
+// or none at all. Spans do not need to be contiguous—there may be
+// gaps or overlaps between spans in a trace.
+type Span struct {
+ // The resource name of the span in the following format:
+ //
+ // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
+ //
+ // [TRACE_ID] is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array.
+ //
+ // [SPAN_ID] is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The [SPAN_ID] portion of the span's resource name.
+ SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The [SPAN_ID] of this span's parent span. If this is a root span,
+ // then this field must be empty.
+ ParentSpanId string `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // A description of the span's operation (up to 128 bytes).
+ // Stackdriver Trace displays the description in the
+ // {% dynamic print site_values.console_name %}.
+ // For example, the display name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name within an application and at the same call point.
+ // This makes it easier to correlate spans in different traces.
+ DisplayName *TruncatableString `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The start time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The end time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // A set of attributes on the span. You can have up to 32 attributes per
+ // span.
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // Stack trace captured at the start of the span.
+ StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
+ // A set of time events. You can have up to 32 annotations and 128 message
+ // events per span.
+ TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
+ // Links associated with the span. You can have up to 128 links per Span.
+ Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
+ // An optional final status for this span.
+ Status *status.Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
+ // (Optional) Set this parameter to indicate whether this span is in
+ // the same process as its parent. If you do not set this parameter,
+ // Stackdriver Trace is unable to take advantage of this helpful
+ // information.
+ SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
+ // An optional number of child spans that were generated while this span
+ // was active. If set, allows implementation to detect missing child spans.
+ ChildSpanCount *wrappers.Int32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span) Reset() { *m = Span{} }
+func (m *Span) String() string { return proto.CompactTextString(m) }
+func (*Span) ProtoMessage() {}
+func (*Span) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0}
+}
+
+func (m *Span) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span.Unmarshal(m, b)
+}
+func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span.Marshal(b, m, deterministic)
+}
+func (m *Span) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span.Merge(m, src)
+}
+func (m *Span) XXX_Size() int {
+ return xxx_messageInfo_Span.Size(m)
+}
+func (m *Span) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span proto.InternalMessageInfo
+
+func (m *Span) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Span) GetSpanId() string {
+ if m != nil {
+ return m.SpanId
+ }
+ return ""
+}
+
+func (m *Span) GetParentSpanId() string {
+ if m != nil {
+ return m.ParentSpanId
+ }
+ return ""
+}
+
+func (m *Span) GetDisplayName() *TruncatableString {
+ if m != nil {
+ return m.DisplayName
+ }
+ return nil
+}
+
+func (m *Span) GetStartTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTime
+ }
+ return nil
+}
+
+func (m *Span) GetEndTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.EndTime
+ }
+ return nil
+}
+
+func (m *Span) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+func (m *Span) GetStackTrace() *StackTrace {
+ if m != nil {
+ return m.StackTrace
+ }
+ return nil
+}
+
+func (m *Span) GetTimeEvents() *Span_TimeEvents {
+ if m != nil {
+ return m.TimeEvents
+ }
+ return nil
+}
+
+func (m *Span) GetLinks() *Span_Links {
+ if m != nil {
+ return m.Links
+ }
+ return nil
+}
+
+func (m *Span) GetStatus() *status.Status {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
+ if m != nil {
+ return m.SameProcessAsParentSpan
+ }
+ return nil
+}
+
+func (m *Span) GetChildSpanCount() *wrappers.Int32Value {
+ if m != nil {
+ return m.ChildSpanCount
+ }
+ return nil
+}
+
+// A set of attributes, each in the format `[KEY]:[VALUE]`.
+type Span_Attributes struct {
+ // The set of attributes. Each attribute's key can be up to 128 bytes
+ // long. The value can be a string up to 256 bytes, an integer, or the
+ // Boolean values `true` and `false`. For example:
+ //
+ // "/instance_id": "my-instance"
+ // "/http/user_agent": ""
+ // "/http/request_bytes": 300
+ // "abc.com/myattribute": true
+ AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The number of attributes that were discarded. Attributes can be discarded
+ // because their keys are too long or because there are too many attributes.
+ // If this value is 0 then all attributes are valid.
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Attributes) Reset() { *m = Span_Attributes{} }
+func (m *Span_Attributes) String() string { return proto.CompactTextString(m) }
+func (*Span_Attributes) ProtoMessage() {}
+func (*Span_Attributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 0}
+}
+
+func (m *Span_Attributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Attributes.Unmarshal(m, b)
+}
+func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic)
+}
+func (m *Span_Attributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Attributes.Merge(m, src)
+}
+func (m *Span_Attributes) XXX_Size() int {
+ return xxx_messageInfo_Span_Attributes.Size(m)
+}
+func (m *Span_Attributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Attributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo
+
+func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
+ if m != nil {
+ return m.AttributeMap
+ }
+ return nil
+}
+
+func (m *Span_Attributes) GetDroppedAttributesCount() int32 {
+ if m != nil {
+ return m.DroppedAttributesCount
+ }
+ return 0
+}
+
+// A time-stamped annotation or message event in the Span.
+type Span_TimeEvent struct {
+ // The timestamp indicating the time the event occurred.
+ Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
+ // A `TimeEvent` can contain either an `Annotation` object or a
+ // `MessageEvent` object, but not both.
+ //
+ // Types that are valid to be assigned to Value:
+ // *Span_TimeEvent_Annotation_
+ // *Span_TimeEvent_MessageEvent_
+ Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} }
+func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent) ProtoMessage() {}
+func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 1}
+}
+
+func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent.Merge(m, src)
+}
+func (m *Span_TimeEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent.Size(m)
+}
+func (m *Span_TimeEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.Time
+ }
+ return nil
+}
+
+type isSpan_TimeEvent_Value interface {
+ isSpan_TimeEvent_Value()
+}
+
+type Span_TimeEvent_Annotation_ struct {
+ Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
+}
+
+type Span_TimeEvent_MessageEvent_ struct {
+ MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
+}
+
+func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
+
+func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
+
+func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
+ if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok {
+ return x.Annotation
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
+ if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
+ return x.MessageEvent
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Span_TimeEvent) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Span_TimeEvent_OneofMarshaler, _Span_TimeEvent_OneofUnmarshaler, _Span_TimeEvent_OneofSizer, []interface{}{
+ (*Span_TimeEvent_Annotation_)(nil),
+ (*Span_TimeEvent_MessageEvent_)(nil),
+ }
+}
+
+func _Span_TimeEvent_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Span_TimeEvent)
+ // value
+ switch x := m.Value.(type) {
+ case *Span_TimeEvent_Annotation_:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Annotation); err != nil {
+ return err
+ }
+ case *Span_TimeEvent_MessageEvent_:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MessageEvent); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Span_TimeEvent.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Span_TimeEvent_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Span_TimeEvent)
+ switch tag {
+ case 2: // value.annotation
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Span_TimeEvent_Annotation)
+ err := b.DecodeMessage(msg)
+ m.Value = &Span_TimeEvent_Annotation_{msg}
+ return true, err
+ case 3: // value.message_event
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(Span_TimeEvent_MessageEvent)
+ err := b.DecodeMessage(msg)
+ m.Value = &Span_TimeEvent_MessageEvent_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Span_TimeEvent_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Span_TimeEvent)
+ // value
+ switch x := m.Value.(type) {
+ case *Span_TimeEvent_Annotation_:
+ s := proto.Size(x.Annotation)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *Span_TimeEvent_MessageEvent_:
+ s := proto.Size(x.MessageEvent)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Text annotation with a set of attributes.
+type Span_TimeEvent_Annotation struct {
+ // A user-supplied message describing the event. The maximum length for
+ // the description is 256 bytes.
+ Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // A set of attributes on the annotation. You can have up to 4 attributes
+ // per Annotation.
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
+func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_Annotation) ProtoMessage() {}
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 0}
+}
+
+func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
+}
+func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+ // Type of MessageEvent. Indicates whether the message was sent or
+ // received.
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+ // An identifier for the MessageEvent's message that can be used to match
+ // SENT and RECEIVED MessageEvents. It is recommended to be unique within
+ // a Span.
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // The number of uncompressed bytes sent or received.
+ UncompressedSizeBytes int64 `protobuf:"varint,3,opt,name=uncompressed_size_bytes,json=uncompressedSizeBytes,proto3" json:"uncompressed_size_bytes,omitempty"`
+ // The number of compressed bytes sent or received. If missing assumed to
+ // be the same size as uncompressed.
+ CompressedSizeBytes int64 `protobuf:"varint,4,opt,name=compressed_size_bytes,json=compressedSizeBytes,proto3" json:"compressed_size_bytes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} }
+func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
+func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 1, 1}
+}
+
+func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetId() int64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetUncompressedSizeBytes() int64 {
+ if m != nil {
+ return m.UncompressedSizeBytes
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetCompressedSizeBytes() int64 {
+ if m != nil {
+ return m.CompressedSizeBytes
+ }
+ return 0
+}
+
+// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+// on the span, consisting of either user-supplied key:value pairs, or
+// details of a message sent/received between Spans.
+type Span_TimeEvents struct {
+ // A collection of `TimeEvent`s.
+ TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
+ // The number of dropped annotations in all the included time events.
+ // If the value is 0, then no annotations were dropped.
+ DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
+ // The number of dropped message events in all the included time events.
+ // If the value is 0, then no message events were dropped.
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
+func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvents) ProtoMessage() {}
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 2}
+}
+
+func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
+}
+func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvents.Merge(m, src)
+}
+func (m *Span_TimeEvents) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvents.Size(m)
+}
+func (m *Span_TimeEvents) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
+
+func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+ if m != nil {
+ return m.TimeEvent
+ }
+ return nil
+}
+
+func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+ if m != nil {
+ return m.DroppedAnnotationsCount
+ }
+ return 0
+}
+
+func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+ if m != nil {
+ return m.DroppedMessageEventsCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ // The [TRACE_ID] for a trace within a project.
+ TraceId string `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // The [SPAN_ID] for a span within a trace.
+ SpanId string `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The relationship of the current span relative to the linked span.
+ Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=google.devtools.cloudtrace.v2.Span_Link_Type" json:"type,omitempty"`
+ // A set of attributes on the link. You have have up to 32 attributes per
+ // link.
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Link) Reset() { *m = Span_Link{} }
+func (m *Span_Link) String() string { return proto.CompactTextString(m) }
+func (*Span_Link) ProtoMessage() {}
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 3}
+}
+
+func (m *Span_Link) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Link.Unmarshal(m, b)
+}
+func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
+}
+func (m *Span_Link) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Link.Merge(m, src)
+}
+func (m *Span_Link) XXX_Size() int {
+ return xxx_messageInfo_Span_Link.Size(m)
+}
+func (m *Span_Link) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Link.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Link proto.InternalMessageInfo
+
+func (m *Span_Link) GetTraceId() string {
+ if m != nil {
+ return m.TraceId
+ }
+ return ""
+}
+
+func (m *Span_Link) GetSpanId() string {
+ if m != nil {
+ return m.SpanId
+ }
+ return ""
+}
+
+func (m *Span_Link) GetType() Span_Link_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_Link_TYPE_UNSPECIFIED
+}
+
+func (m *Span_Link) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// A collection of links, which are references from this span to a span
+// in the same or different trace.
+type Span_Links struct {
+ // A collection of links.
+ Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
+ // The number of dropped links after the maximum size was enforced. If
+ // this value is 0, then no links were dropped.
+ DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Links) Reset() { *m = Span_Links{} }
+func (m *Span_Links) String() string { return proto.CompactTextString(m) }
+func (*Span_Links) ProtoMessage() {}
+func (*Span_Links) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{0, 4}
+}
+
+func (m *Span_Links) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Links.Unmarshal(m, b)
+}
+func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic)
+}
+func (m *Span_Links) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Links.Merge(m, src)
+}
+func (m *Span_Links) XXX_Size() int {
+ return xxx_messageInfo_Span_Links.Size(m)
+}
+func (m *Span_Links) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Links.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Links proto.InternalMessageInfo
+
+func (m *Span_Links) GetLink() []*Span_Link {
+ if m != nil {
+ return m.Link
+ }
+ return nil
+}
+
+func (m *Span_Links) GetDroppedLinksCount() int32 {
+ if m != nil {
+ return m.DroppedLinksCount
+ }
+ return 0
+}
+
+// The allowed types for [VALUE] in a `[KEY]:[VALUE]` attribute.
+type AttributeValue struct {
+ // The type of the value.
+ //
+ // Types that are valid to be assigned to Value:
+ // *AttributeValue_StringValue
+ // *AttributeValue_IntValue
+ // *AttributeValue_BoolValue
+ Value isAttributeValue_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AttributeValue) Reset() { *m = AttributeValue{} }
+func (m *AttributeValue) String() string { return proto.CompactTextString(m) }
+func (*AttributeValue) ProtoMessage() {}
+func (*AttributeValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{1}
+}
+
+func (m *AttributeValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AttributeValue.Unmarshal(m, b)
+}
+func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic)
+}
+func (m *AttributeValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AttributeValue.Merge(m, src)
+}
+func (m *AttributeValue) XXX_Size() int {
+ return xxx_messageInfo_AttributeValue.Size(m)
+}
+func (m *AttributeValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_AttributeValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AttributeValue proto.InternalMessageInfo
+
+type isAttributeValue_Value interface {
+ isAttributeValue_Value()
+}
+
+type AttributeValue_StringValue struct {
+ StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AttributeValue_IntValue struct {
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AttributeValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+func (*AttributeValue_StringValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_IntValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
+
+func (m *AttributeValue) GetValue() isAttributeValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetStringValue() *TruncatableString {
+ if x, ok := m.GetValue().(*AttributeValue_StringValue); ok {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetIntValue() int64 {
+ if x, ok := m.GetValue().(*AttributeValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (m *AttributeValue) GetBoolValue() bool {
+ if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AttributeValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AttributeValue_OneofMarshaler, _AttributeValue_OneofUnmarshaler, _AttributeValue_OneofSizer, []interface{}{
+ (*AttributeValue_StringValue)(nil),
+ (*AttributeValue_IntValue)(nil),
+ (*AttributeValue_BoolValue)(nil),
+ }
+}
+
+func _AttributeValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AttributeValue)
+ // value
+ switch x := m.Value.(type) {
+ case *AttributeValue_StringValue:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.StringValue); err != nil {
+ return err
+ }
+ case *AttributeValue_IntValue:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.IntValue))
+ case *AttributeValue_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(3<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case nil:
+ default:
+ return fmt.Errorf("AttributeValue.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AttributeValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AttributeValue)
+ switch tag {
+ case 1: // value.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TruncatableString)
+ err := b.DecodeMessage(msg)
+ m.Value = &AttributeValue_StringValue{msg}
+ return true, err
+ case 2: // value.int_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &AttributeValue_IntValue{int64(x)}
+ return true, err
+ case 3: // value.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &AttributeValue_BoolValue{x != 0}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AttributeValue_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AttributeValue)
+ // value
+ switch x := m.Value.(type) {
+ case *AttributeValue_StringValue:
+ s := proto.Size(x.StringValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *AttributeValue_IntValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.IntValue))
+ case *AttributeValue_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A call stack appearing in a trace.
+type StackTrace struct {
+ // Stack frames in this stack trace. A maximum of 128 frames are allowed.
+ StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
+ // The hash ID is used to conserve network bandwidth for duplicate
+ // stack traces within a single trace.
+ //
+ // Often multiple spans will have identical stack traces.
+ // The first occurrence of a stack trace should contain both the
+ // `stackFrame` content and a value in `stackTraceHashId`.
+ //
+ // Subsequent spans within the same request can refer
+ // to that stack trace by only setting `stackTraceHashId`.
+ StackTraceHashId int64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace) Reset() { *m = StackTrace{} }
+func (m *StackTrace) String() string { return proto.CompactTextString(m) }
+func (*StackTrace) ProtoMessage() {}
+func (*StackTrace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{2}
+}
+
+func (m *StackTrace) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace.Unmarshal(m, b)
+}
+func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic)
+}
+func (m *StackTrace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace.Merge(m, src)
+}
+func (m *StackTrace) XXX_Size() int {
+ return xxx_messageInfo_StackTrace.Size(m)
+}
+func (m *StackTrace) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace proto.InternalMessageInfo
+
+func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames {
+ if m != nil {
+ return m.StackFrames
+ }
+ return nil
+}
+
+func (m *StackTrace) GetStackTraceHashId() int64 {
+ if m != nil {
+ return m.StackTraceHashId
+ }
+ return 0
+}
+
+// Represents a single stack frame in a stack trace.
+type StackTrace_StackFrame struct {
+ // The fully-qualified name that uniquely identifies the function or
+ // method that is active in this frame (up to 1024 bytes).
+ FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
+ // An un-mangled function name, if `function_name` is
+ // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
+ // be fully-qualified (up to 1024 bytes).
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
+ // The name of the source file where the function call appears (up to 256
+ // bytes).
+ FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ // The line number in `file_name` where the function call appears.
+ LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
+ // The column number where the function call appears, if available.
+ // This is important in JavaScript because of its anonymous functions.
+ ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
+ // The binary module from where the code was loaded.
+ LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
+ // The version of the deployed source code (up to 128 bytes).
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} }
+func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrame) ProtoMessage() {}
+func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{2, 0}
+}
+
+func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic)
+}
+func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrame.Merge(m, src)
+}
+func (m *StackTrace_StackFrame) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrame.Size(m)
+}
+func (m *StackTrace_StackFrame) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
+ if m != nil {
+ return m.FunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
+ if m != nil {
+ return m.OriginalFunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetFileName() *TruncatableString {
+ if m != nil {
+ return m.FileName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetLineNumber() int64 {
+ if m != nil {
+ return m.LineNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetColumnNumber() int64 {
+ if m != nil {
+ return m.ColumnNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetLoadModule() *Module {
+ if m != nil {
+ return m.LoadModule
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
+ if m != nil {
+ return m.SourceVersion
+ }
+ return nil
+}
+
+// A collection of stack frames, which can be truncated.
+type StackTrace_StackFrames struct {
+ // Stack frames in this call stack.
+ Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
+ // The number of stack frames that were dropped because there
+ // were too many stack frames.
+ // If this value is 0, then no stack frames were dropped.
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} }
+func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrames) ProtoMessage() {}
+func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{2, 1}
+}
+
+func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic)
+}
+func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrames.Merge(m, src)
+}
+func (m *StackTrace_StackFrames) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrames.Size(m)
+}
+func (m *StackTrace_StackFrames) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
+ if m != nil {
+ return m.Frame
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
+ if m != nil {
+ return m.DroppedFramesCount
+ }
+ return 0
+}
+
+// Binary module.
+type Module struct {
+ // For example: main binary, kernel modules, and dynamic libraries
+ // such as libc.so, sharedlib.so (up to 256 bytes).
+ Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // A unique identifier for the module, usually a hash of its
+ // contents (up to 128 bytes).
+ BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Module) Reset() { *m = Module{} }
+func (m *Module) String() string { return proto.CompactTextString(m) }
+func (*Module) ProtoMessage() {}
+func (*Module) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{3}
+}
+
+func (m *Module) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Module.Unmarshal(m, b)
+}
+func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Module.Marshal(b, m, deterministic)
+}
+func (m *Module) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Module.Merge(m, src)
+}
+func (m *Module) XXX_Size() int {
+ return xxx_messageInfo_Module.Size(m)
+}
+func (m *Module) XXX_DiscardUnknown() {
+ xxx_messageInfo_Module.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Module proto.InternalMessageInfo
+
+func (m *Module) GetModule() *TruncatableString {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+func (m *Module) GetBuildId() *TruncatableString {
+ if m != nil {
+ return m.BuildId
+ }
+ return nil
+}
+
+// Represents a string that might be shortened to a specified length.
+type TruncatableString struct {
+ // The shortened string. For example, if the original string is 500
+ // bytes long and the limit of the string is 128 bytes, then
+ // `value` contains the first 128 bytes of the 500-byte string.
+ //
+ // Truncation always happens on a UTF8 character boundary. If there
+ // are multi-byte characters in the string, then the length of the
+ // shortened string might be less than the size limit.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The number of bytes removed from the original string. If this
+ // value is 0, then the string was not shortened.
+ TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TruncatableString) Reset() { *m = TruncatableString{} }
+func (m *TruncatableString) String() string { return proto.CompactTextString(m) }
+func (*TruncatableString) ProtoMessage() {}
+func (*TruncatableString) Descriptor() ([]byte, []int) {
+ return fileDescriptor_29869cc16dc8ce61, []int{4}
+}
+
+func (m *TruncatableString) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TruncatableString.Unmarshal(m, b)
+}
+func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic)
+}
+func (m *TruncatableString) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TruncatableString.Merge(m, src)
+}
+func (m *TruncatableString) XXX_Size() int {
+ return xxx_messageInfo_TruncatableString.Size(m)
+}
+func (m *TruncatableString) XXX_DiscardUnknown() {
+ xxx_messageInfo_TruncatableString.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TruncatableString proto.InternalMessageInfo
+
+func (m *TruncatableString) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func (m *TruncatableString) GetTruncatedByteCount() int32 {
+ if m != nil {
+ return m.TruncatedByteCount
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value)
+ proto.RegisterEnum("google.devtools.cloudtrace.v2.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
+ proto.RegisterType((*Span)(nil), "google.devtools.cloudtrace.v2.Span")
+ proto.RegisterType((*Span_Attributes)(nil), "google.devtools.cloudtrace.v2.Span.Attributes")
+ proto.RegisterMapType((map[string]*AttributeValue)(nil), "google.devtools.cloudtrace.v2.Span.Attributes.AttributeMapEntry")
+ proto.RegisterType((*Span_TimeEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent")
+ proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.Annotation")
+ proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvent.MessageEvent")
+ proto.RegisterType((*Span_TimeEvents)(nil), "google.devtools.cloudtrace.v2.Span.TimeEvents")
+ proto.RegisterType((*Span_Link)(nil), "google.devtools.cloudtrace.v2.Span.Link")
+ proto.RegisterType((*Span_Links)(nil), "google.devtools.cloudtrace.v2.Span.Links")
+ proto.RegisterType((*AttributeValue)(nil), "google.devtools.cloudtrace.v2.AttributeValue")
+ proto.RegisterType((*StackTrace)(nil), "google.devtools.cloudtrace.v2.StackTrace")
+ proto.RegisterType((*StackTrace_StackFrame)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrame")
+ proto.RegisterType((*StackTrace_StackFrames)(nil), "google.devtools.cloudtrace.v2.StackTrace.StackFrames")
+ proto.RegisterType((*Module)(nil), "google.devtools.cloudtrace.v2.Module")
+ proto.RegisterType((*TruncatableString)(nil), "google.devtools.cloudtrace.v2.TruncatableString")
+}
+
+func init() {
+ proto.RegisterFile("google/devtools/cloudtrace/v2/trace.proto", fileDescriptor_29869cc16dc8ce61)
+}
+
+var fileDescriptor_29869cc16dc8ce61 = []byte{
+ // 1425 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4b, 0x6f, 0xdb, 0xc6,
+ 0x16, 0x36, 0xf5, 0xd6, 0x91, 0x6c, 0xc8, 0x13, 0x3b, 0x56, 0x94, 0xe4, 0x26, 0xd7, 0xf7, 0x16,
+ 0x70, 0x0a, 0x98, 0x0a, 0x94, 0xa4, 0x48, 0xd3, 0x02, 0xa9, 0x1f, 0x72, 0xa4, 0xc4, 0x56, 0x05,
+ 0x4a, 0x71, 0xd3, 0x34, 0x00, 0x31, 0x22, 0xc7, 0x32, 0x11, 0x8a, 0x24, 0x38, 0x43, 0x17, 0xce,
+ 0xae, 0xeb, 0xae, 0xbb, 0x29, 0x50, 0x74, 0x59, 0x20, 0xab, 0xfc, 0x8e, 0x2e, 0xba, 0xed, 0x7f,
+ 0xe9, 0xaa, 0x98, 0x07, 0x49, 0x29, 0x2f, 0xdb, 0xca, 0x6e, 0x66, 0xce, 0xf9, 0x3e, 0x9e, 0x33,
+ 0x73, 0x5e, 0x84, 0x5b, 0x63, 0xdf, 0x1f, 0xbb, 0xa4, 0x69, 0x93, 0x13, 0xe6, 0xfb, 0x2e, 0x6d,
+ 0x5a, 0xae, 0x1f, 0xd9, 0x2c, 0xc4, 0x16, 0x69, 0x9e, 0xb4, 0x9a, 0x62, 0xa1, 0x07, 0xa1, 0xcf,
+ 0x7c, 0x74, 0x5d, 0xaa, 0xea, 0xb1, 0xaa, 0x9e, 0xaa, 0xea, 0x27, 0xad, 0xc6, 0x35, 0xc5, 0x84,
+ 0x03, 0xa7, 0x89, 0x3d, 0xcf, 0x67, 0x98, 0x39, 0xbe, 0x47, 0x25, 0xb8, 0x71, 0x43, 0x49, 0xc5,
+ 0x6e, 0x14, 0x1d, 0x35, 0x99, 0x33, 0x21, 0x94, 0xe1, 0x49, 0xa0, 0x14, 0xfe, 0xf3, 0xb6, 0xc2,
+ 0x8f, 0x21, 0x0e, 0x02, 0x12, 0xc6, 0x04, 0x6b, 0x4a, 0x1e, 0x06, 0x56, 0x93, 0x32, 0xcc, 0x22,
+ 0x25, 0x58, 0xff, 0x07, 0x41, 0x6e, 0x10, 0x60, 0x0f, 0x21, 0xc8, 0x79, 0x78, 0x42, 0xea, 0xda,
+ 0x4d, 0x6d, 0xa3, 0x6c, 0x88, 0x35, 0x5a, 0x83, 0x22, 0x0d, 0xb0, 0x67, 0x3a, 0x76, 0x3d, 0x23,
+ 0x8e, 0x0b, 0x7c, 0xdb, 0xb5, 0xd1, 0xff, 0x61, 0x29, 0xc0, 0x21, 0xf1, 0x98, 0x19, 0xcb, 0xb3,
+ 0x42, 0x5e, 0x95, 0xa7, 0x03, 0xa9, 0x35, 0x80, 0xaa, 0xed, 0xd0, 0xc0, 0xc5, 0xa7, 0xa6, 0xa0,
+ 0xce, 0xdd, 0xd4, 0x36, 0x2a, 0xad, 0xdb, 0xfa, 0x47, 0x6f, 0x42, 0x1f, 0x86, 0x91, 0x67, 0x61,
+ 0x86, 0x47, 0x2e, 0x19, 0xb0, 0xd0, 0xf1, 0xc6, 0x46, 0x45, 0xb1, 0xf4, 0xb8, 0x4d, 0x5f, 0x02,
+ 0x50, 0x86, 0x43, 0x66, 0xf2, 0x2b, 0xa8, 0xe7, 0x05, 0x65, 0x23, 0xa6, 0x8c, 0xdd, 0xd7, 0x87,
+ 0xf1, 0xfd, 0x18, 0x65, 0xa1, 0xcd, 0xf7, 0xe8, 0x1e, 0x94, 0x88, 0x67, 0x4b, 0x60, 0xe1, 0x4c,
+ 0x60, 0x91, 0x78, 0xb6, 0x80, 0xf5, 0x00, 0x30, 0x63, 0xa1, 0x33, 0x8a, 0x18, 0xa1, 0xf5, 0xa2,
+ 0x00, 0xea, 0x67, 0x38, 0xc1, 0x6f, 0x40, 0xdf, 0x4a, 0x50, 0xc6, 0x14, 0x03, 0x7a, 0x0c, 0x15,
+ 0xca, 0xb0, 0xf5, 0xd2, 0x14, 0xda, 0xf5, 0x92, 0x20, 0xbc, 0x75, 0x16, 0x21, 0x47, 0x0c, 0xf9,
+ 0xce, 0x00, 0x9a, 0xac, 0xd1, 0xb7, 0x50, 0xe1, 0xee, 0x98, 0xe4, 0x84, 0x78, 0x8c, 0xd6, 0xcb,
+ 0xe7, 0x37, 0x8e, 0xbb, 0xd6, 0x16, 0x28, 0x03, 0x58, 0xb2, 0x46, 0x0f, 0x21, 0xef, 0x3a, 0xde,
+ 0x4b, 0x5a, 0x87, 0xf3, 0x99, 0xc5, 0xa9, 0xf6, 0x39, 0xc0, 0x90, 0x38, 0xf4, 0x39, 0x14, 0x64,
+ 0x80, 0xd5, 0x2b, 0x82, 0x01, 0xc5, 0x0c, 0x61, 0x60, 0x71, 0x2f, 0x58, 0x44, 0x0d, 0xa5, 0x81,
+ 0x9e, 0xc1, 0x55, 0x8a, 0x27, 0xc4, 0x0c, 0x42, 0xdf, 0x22, 0x94, 0x9a, 0x98, 0x9a, 0x53, 0x61,
+ 0x55, 0xaf, 0x7e, 0xe0, 0x8d, 0xb6, 0x7d, 0xdf, 0x3d, 0xc4, 0x6e, 0x44, 0x8c, 0x35, 0x0e, 0xef,
+ 0x4b, 0xf4, 0x16, 0xed, 0x27, 0xc1, 0x87, 0xda, 0x50, 0xb3, 0x8e, 0x1d, 0xd7, 0x96, 0xf1, 0x69,
+ 0xf9, 0x91, 0xc7, 0xea, 0x8b, 0x82, 0xee, 0xea, 0x3b, 0x74, 0x5d, 0x8f, 0xdd, 0x69, 0x49, 0xbe,
+ 0x25, 0x01, 0xe2, 0x0c, 0x3b, 0x1c, 0xd2, 0xf8, 0x2d, 0x03, 0x90, 0xbe, 0x22, 0x22, 0xb0, 0x98,
+ 0xbc, 0xa3, 0x39, 0xc1, 0x41, 0x5d, 0xbb, 0x99, 0xdd, 0xa8, 0xb4, 0xbe, 0xb9, 0x58, 0x30, 0xa4,
+ 0xcb, 0x03, 0x1c, 0xb4, 0x3d, 0x16, 0x9e, 0x1a, 0x55, 0x3c, 0x75, 0x84, 0xee, 0x43, 0xdd, 0x0e,
+ 0xfd, 0x20, 0x20, 0xb6, 0x99, 0x86, 0x8d, 0x72, 0x82, 0xe7, 0x61, 0xde, 0xb8, 0xac, 0xe4, 0x29,
+ 0xa9, 0xb4, 0xd7, 0x83, 0xe5, 0x77, 0xc8, 0x51, 0x0d, 0xb2, 0x2f, 0xc9, 0xa9, 0x4a, 0x6c, 0xbe,
+ 0x44, 0x3b, 0x90, 0x3f, 0xe1, 0xfe, 0x0a, 0xb6, 0x4a, 0x6b, 0xf3, 0x0c, 0xfb, 0x13, 0x4a, 0x79,
+ 0x49, 0x12, 0xfb, 0x20, 0x73, 0x5f, 0x6b, 0xfc, 0x95, 0x87, 0x72, 0x12, 0x48, 0x48, 0x87, 0x9c,
+ 0xc8, 0x2d, 0xed, 0xcc, 0xdc, 0x12, 0x7a, 0xe8, 0x39, 0x40, 0x5a, 0xea, 0x94, 0x2d, 0xf7, 0x2f,
+ 0x14, 0xbb, 0xfa, 0x56, 0x82, 0xef, 0x2c, 0x18, 0x53, 0x6c, 0x08, 0xc3, 0xe2, 0x84, 0x50, 0x8a,
+ 0xc7, 0x2a, 0x37, 0x44, 0x81, 0xaa, 0xb4, 0x1e, 0x5c, 0x8c, 0xfe, 0x40, 0x52, 0x88, 0x4d, 0x67,
+ 0xc1, 0xa8, 0x4e, 0xa6, 0xf6, 0x8d, 0x37, 0x1a, 0x40, 0xfa, 0x7d, 0x64, 0x40, 0xc5, 0x26, 0xd4,
+ 0x0a, 0x9d, 0x40, 0xb8, 0xa3, 0xcd, 0x5d, 0xec, 0x52, 0x92, 0xb7, 0x4a, 0x4f, 0xe6, 0x53, 0x4b,
+ 0x4f, 0xe3, 0x97, 0x0c, 0x54, 0xa7, 0x7d, 0x42, 0x03, 0xc8, 0xb1, 0xd3, 0x40, 0x3e, 0xd9, 0x52,
+ 0xeb, 0xe1, 0xfc, 0xb7, 0xa3, 0x0f, 0x4f, 0x03, 0x62, 0x08, 0x32, 0xb4, 0x04, 0x19, 0xd5, 0x31,
+ 0xb2, 0x46, 0xc6, 0xb1, 0xd1, 0x17, 0xb0, 0x16, 0x79, 0x96, 0x3f, 0x09, 0x42, 0x42, 0x29, 0xb1,
+ 0x4d, 0xea, 0xbc, 0x22, 0xe6, 0xe8, 0x94, 0xbb, 0x94, 0x15, 0x4a, 0xab, 0xd3, 0xe2, 0x81, 0xf3,
+ 0x8a, 0x6c, 0x73, 0x21, 0x6a, 0xc1, 0xea, 0xfb, 0x51, 0x39, 0x81, 0xba, 0xf4, 0x1e, 0xcc, 0xfa,
+ 0x5d, 0xc8, 0x71, 0x4b, 0xd0, 0x0a, 0xd4, 0x86, 0xdf, 0xf7, 0xdb, 0xe6, 0xd3, 0xde, 0xa0, 0xdf,
+ 0xde, 0xe9, 0xee, 0x75, 0xdb, 0xbb, 0xb5, 0x05, 0x54, 0x82, 0xdc, 0xa0, 0xdd, 0x1b, 0xd6, 0x34,
+ 0x54, 0x85, 0x92, 0xd1, 0xde, 0x69, 0x77, 0x0f, 0xdb, 0xbb, 0xb5, 0xcc, 0x76, 0x51, 0x25, 0x44,
+ 0xe3, 0x6f, 0x0d, 0x20, 0xad, 0x8c, 0x68, 0x1f, 0x20, 0x2d, 0xaf, 0x2a, 0xdb, 0x37, 0x2f, 0x74,
+ 0x49, 0x46, 0x39, 0x29, 0xae, 0xe8, 0x01, 0x5c, 0x49, 0xf2, 0x3a, 0x6d, 0xf1, 0x33, 0x89, 0xbd,
+ 0x16, 0x27, 0x76, 0x2a, 0x17, 0x99, 0x8d, 0x1e, 0xc2, 0xb5, 0x18, 0x3b, 0x13, 0xd7, 0x31, 0x3c,
+ 0x2b, 0xe0, 0x31, 0xff, 0xf4, 0xcb, 0xa8, 0xd2, 0xf0, 0x6b, 0x06, 0x72, 0xbc, 0x50, 0xa3, 0x2b,
+ 0x50, 0x12, 0xb6, 0xf2, 0xae, 0x2d, 0x6b, 0x42, 0x51, 0xec, 0xbb, 0xf6, 0x87, 0xfb, 0xfd, 0x96,
+ 0x0a, 0x93, 0xac, 0x08, 0x93, 0xcd, 0xf3, 0x36, 0x85, 0xe9, 0xa0, 0x98, 0x0d, 0xe5, 0xdc, 0xa7,
+ 0x86, 0xf2, 0xfa, 0x93, 0x8f, 0x3e, 0xf4, 0x2a, 0x2c, 0xef, 0x74, 0xba, 0xfb, 0xbb, 0xe6, 0x7e,
+ 0xb7, 0xf7, 0xa4, 0xbd, 0x6b, 0x0e, 0xfa, 0x5b, 0xbd, 0x9a, 0x86, 0x2e, 0x03, 0xea, 0x6f, 0x19,
+ 0xed, 0xde, 0x70, 0xe6, 0x3c, 0xd3, 0x88, 0x20, 0x2f, 0x9a, 0x18, 0xfa, 0x1a, 0x72, 0xbc, 0x8d,
+ 0xa9, 0xa7, 0xde, 0x38, 0xaf, 0xa3, 0x86, 0x40, 0x21, 0x1d, 0x2e, 0xc5, 0x8f, 0x24, 0x9a, 0xe1,
+ 0xcc, 0xd3, 0x2e, 0x2b, 0x91, 0xf8, 0x90, 0x78, 0x93, 0xf5, 0x37, 0x1a, 0x2c, 0xcd, 0x16, 0x57,
+ 0xf4, 0x14, 0xaa, 0x54, 0x14, 0x02, 0x53, 0x56, 0xe8, 0x39, 0xcb, 0x48, 0x67, 0xc1, 0xa8, 0x48,
+ 0x1e, 0x49, 0x7b, 0x1d, 0xca, 0x8e, 0xc7, 0xcc, 0xb4, 0xea, 0x67, 0x3b, 0x0b, 0x46, 0xc9, 0xf1,
+ 0x98, 0x14, 0xdf, 0x00, 0x18, 0xf9, 0xbe, 0xab, 0xe4, 0xfc, 0x95, 0x4b, 0x9d, 0x05, 0xa3, 0x3c,
+ 0x8a, 0x1b, 0x6d, 0x92, 0x20, 0xeb, 0x7f, 0x14, 0x00, 0xd2, 0x59, 0x04, 0x3d, 0xe3, 0xe6, 0xf2,
+ 0x59, 0xe6, 0x28, 0xc4, 0x13, 0x42, 0x95, 0xb9, 0xf7, 0xce, 0x3d, 0xcc, 0xc8, 0xe5, 0x9e, 0x00,
+ 0x1b, 0x72, 0x2c, 0x92, 0x1b, 0xb4, 0x09, 0x97, 0xa6, 0xa6, 0x24, 0xf3, 0x18, 0xd3, 0x63, 0x33,
+ 0xa9, 0x2a, 0xb5, 0x74, 0x04, 0xea, 0x60, 0x7a, 0xdc, 0xb5, 0x1b, 0x3f, 0xe5, 0x94, 0x5d, 0x02,
+ 0x8e, 0x9e, 0xc2, 0xe2, 0x51, 0xe4, 0x59, 0x3c, 0x81, 0xcc, 0x64, 0xac, 0x9d, 0xa7, 0x1c, 0x57,
+ 0x63, 0x1a, 0x31, 0x7c, 0x1e, 0xc1, 0x65, 0x3f, 0x74, 0xc6, 0x8e, 0x87, 0x5d, 0x73, 0x96, 0x3f,
+ 0x33, 0x27, 0xff, 0x4a, 0xcc, 0xb7, 0x37, 0xfd, 0x9d, 0x03, 0x28, 0x1f, 0x39, 0x2e, 0x91, 0xd4,
+ 0xd9, 0x39, 0xa9, 0x4b, 0x9c, 0x42, 0xd0, 0xdd, 0x80, 0x8a, 0xeb, 0x78, 0xc4, 0xf4, 0xa2, 0xc9,
+ 0x88, 0x84, 0xaa, 0x7c, 0x02, 0x3f, 0xea, 0x89, 0x13, 0xf4, 0x3f, 0x58, 0xb4, 0x7c, 0x37, 0x9a,
+ 0x78, 0xb1, 0x4a, 0x5e, 0xa8, 0x54, 0xe5, 0xa1, 0x52, 0xda, 0x83, 0x8a, 0xeb, 0x63, 0xdb, 0x9c,
+ 0xf8, 0x76, 0xe4, 0xc6, 0x13, 0xf4, 0x67, 0x67, 0x98, 0x75, 0x20, 0x94, 0x0d, 0xe0, 0x48, 0xb9,
+ 0x46, 0xdf, 0xc1, 0x12, 0xf5, 0xa3, 0xd0, 0x22, 0xe6, 0x09, 0x09, 0x29, 0xef, 0x95, 0xc5, 0x39,
+ 0x3d, 0x5c, 0x94, 0x3c, 0x87, 0x92, 0xa6, 0xf1, 0xb3, 0x06, 0x95, 0xa9, 0x78, 0x42, 0x8f, 0x21,
+ 0x2f, 0xc2, 0x52, 0x65, 0xf3, 0xdd, 0x79, 0xa2, 0xd2, 0x90, 0x14, 0xe8, 0x36, 0xac, 0xc4, 0xa9,
+ 0x2d, 0x43, 0x7d, 0x26, 0xb7, 0x91, 0x92, 0xc9, 0x0f, 0xcb, 0xe4, 0xfe, 0x5d, 0x83, 0x82, 0xf2,
+ 0xb8, 0x03, 0x05, 0x75, 0x69, 0xf3, 0x86, 0xa1, 0xc2, 0xa3, 0x27, 0x50, 0x1a, 0x45, 0x7c, 0xae,
+ 0x55, 0xa9, 0x30, 0x0f, 0x57, 0x51, 0x30, 0x74, 0xed, 0xf5, 0x1f, 0x60, 0xf9, 0x1d, 0x29, 0x5a,
+ 0x89, 0x67, 0x43, 0xd9, 0x1b, 0xe4, 0x86, 0xbb, 0xcf, 0xa4, 0x2a, 0xb1, 0x45, 0x13, 0x9e, 0x75,
+ 0x3f, 0x91, 0xf1, 0x26, 0x2c, 0xdc, 0xdf, 0x7e, 0xad, 0xc1, 0x7f, 0x2d, 0x7f, 0xf2, 0x71, 0xeb,
+ 0xb6, 0x41, 0xdc, 0x77, 0x9f, 0x4f, 0x88, 0x7d, 0xed, 0xf9, 0x23, 0xa5, 0x3c, 0xf6, 0x5d, 0xec,
+ 0x8d, 0x75, 0x3f, 0x1c, 0x37, 0xc7, 0xc4, 0x13, 0xf3, 0x63, 0x53, 0x8a, 0x70, 0xe0, 0xd0, 0x0f,
+ 0xfc, 0x6d, 0x7f, 0x95, 0xee, 0x5e, 0x67, 0x56, 0x1f, 0x49, 0xa6, 0x1d, 0x7e, 0xa6, 0xcb, 0x47,
+ 0x3d, 0x6c, 0xfd, 0x19, 0x9f, 0xbf, 0x10, 0xe7, 0x2f, 0xc4, 0xf9, 0x8b, 0xc3, 0xd6, 0xa8, 0x20,
+ 0xbe, 0x71, 0xe7, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x3c, 0x8a, 0x77, 0xd0, 0x0f, 0x00,
+ 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
new file mode 100644
index 00000000000..73186b7a5d9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/devtools/cloudtrace/v2/tracing.pb.go
@@ -0,0 +1,227 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/devtools/cloudtrace/v2/tracing.proto
+
+package cloudtrace
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "github.com/golang/protobuf/ptypes/timestamp"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The request message for the `BatchWriteSpans` method.
+type BatchWriteSpansRequest struct {
+ // Required. The name of the project where the spans belong. The format is
+ // `projects/[PROJECT_ID]`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A list of new spans. The span names must not match existing
+ // spans, or the results are undefined.
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BatchWriteSpansRequest) Reset() { *m = BatchWriteSpansRequest{} }
+func (m *BatchWriteSpansRequest) String() string { return proto.CompactTextString(m) }
+func (*BatchWriteSpansRequest) ProtoMessage() {}
+func (*BatchWriteSpansRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d1f9b588db05fdc6, []int{0}
+}
+
+func (m *BatchWriteSpansRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_BatchWriteSpansRequest.Unmarshal(m, b)
+}
+func (m *BatchWriteSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_BatchWriteSpansRequest.Marshal(b, m, deterministic)
+}
+func (m *BatchWriteSpansRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BatchWriteSpansRequest.Merge(m, src)
+}
+func (m *BatchWriteSpansRequest) XXX_Size() int {
+ return xxx_messageInfo_BatchWriteSpansRequest.Size(m)
+}
+func (m *BatchWriteSpansRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BatchWriteSpansRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BatchWriteSpansRequest proto.InternalMessageInfo
+
+func (m *BatchWriteSpansRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *BatchWriteSpansRequest) GetSpans() []*Span {
+ if m != nil {
+ return m.Spans
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*BatchWriteSpansRequest)(nil), "google.devtools.cloudtrace.v2.BatchWriteSpansRequest")
+}
+
+func init() {
+ proto.RegisterFile("google/devtools/cloudtrace/v2/tracing.proto", fileDescriptor_d1f9b588db05fdc6)
+}
+
+var fileDescriptor_d1f9b588db05fdc6 = []byte{
+ // 404 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xdd, 0x6a, 0xdb, 0x30,
+ 0x14, 0x46, 0xde, 0x0f, 0x4c, 0x1b, 0x0c, 0x04, 0x0b, 0xc1, 0xdb, 0x58, 0xe6, 0x0d, 0x96, 0x64,
+ 0x43, 0x02, 0x8f, 0x5d, 0x2c, 0x63, 0x37, 0x09, 0x23, 0xb7, 0x21, 0x19, 0x19, 0x8c, 0xdc, 0x28,
+ 0x8e, 0xa6, 0x69, 0xd8, 0x92, 0x67, 0x29, 0x86, 0x52, 0x7a, 0xd3, 0x9b, 0x3e, 0x40, 0xfb, 0x14,
+ 0xa5, 0xd0, 0xf7, 0xe8, 0x6d, 0x5f, 0xa1, 0x0f, 0x52, 0x24, 0xd9, 0x0d, 0x84, 0x34, 0xc9, 0x9d,
+ 0xce, 0x39, 0xdf, 0xf9, 0xce, 0xf7, 0x7d, 0x36, 0xfc, 0xc8, 0x95, 0xe2, 0x29, 0x23, 0x0b, 0x56,
+ 0x1a, 0xa5, 0x52, 0x4d, 0x92, 0x54, 0x2d, 0x17, 0xa6, 0xa0, 0x09, 0x23, 0x65, 0x4c, 0xec, 0x43,
+ 0x48, 0x8e, 0xf3, 0x42, 0x19, 0x85, 0x5e, 0x7b, 0x30, 0xae, 0xc1, 0x78, 0x05, 0xc6, 0x65, 0x1c,
+ 0xbe, 0xaa, 0xb8, 0x68, 0x2e, 0x08, 0x95, 0x52, 0x19, 0x6a, 0x84, 0x92, 0xda, 0x2f, 0x87, 0x9d,
+ 0xdd, 0x97, 0x58, 0x05, 0x7d, 0x59, 0x41, 0x5d, 0x35, 0x5f, 0xfe, 0x21, 0x2c, 0xcb, 0xcd, 0x41,
+ 0x35, 0x7c, 0xb3, 0x3e, 0x34, 0x22, 0x63, 0xda, 0xd0, 0x2c, 0xf7, 0x80, 0x88, 0xc3, 0x46, 0x9f,
+ 0x9a, 0xe4, 0xef, 0xaf, 0x42, 0x18, 0x36, 0xc9, 0xa9, 0xd4, 0x63, 0xf6, 0x7f, 0xc9, 0xb4, 0x41,
+ 0x08, 0x3e, 0x94, 0x34, 0x63, 0x4d, 0xd0, 0x02, 0xed, 0x27, 0x63, 0xf7, 0x46, 0x5f, 0xe1, 0x23,
+ 0x6d, 0x31, 0xcd, 0xa0, 0xf5, 0xa0, 0xfd, 0x34, 0x7e, 0x87, 0xb7, 0x7a, 0xc4, 0x96, 0x6f, 0xec,
+ 0x37, 0xe2, 0xcb, 0x00, 0x3e, 0xfb, 0x69, 0x07, 0x13, 0x56, 0x94, 0x22, 0x61, 0xe8, 0x0c, 0xc0,
+ 0xe7, 0x6b, 0xa7, 0xd1, 0x97, 0x1d, 0x84, 0x9b, 0xa5, 0x86, 0x8d, 0x7a, 0xad, 0xb6, 0x89, 0x7f,
+ 0xd8, 0x0c, 0xa2, 0xf8, 0xf8, 0xfa, 0xe6, 0x34, 0xf8, 0x14, 0x7d, 0xb0, 0x99, 0x1d, 0x5a, 0x07,
+ 0xdf, 0xf3, 0x42, 0xfd, 0x63, 0x89, 0xd1, 0xa4, 0x7b, 0xe4, 0x53, 0xd4, 0xbd, 0xf9, 0x1d, 0x69,
+ 0x0f, 0x74, 0xd1, 0x09, 0x80, 0x70, 0x50, 0x30, 0xea, 0x4f, 0xa0, 0x7d, 0x2c, 0x86, 0xfb, 0x80,
+ 0x22, 0xe2, 0xc4, 0x74, 0xa2, 0xf7, 0x9b, 0xc4, 0x54, 0x5a, 0xac, 0x2a, 0x17, 0x57, 0x0f, 0x74,
+ 0xfb, 0x17, 0x00, 0xbe, 0x4d, 0x54, 0xb6, 0x9d, 0xbb, 0xef, 0x42, 0x15, 0x92, 0x8f, 0xac, 0xf5,
+ 0x11, 0xf8, 0x3d, 0xac, 0xe0, 0x5c, 0xa5, 0x54, 0x72, 0xac, 0x0a, 0x4e, 0x38, 0x93, 0x2e, 0x18,
+ 0xe2, 0x47, 0x34, 0x17, 0xfa, 0x9e, 0x1f, 0xeb, 0xdb, 0xaa, 0x3a, 0x0f, 0x5e, 0x0c, 0x3d, 0xd3,
+ 0xc0, 0xf6, 0xb0, 0xfb, 0x76, 0x78, 0x1a, 0x5f, 0xd5, 0xfd, 0x99, 0xeb, 0xcf, 0x5c, 0x7f, 0x36,
+ 0x8d, 0xe7, 0x8f, 0xdd, 0x8d, 0xcf, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x94, 0x51, 0x1d,
+ 0x25, 0x03, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // Sends new spans to new or existing traces. You cannot update
+ // existing spans.
+ BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Creates a new span.
+ CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error)
+}
+
+type traceServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) BatchWriteSpans(ctx context.Context, in *BatchWriteSpansRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *traceServiceClient) CreateSpan(ctx context.Context, in *Span, opts ...grpc.CallOption) (*Span, error) {
+ out := new(Span)
+ err := c.cc.Invoke(ctx, "/google.devtools.cloudtrace.v2.TraceService/CreateSpan", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+ // Sends new spans to new or existing traces. You cannot update
+ // existing spans.
+ BatchWriteSpans(context.Context, *BatchWriteSpansRequest) (*empty.Empty, error)
+ // Creates a new span.
+ CreateSpan(context.Context, *Span) (*Span, error)
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+ s.RegisterService(&_TraceService_serviceDesc, srv)
+}
+
+func _TraceService_BatchWriteSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(BatchWriteSpansRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).BatchWriteSpans(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).BatchWriteSpans(ctx, req.(*BatchWriteSpansRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _TraceService_CreateSpan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Span)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).CreateSpan(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.devtools.cloudtrace.v2.TraceService/CreateSpan",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).CreateSpan(ctx, req.(*Span))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _TraceService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.devtools.cloudtrace.v2.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "BatchWriteSpans",
+ Handler: _TraceService_BatchWriteSpans_Handler,
+ },
+ {
+ MethodName: "CreateSpan",
+ Handler: _TraceService_CreateSpan_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/devtools/cloudtrace/v2/tracing.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
new file mode 100644
index 00000000000..f30ec39538d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go
@@ -0,0 +1,966 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/alert.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Operators for combining conditions.
+type AlertPolicy_ConditionCombinerType int32
+
+const (
+ // An unspecified combiner.
+ AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0
+ // Combine conditions using the logical `AND` operator. An
+ // incident is created only if all conditions are met
+ // simultaneously. This combiner is satisfied if all conditions are
+ // met, even if they are met on completely different resources.
+ AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1
+ // Combine conditions using the logical `OR` operator. An incident
+ // is created if any of the listed conditions is met.
+ AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2
+ // Combine conditions using logical `AND` operator, but unlike the regular
+ // `AND` option, an incident is created only if all conditions are met
+ // simultaneously on at least one resource.
+ AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3
+)
+
+var AlertPolicy_ConditionCombinerType_name = map[int32]string{
+ 0: "COMBINE_UNSPECIFIED",
+ 1: "AND",
+ 2: "OR",
+ 3: "AND_WITH_MATCHING_RESOURCE",
+}
+
+var AlertPolicy_ConditionCombinerType_value = map[string]int32{
+ "COMBINE_UNSPECIFIED": 0,
+ "AND": 1,
+ "OR": 2,
+ "AND_WITH_MATCHING_RESOURCE": 3,
+}
+
+func (x AlertPolicy_ConditionCombinerType) String() string {
+ return proto.EnumName(AlertPolicy_ConditionCombinerType_name, int32(x))
+}
+
+func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 0}
+}
+
+// A description of the conditions under which some aspect of your system is
+// considered to be "unhealthy" and the ways to notify people or services about
+// this state. For an overview of alert policies, see
+// [Introduction to Alerting](/monitoring/alerts/).
+type AlertPolicy struct {
+ // Required if the policy exists. The resource name for this policy. The
+ // syntax is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // `[ALERT_POLICY_ID]` is assigned by Stackdriver Monitoring when the policy
+ // is created. When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the alerting policy passed as
+ // part of the request.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the policy in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple policies in the same project. The name is
+ // limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Documentation that is included with notifications and incidents related to
+ // this policy. Best practice is for the documentation to include information
+ // to help responders understand, mitigate, escalate, and correct the
+ // underlying problems detected by the alerting policy. Notification channels
+ // that have limited capacity might not show this documentation.
+ Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `AlertPolicy` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of conditions for the policy. The conditions are combined by AND or
+ // OR according to the `combiner` field. If the combined conditions evaluate
+ // to true, then an incident is created. A policy can have from one to six
+ // conditions.
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"`
+ // How to combine the results of multiple conditions
+ // to determine if an incident should be opened.
+ Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
+ // Whether or not the policy is enabled. On write, the default interpretation
+ // if unset is that the policy is enabled. On read, clients should not make
+ // any assumption about the state if it has not been populated. The
+ // field should always be populated on List and Get operations, unless
+ // a field projection has been specified that strips it out.
+ Enabled *wrappers.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Identifies the notification channels to which notifications should be sent
+ // when incidents are opened or closed or when new violations occur on
+ // an already opened incident. Each element of this array corresponds to
+ // the `name` field in each of the
+ // [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
+ // objects that are returned from the [`ListNotificationChannels`]
+ // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // method. The syntax of the entries in this field is:
+ //
+ // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
+ NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // A read-only record of the creation of the alerting policy. If provided
+ // in a call to create or update, this field will be ignored.
+ CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
+ // A read-only record of the most recent change to the alerting policy. If
+ // provided in a call to create or update, this field will be ignored.
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy) Reset() { *m = AlertPolicy{} }
+func (m *AlertPolicy) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy) ProtoMessage() {}
+func (*AlertPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0}
+}
+
+func (m *AlertPolicy) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy.Unmarshal(m, b)
+}
+func (m *AlertPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy.Merge(m, src)
+}
+func (m *AlertPolicy) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy.Size(m)
+}
+func (m *AlertPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy proto.InternalMessageInfo
+
+func (m *AlertPolicy) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AlertPolicy) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+ if m != nil {
+ return m.Documentation
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+ if m != nil {
+ return m.Conditions
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+ if m != nil {
+ return m.Combiner
+ }
+ return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (m *AlertPolicy) GetEnabled() *wrappers.BoolValue {
+ if m != nil {
+ return m.Enabled
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetNotificationChannels() []string {
+ if m != nil {
+ return m.NotificationChannels
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetCreationRecord() *MutationRecord {
+ if m != nil {
+ return m.CreationRecord
+ }
+ return nil
+}
+
+func (m *AlertPolicy) GetMutationRecord() *MutationRecord {
+ if m != nil {
+ return m.MutationRecord
+ }
+ return nil
+}
+
+// A content string and a MIME type that describes the content string's
+// format.
+type AlertPolicy_Documentation struct {
+ // The text of the documentation, interpreted according to `mime_type`.
+ // The content may not exceed 8,192 Unicode characters and may not exceed
+ // more than 10,240 bytes when encoded in UTF-8 format, whichever is
+ // smaller.
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The format of the `content` field. Presently, only the value
+ // `"text/markdown"` is supported. See
+ // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy_Documentation) Reset() { *m = AlertPolicy_Documentation{} }
+func (m *AlertPolicy_Documentation) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Documentation) ProtoMessage() {}
+func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 0}
+}
+
+func (m *AlertPolicy_Documentation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy_Documentation.Unmarshal(m, b)
+}
+func (m *AlertPolicy_Documentation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy_Documentation.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy_Documentation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy_Documentation.Merge(m, src)
+}
+func (m *AlertPolicy_Documentation) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy_Documentation.Size(m)
+}
+func (m *AlertPolicy_Documentation) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy_Documentation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy_Documentation proto.InternalMessageInfo
+
+func (m *AlertPolicy_Documentation) GetContent() string {
+ if m != nil {
+ return m.Content
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Documentation) GetMimeType() string {
+ if m != nil {
+ return m.MimeType
+ }
+ return ""
+}
+
+// A condition is a true/false test that determines when an alerting policy
+// should open an incident. If a condition evaluates to true, it signifies
+// that something is wrong.
+type AlertPolicy_Condition struct {
+ // Required if the condition exists. The unique resource name for this
+ // condition. Its syntax is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
+ //
+ // `[CONDITION_ID]` is assigned by Stackdriver Monitoring when the
+ // condition is created as part of a new or updated alerting policy.
+ //
+ // When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the conditions of the
+ // requested alerting policy. Stackdriver Monitoring creates the
+ // condition identifiers and includes them in the new policy.
+ //
+ // When calling the
+ // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy]
+ // method to update a policy, including a condition `name` causes the
+ // existing condition to be updated. Conditions without names are added to
+ // the updated policy. Existing conditions are deleted if they are not
+ // updated.
+ //
+ // Best practice is to preserve `[CONDITION_ID]` if you make only small
+ // changes, such as those to condition thresholds, durations, or trigger
+ // values. Otherwise, treat the change as a new condition and let the
+ // existing condition be deleted.
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the condition in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple conditions in the same policy.
+ DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Only one of the following condition types will be specified.
+ //
+ // Types that are valid to be assigned to Condition:
+ // *AlertPolicy_Condition_ConditionThreshold
+ // *AlertPolicy_Condition_ConditionAbsent
+ Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy_Condition) Reset() { *m = AlertPolicy_Condition{} }
+func (m *AlertPolicy_Condition) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition) ProtoMessage() {}
+func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 1}
+}
+
+func (m *AlertPolicy_Condition) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy_Condition.Unmarshal(m, b)
+}
+func (m *AlertPolicy_Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy_Condition.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy_Condition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy_Condition.Merge(m, src)
+}
+func (m *AlertPolicy_Condition) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy_Condition.Size(m)
+}
+func (m *AlertPolicy_Condition) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy_Condition proto.InternalMessageInfo
+
+func (m *AlertPolicy_Condition) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+type isAlertPolicy_Condition_Condition interface {
+ isAlertPolicy_Condition_Condition()
+}
+
+type AlertPolicy_Condition_ConditionThreshold struct {
+ ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionAbsent struct {
+ ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}
+
+func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition {
+ if m != nil {
+ return m.Condition
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold {
+ if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok {
+ return x.ConditionThreshold
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence {
+ if x, ok := m.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok {
+ return x.ConditionAbsent
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AlertPolicy_Condition) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AlertPolicy_Condition_OneofMarshaler, _AlertPolicy_Condition_OneofUnmarshaler, _AlertPolicy_Condition_OneofSizer, []interface{}{
+ (*AlertPolicy_Condition_ConditionThreshold)(nil),
+ (*AlertPolicy_Condition_ConditionAbsent)(nil),
+ }
+}
+
+func _AlertPolicy_Condition_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AlertPolicy_Condition)
+ // condition
+ switch x := m.Condition.(type) {
+ case *AlertPolicy_Condition_ConditionThreshold:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ConditionThreshold); err != nil {
+ return err
+ }
+ case *AlertPolicy_Condition_ConditionAbsent:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ConditionAbsent); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("AlertPolicy_Condition.Condition has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AlertPolicy_Condition_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AlertPolicy_Condition)
+ switch tag {
+ case 1: // condition.condition_threshold
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(AlertPolicy_Condition_MetricThreshold)
+ err := b.DecodeMessage(msg)
+ m.Condition = &AlertPolicy_Condition_ConditionThreshold{msg}
+ return true, err
+ case 2: // condition.condition_absent
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(AlertPolicy_Condition_MetricAbsence)
+ err := b.DecodeMessage(msg)
+ m.Condition = &AlertPolicy_Condition_ConditionAbsent{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AlertPolicy_Condition_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AlertPolicy_Condition)
+ // condition
+ switch x := m.Condition.(type) {
+ case *AlertPolicy_Condition_ConditionThreshold:
+ s := proto.Size(x.ConditionThreshold)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *AlertPolicy_Condition_ConditionAbsent:
+ s := proto.Size(x.ConditionAbsent)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Specifies how many time series must fail a predicate to trigger a
+// condition. If not specified, then a `{count: 1}` trigger is used.
+type AlertPolicy_Condition_Trigger struct {
+ // A type of trigger.
+ //
+ // Types that are valid to be assigned to Type:
+ // *AlertPolicy_Condition_Trigger_Count
+ // *AlertPolicy_Condition_Trigger_Percent
+ Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy_Condition_Trigger) Reset() { *m = AlertPolicy_Condition_Trigger{} }
+func (m *AlertPolicy_Condition_Trigger) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_Trigger) ProtoMessage() {}
+func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 0}
+}
+
+func (m *AlertPolicy_Condition_Trigger) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy_Condition_Trigger.Unmarshal(m, b)
+}
+func (m *AlertPolicy_Condition_Trigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy_Condition_Trigger.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy_Condition_Trigger) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy_Condition_Trigger.Merge(m, src)
+}
+func (m *AlertPolicy_Condition_Trigger) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy_Condition_Trigger.Size(m)
+}
+func (m *AlertPolicy_Condition_Trigger) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy_Condition_Trigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy_Condition_Trigger proto.InternalMessageInfo
+
+type isAlertPolicy_Condition_Trigger_Type interface {
+ isAlertPolicy_Condition_Trigger_Type()
+}
+
+type AlertPolicy_Condition_Trigger_Count struct {
+ Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_Trigger_Percent struct {
+ Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {}
+
+func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {}
+
+func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetCount() int32 {
+ if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Count); ok {
+ return x.Count
+ }
+ return 0
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetPercent() float64 {
+ if x, ok := m.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok {
+ return x.Percent
+ }
+ return 0
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AlertPolicy_Condition_Trigger) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _AlertPolicy_Condition_Trigger_OneofMarshaler, _AlertPolicy_Condition_Trigger_OneofUnmarshaler, _AlertPolicy_Condition_Trigger_OneofSizer, []interface{}{
+ (*AlertPolicy_Condition_Trigger_Count)(nil),
+ (*AlertPolicy_Condition_Trigger_Percent)(nil),
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Count))
+ case *AlertPolicy_Condition_Trigger_Percent:
+ b.EncodeVarint(2<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.Percent))
+ case nil:
+ default:
+ return fmt.Errorf("AlertPolicy_Condition_Trigger.Type has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _AlertPolicy_Condition_Trigger_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ switch tag {
+ case 1: // type.count
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Type = &AlertPolicy_Condition_Trigger_Count{int32(x)}
+ return true, err
+ case 2: // type.percent
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Type = &AlertPolicy_Condition_Trigger_Percent{math.Float64frombits(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _AlertPolicy_Condition_Trigger_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*AlertPolicy_Condition_Trigger)
+ // type
+ switch x := m.Type.(type) {
+ case *AlertPolicy_Condition_Trigger_Count:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Count))
+ case *AlertPolicy_Condition_Trigger_Percent:
+ n += 1 // tag and wire
+ n += 8
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resrouces). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the
+ // [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field.
+ Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // A [filter](/monitoring/api/v3/filters) that identifies a time
+ // series that should be used as the denominator of a ratio that will be
+ // compared with the threshold. If a `denominator_filter` is specified,
+ // the time series specified by the `filter` field will be used as the
+ // numerator.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"`
+ // Specifies the alignment of data points in individual time series
+ // selected by `denominatorFilter` as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources).
+ //
+ // When computing ratios, the `aggregations` and
+ // `denominator_aggregations` fields must use the same alignment period
+ // and produce time series that have the same periodicity and labels.
+ //
+ // This field is similar to the one in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list). It
+ // is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"`
+ // The comparison to apply between the time series (indicated by `filter`
+ // and `aggregation`) and the threshold (indicated by `threshold_value`).
+ // The comparison is applied on each time series, with the time series
+ // on the left-hand side and the threshold on the right-hand side.
+ //
+ // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently.
+ Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // A value against which to compare the time series.
+ ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g., 0, 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. When choosing a duration, it is useful to
+ // keep in mind the frequency of the underlying time series data
+ // (which may also be affected by any alignments specified in the
+ // `aggregations` field); a good duration is long enough so that a single
+ // outlier does not generate spurious alerts, but short enough that
+ // unhealthy states are detected and alerted on quickly.
+ Duration *duration.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) Reset() { *m = AlertPolicy_Condition_MetricThreshold{} }
+func (m *AlertPolicy_Condition_MetricThreshold) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {}
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 1}
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Unmarshal(m, b)
+}
+func (m *AlertPolicy_Condition_MetricThreshold) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy_Condition_MetricThreshold) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Merge(m, src)
+}
+func (m *AlertPolicy_Condition_MetricThreshold) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.Size(m)
+}
+func (m *AlertPolicy_Condition_MetricThreshold) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy_Condition_MetricThreshold.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy_Condition_MetricThreshold proto.InternalMessageInfo
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation {
+ if m != nil {
+ return m.Aggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string {
+ if m != nil {
+ return m.DenominatorFilter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation {
+ if m != nil {
+ return m.DenominatorAggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType {
+ if m != nil {
+ return m.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 {
+ if m != nil {
+ return m.ThresholdValue
+ }
+ return 0
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetDuration() *duration.Duration {
+ if m != nil {
+ return m.Duration
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if m != nil {
+ return m.Trigger
+ }
+ return nil
+}
+
+// A condition type that checks that monitored resources
+// are reporting data. The configuration defines a metric and
+// a set of monitored resources. The predicate is considered in violation
+// when a time series for the specified metric of a monitored
+// resource does not include any data in the specified `duration`.
+type AlertPolicy_Condition_MetricAbsence struct {
+ // A [filter](/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`MetricService.ListTimeSeries`
+ // request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list) (that
+ // call is useful to verify the time series that will be retrieved /
+ // processed) and must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resrouces). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the
+ // one in the [`MetricService.ListTimeSeries` request](/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this field.
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // The amount of time that a time series must fail to report new
+ // data to be considered failing. Currently, only values that
+ // are a multiple of a minute--e.g. 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored.
+ Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) Reset() { *m = AlertPolicy_Condition_MetricAbsence{} }
+func (m *AlertPolicy_Condition_MetricAbsence) String() string { return proto.CompactTextString(m) }
+func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {}
+func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) {
+ return fileDescriptor_014ef0e1a0f00a00, []int{0, 1, 2}
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Unmarshal(m, b)
+}
+func (m *AlertPolicy_Condition_MetricAbsence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Marshal(b, m, deterministic)
+}
+func (m *AlertPolicy_Condition_MetricAbsence) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Merge(m, src)
+}
+func (m *AlertPolicy_Condition_MetricAbsence) XXX_Size() int {
+ return xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.Size(m)
+}
+func (m *AlertPolicy_Condition_MetricAbsence) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlertPolicy_Condition_MetricAbsence.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlertPolicy_Condition_MetricAbsence proto.InternalMessageInfo
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation {
+ if m != nil {
+ return m.Aggregations
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetDuration() *duration.Duration {
+ if m != nil {
+ return m.Duration
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if m != nil {
+ return m.Trigger
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.monitoring.v3.AlertPolicy_ConditionCombinerType", AlertPolicy_ConditionCombinerType_name, AlertPolicy_ConditionCombinerType_value)
+ proto.RegisterType((*AlertPolicy)(nil), "google.monitoring.v3.AlertPolicy")
+ proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.AlertPolicy.UserLabelsEntry")
+ proto.RegisterType((*AlertPolicy_Documentation)(nil), "google.monitoring.v3.AlertPolicy.Documentation")
+ proto.RegisterType((*AlertPolicy_Condition)(nil), "google.monitoring.v3.AlertPolicy.Condition")
+ proto.RegisterType((*AlertPolicy_Condition_Trigger)(nil), "google.monitoring.v3.AlertPolicy.Condition.Trigger")
+ proto.RegisterType((*AlertPolicy_Condition_MetricThreshold)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricThreshold")
+ proto.RegisterType((*AlertPolicy_Condition_MetricAbsence)(nil), "google.monitoring.v3.AlertPolicy.Condition.MetricAbsence")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/alert.proto", fileDescriptor_014ef0e1a0f00a00) }
+
+var fileDescriptor_014ef0e1a0f00a00 = []byte{
+ // 941 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xeb, 0x6e, 0xe3, 0x44,
+ 0x14, 0xae, 0x93, 0xe6, 0x76, 0xd2, 0x36, 0xd9, 0xd9, 0xee, 0xae, 0x31, 0x68, 0x95, 0xae, 0x90,
+ 0x88, 0x40, 0x38, 0x22, 0x01, 0x71, 0x59, 0x81, 0x94, 0x5b, 0x37, 0x11, 0x24, 0xad, 0xa6, 0x69,
+ 0x91, 0x50, 0x25, 0xcb, 0x71, 0xa6, 0xae, 0x85, 0x3d, 0x63, 0x4d, 0xec, 0xa2, 0xbc, 0x0e, 0x3f,
+ 0x79, 0x14, 0x1e, 0x81, 0x7f, 0xbc, 0x02, 0xe2, 0x01, 0x90, 0xc7, 0x63, 0xc7, 0xe9, 0xa6, 0xbb,
+ 0x64, 0xf7, 0x5f, 0xce, 0x9c, 0xef, 0x7c, 0xe7, 0xf6, 0xcd, 0x38, 0xd0, 0xb0, 0x19, 0xb3, 0x5d,
+ 0xd2, 0xf2, 0x18, 0x75, 0x02, 0xc6, 0x1d, 0x6a, 0xb7, 0xee, 0x3a, 0x2d, 0xd3, 0x25, 0x3c, 0xd0,
+ 0x7d, 0xce, 0x02, 0x86, 0x8e, 0x63, 0x84, 0xbe, 0x46, 0xe8, 0x77, 0x1d, 0xed, 0x23, 0x19, 0x67,
+ 0xfa, 0x4e, 0xcb, 0xa4, 0x94, 0x05, 0x66, 0xe0, 0x30, 0xba, 0x8c, 0x63, 0xb4, 0x93, 0xad, 0xac,
+ 0x16, 0xf3, 0x3c, 0x46, 0x25, 0xe4, 0xd3, 0xad, 0x10, 0x2f, 0x8c, 0x89, 0x0c, 0x4e, 0x2c, 0xc6,
+ 0x17, 0x12, 0xfb, 0x5c, 0x62, 0x85, 0x35, 0x0f, 0x6f, 0x5a, 0x8b, 0x90, 0x0b, 0xd8, 0x43, 0xfe,
+ 0xdf, 0xb8, 0xe9, 0xfb, 0x84, 0xcb, 0x72, 0x5e, 0xfc, 0x5d, 0x83, 0x6a, 0x37, 0x6a, 0xe9, 0x9c,
+ 0xb9, 0x8e, 0xb5, 0x42, 0x08, 0xf6, 0xa9, 0xe9, 0x11, 0x55, 0x69, 0x28, 0xcd, 0x0a, 0x16, 0xbf,
+ 0xd1, 0x09, 0x1c, 0x2c, 0x9c, 0xa5, 0xef, 0x9a, 0x2b, 0x43, 0xf8, 0x72, 0xc2, 0x57, 0x95, 0x67,
+ 0xd3, 0x08, 0x72, 0x09, 0x87, 0x0b, 0x66, 0x85, 0x1e, 0xa1, 0x71, 0x91, 0xea, 0x61, 0x43, 0x69,
+ 0x56, 0xdb, 0x2d, 0x7d, 0xdb, 0x84, 0xf4, 0x4c, 0x42, 0x7d, 0x90, 0x0d, 0xc3, 0x9b, 0x2c, 0x08,
+ 0x43, 0x35, 0x5c, 0x12, 0x6e, 0xb8, 0xe6, 0x9c, 0xb8, 0x4b, 0xb5, 0xde, 0xc8, 0x37, 0xab, 0xed,
+ 0x2f, 0xde, 0x4e, 0x7a, 0xb9, 0x24, 0xfc, 0x27, 0x11, 0x33, 0xa4, 0x01, 0x5f, 0x61, 0x08, 0xd3,
+ 0x03, 0xf4, 0x23, 0x80, 0xc5, 0xe8, 0xc2, 0x11, 0x4b, 0x51, 0x0f, 0x04, 0xe5, 0x67, 0x6f, 0xa7,
+ 0xec, 0x27, 0x31, 0x38, 0x13, 0x8e, 0x2e, 0xa0, 0x6c, 0x31, 0x6f, 0xee, 0x50, 0xc2, 0xd5, 0x62,
+ 0x43, 0x69, 0x1e, 0xb5, 0xbf, 0xde, 0x81, 0xaa, 0x2f, 0x43, 0x67, 0x2b, 0x9f, 0xe0, 0x94, 0x08,
+ 0x7d, 0x09, 0x25, 0x42, 0xcd, 0xb9, 0x4b, 0x16, 0xea, 0x23, 0x31, 0x46, 0x2d, 0xe1, 0x4c, 0xb6,
+ 0xa8, 0xf7, 0x18, 0x73, 0xaf, 0x4c, 0x37, 0x24, 0x38, 0x81, 0xa2, 0x0e, 0x3c, 0xa1, 0x2c, 0x70,
+ 0x6e, 0x1c, 0x2b, 0x96, 0x89, 0x75, 0x6b, 0x52, 0x1a, 0x4d, 0xed, 0xa8, 0x91, 0x6f, 0x56, 0xf0,
+ 0x71, 0xd6, 0xd9, 0x97, 0x3e, 0x34, 0x81, 0x9a, 0xc5, 0x49, 0x56, 0x57, 0x2a, 0x88, 0x94, 0x1f,
+ 0x6f, 0x6f, 0x63, 0x22, 0x45, 0x88, 0x05, 0x16, 0x1f, 0x25, 0xc1, 0xb1, 0x1d, 0xd1, 0xdd, 0x93,
+ 0xa9, 0x5a, 0xdd, 0x85, 0xce, 0xdb, 0xb0, 0xb5, 0x53, 0x38, 0xdc, 0x90, 0x07, 0x52, 0xa1, 0x64,
+ 0x31, 0x1a, 0x10, 0x1a, 0x48, 0x81, 0x26, 0x26, 0xfa, 0x10, 0x2a, 0x9e, 0xe3, 0x11, 0x23, 0x58,
+ 0xf9, 0x89, 0x40, 0xcb, 0xd1, 0x41, 0x34, 0x5a, 0xed, 0xaf, 0x32, 0x54, 0xd2, 0xa1, 0xa7, 0x12,
+ 0x3f, 0x78, 0x83, 0xc4, 0x8b, 0xaf, 0x4b, 0x9c, 0xc2, 0xe3, 0x74, 0xf1, 0x46, 0x70, 0xcb, 0xc9,
+ 0xf2, 0x96, 0xb9, 0x0b, 0x51, 0x47, 0xb5, 0xfd, 0x72, 0x87, 0xad, 0xeb, 0x13, 0x12, 0x70, 0xc7,
+ 0x9a, 0x25, 0x14, 0xa3, 0x3d, 0x8c, 0x52, 0xe6, 0xf4, 0x14, 0xdd, 0x40, 0x7d, 0x9d, 0xcf, 0x9c,
+ 0x2f, 0xa3, 0xa6, 0x73, 0x22, 0xd9, 0xb7, 0xbb, 0x27, 0xeb, 0x46, 0xf1, 0x16, 0x19, 0xed, 0xe1,
+ 0x5a, 0x4a, 0x2a, 0xce, 0x02, 0x6d, 0x08, 0xa5, 0x19, 0x77, 0x6c, 0x9b, 0x70, 0xf4, 0x14, 0x0a,
+ 0x16, 0x0b, 0xe5, 0x70, 0x0b, 0xa3, 0x3d, 0x1c, 0x9b, 0x48, 0x83, 0x92, 0x4f, 0xb8, 0x95, 0x54,
+ 0xa0, 0x8c, 0xf6, 0x70, 0x72, 0xd0, 0x2b, 0xc2, 0x7e, 0x34, 0x73, 0xed, 0x9f, 0x3c, 0xd4, 0xee,
+ 0x35, 0x86, 0x9e, 0x42, 0xf1, 0xc6, 0x71, 0x03, 0xc2, 0xe5, 0x46, 0xa4, 0x85, 0x86, 0x70, 0x60,
+ 0xda, 0x36, 0x27, 0x76, 0xfc, 0x32, 0xaa, 0x65, 0x71, 0x09, 0x4f, 0x1e, 0x68, 0x6b, 0x8d, 0xc4,
+ 0x1b, 0x61, 0xe8, 0x73, 0x40, 0x0b, 0x42, 0x99, 0xe7, 0x50, 0x33, 0x60, 0xdc, 0x90, 0xa9, 0x2a,
+ 0x22, 0xd5, 0xa3, 0x8c, 0xe7, 0x34, 0xce, 0x7a, 0x0d, 0x6a, 0x16, 0xbe, 0x51, 0x01, 0xfc, 0xdf,
+ 0x0a, 0x9e, 0x65, 0x28, 0xba, 0xd9, 0x62, 0x06, 0xd1, 0xb3, 0xe2, 0xf9, 0x26, 0x77, 0x96, 0x8c,
+ 0xaa, 0xfb, 0xe2, 0x2d, 0x78, 0x40, 0xf5, 0xfd, 0x14, 0x27, 0x2e, 0x7e, 0x26, 0x0e, 0x7d, 0x02,
+ 0xb5, 0x54, 0x5a, 0xc6, 0x5d, 0x74, 0xc1, 0xd5, 0x42, 0x34, 0x71, 0x7c, 0x94, 0x1e, 0x8b, 0x6b,
+ 0x8f, 0xbe, 0x82, 0x72, 0xf2, 0xd2, 0x0b, 0xb1, 0x56, 0xdb, 0x1f, 0xbc, 0xf6, 0x48, 0x0c, 0x24,
+ 0x00, 0xa7, 0x50, 0x34, 0x81, 0x52, 0x10, 0x2f, 0x5b, 0x2d, 0x89, 0xa8, 0xce, 0x2e, 0x5a, 0x92,
+ 0x3a, 0xc1, 0x09, 0x87, 0xf6, 0xaf, 0x02, 0x87, 0x1b, 0x02, 0xcb, 0xac, 0x5c, 0x79, 0xe3, 0xca,
+ 0x0b, 0xef, 0xb6, 0xf2, 0x6c, 0xdb, 0xb9, 0x77, 0x6a, 0x3b, 0xff, 0xfe, 0x6d, 0xf7, 0xaa, 0x50,
+ 0x49, 0x6f, 0x91, 0xf6, 0x3d, 0xd4, 0xee, 0x7d, 0x6e, 0x50, 0x1d, 0xf2, 0xbf, 0x92, 0x95, 0x9c,
+ 0x40, 0xf4, 0x13, 0x1d, 0x43, 0x21, 0xde, 0x66, 0x7c, 0x11, 0x62, 0xe3, 0xbb, 0xdc, 0x37, 0xca,
+ 0x0b, 0x13, 0x9e, 0x6c, 0xfd, 0x1e, 0xa0, 0x67, 0xf0, 0xb8, 0x7f, 0x36, 0xe9, 0x8d, 0xa7, 0x43,
+ 0xe3, 0x72, 0x7a, 0x71, 0x3e, 0xec, 0x8f, 0x4f, 0xc7, 0xc3, 0x41, 0x7d, 0x0f, 0x95, 0x20, 0xdf,
+ 0x9d, 0x0e, 0xea, 0x0a, 0x2a, 0x42, 0xee, 0x0c, 0xd7, 0x73, 0xe8, 0x39, 0x68, 0xdd, 0xe9, 0xc0,
+ 0xf8, 0x79, 0x3c, 0x1b, 0x19, 0x93, 0xee, 0xac, 0x3f, 0x1a, 0x4f, 0x5f, 0x19, 0x78, 0x78, 0x71,
+ 0x76, 0x89, 0xfb, 0xc3, 0x7a, 0xbe, 0xf7, 0xbb, 0x02, 0xaa, 0xc5, 0xbc, 0xad, 0x2d, 0xf7, 0x20,
+ 0xee, 0x39, 0x1a, 0xde, 0xb9, 0xf2, 0xcb, 0x0f, 0x12, 0x63, 0x33, 0xd7, 0xa4, 0xb6, 0xce, 0xb8,
+ 0xdd, 0xb2, 0x09, 0x15, 0xa3, 0x6d, 0xc5, 0x2e, 0xd3, 0x77, 0x96, 0x9b, 0xff, 0x4c, 0x5e, 0xae,
+ 0xad, 0x3f, 0x72, 0xda, 0xab, 0x98, 0xa0, 0xef, 0xb2, 0x70, 0xa1, 0x4f, 0xd6, 0xa9, 0xae, 0x3a,
+ 0x7f, 0x26, 0xce, 0x6b, 0xe1, 0xbc, 0x5e, 0x3b, 0xaf, 0xaf, 0x3a, 0xf3, 0xa2, 0x48, 0xd2, 0xf9,
+ 0x2f, 0x00, 0x00, 0xff, 0xff, 0x66, 0xb5, 0x16, 0x64, 0x76, 0x09, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
new file mode 100644
index 00000000000..70fcd5a3a6b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go
@@ -0,0 +1,672 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/alert_service.proto
+
+package monitoring
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ field_mask "google.golang.org/genproto/protobuf/field_mask"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The protocol for the `CreateAlertPolicy` request.
+type CreateAlertPolicyRequest struct {
+ // The project in which to create the alerting policy. The format is
+ // `projects/[PROJECT_ID]`.
+ //
+ // Note that this field names the parent container in which the alerting
+ // policy will be written, not the name of the created policy. The alerting
+ // policy that is returned will have a name that contains a normalized
+ // representation of this name as a prefix but adds a suffix of the form
+ // `/alertPolicies/[POLICY_ID]`, identifying the policy in the container.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The requested alerting policy. You should omit the `name` field in this
+ // policy. The name will be returned in the new policy, including
+ // a new [ALERT_POLICY_ID] value.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateAlertPolicyRequest) Reset() { *m = CreateAlertPolicyRequest{} }
+func (m *CreateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateAlertPolicyRequest) ProtoMessage() {}
+func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{0}
+}
+
+func (m *CreateAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateAlertPolicyRequest.Unmarshal(m, b)
+}
+func (m *CreateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateAlertPolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateAlertPolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateAlertPolicyRequest.Merge(m, src)
+}
+func (m *CreateAlertPolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateAlertPolicyRequest.Size(m)
+}
+func (m *CreateAlertPolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateAlertPolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateAlertPolicyRequest proto.InternalMessageInfo
+
+func (m *CreateAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if m != nil {
+ return m.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `GetAlertPolicy` request.
+type GetAlertPolicyRequest struct {
+ // The alerting policy to retrieve. The format is
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetAlertPolicyRequest) Reset() { *m = GetAlertPolicyRequest{} }
+func (m *GetAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAlertPolicyRequest) ProtoMessage() {}
+func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{1}
+}
+
+func (m *GetAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetAlertPolicyRequest.Unmarshal(m, b)
+}
+func (m *GetAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetAlertPolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *GetAlertPolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetAlertPolicyRequest.Merge(m, src)
+}
+func (m *GetAlertPolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_GetAlertPolicyRequest.Size(m)
+}
+func (m *GetAlertPolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetAlertPolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetAlertPolicyRequest proto.InternalMessageInfo
+
+func (m *GetAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` request.
+type ListAlertPoliciesRequest struct {
+ // The project whose alert policies are to be listed. The format is
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policies to be listed are stored. To retrieve a single alerting policy
+ // by name, use the
+ // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // alert policies to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A comma-separated list of fields by which to sort the result. Supports
+ // the same set of field references as the `filter` field. Entries can be
+ // prefixed with a minus sign to sort by the field in descending order.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // The maximum number of results to return in a single response.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListAlertPoliciesRequest) Reset() { *m = ListAlertPoliciesRequest{} }
+func (m *ListAlertPoliciesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListAlertPoliciesRequest) ProtoMessage() {}
+func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{2}
+}
+
+func (m *ListAlertPoliciesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListAlertPoliciesRequest.Unmarshal(m, b)
+}
+func (m *ListAlertPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListAlertPoliciesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListAlertPoliciesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListAlertPoliciesRequest.Merge(m, src)
+}
+func (m *ListAlertPoliciesRequest) XXX_Size() int {
+ return xxx_messageInfo_ListAlertPoliciesRequest.Size(m)
+}
+func (m *ListAlertPoliciesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListAlertPoliciesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListAlertPoliciesRequest proto.InternalMessageInfo
+
+func (m *ListAlertPoliciesRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetOrderBy() string {
+ if m != nil {
+ return m.OrderBy
+ }
+ return ""
+}
+
+func (m *ListAlertPoliciesRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListAlertPoliciesRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` response.
+type ListAlertPoliciesResponse struct {
+ // The returned alert policies.
+ AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"`
+ // If there might be more results than were returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListAlertPoliciesResponse) Reset() { *m = ListAlertPoliciesResponse{} }
+func (m *ListAlertPoliciesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListAlertPoliciesResponse) ProtoMessage() {}
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{3}
+}
+
+func (m *ListAlertPoliciesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListAlertPoliciesResponse.Unmarshal(m, b)
+}
+func (m *ListAlertPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListAlertPoliciesResponse.Marshal(b, m, deterministic)
+}
+func (m *ListAlertPoliciesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListAlertPoliciesResponse.Merge(m, src)
+}
+func (m *ListAlertPoliciesResponse) XXX_Size() int {
+ return xxx_messageInfo_ListAlertPoliciesResponse.Size(m)
+}
+func (m *ListAlertPoliciesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListAlertPoliciesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListAlertPoliciesResponse proto.InternalMessageInfo
+
+func (m *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
+ if m != nil {
+ return m.AlertPolicies
+ }
+ return nil
+}
+
+func (m *ListAlertPoliciesResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The protocol for the `UpdateAlertPolicy` request.
+type UpdateAlertPolicyRequest struct {
+ // Optional. A list of alerting policy field names. If this field is not
+ // empty, each listed field in the existing alerting policy is set to the
+ // value of the corresponding field in the supplied policy (`alert_policy`),
+ // or to the field's default value if the field is not in the supplied
+ // alerting policy. Fields not listed retain their previous value.
+ //
+ // Examples of valid field masks include `display_name`, `documentation`,
+ // `documentation.content`, `documentation.mime_type`, `user_labels`,
+ // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
+ //
+ // If this field is empty, then the supplied alerting policy replaces the
+ // existing policy. It is the same as deleting the existing policy and
+ // adding the supplied policy, except for the following:
+ //
+ // + The new policy will have the same `[ALERT_POLICY_ID]` as the former
+ // policy. This gives you continuity with the former policy in your
+ // notifications and incidents.
+ // + Conditions in the new policy will keep their former `[CONDITION_ID]` if
+ // the supplied condition includes the `name` field with that
+ // `[CONDITION_ID]`. If the supplied condition omits the `name` field,
+ // then a new `[CONDITION_ID]` is created.
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. The updated alerting policy or the updated values for the
+ // fields listed in `update_mask`.
+ // If `update_mask` is not empty, any fields in this policy that are
+ // not in `update_mask` are ignored.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateAlertPolicyRequest) Reset() { *m = UpdateAlertPolicyRequest{} }
+func (m *UpdateAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateAlertPolicyRequest) ProtoMessage() {}
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{4}
+}
+
+func (m *UpdateAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateAlertPolicyRequest.Unmarshal(m, b)
+}
+func (m *UpdateAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateAlertPolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateAlertPolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateAlertPolicyRequest.Merge(m, src)
+}
+func (m *UpdateAlertPolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateAlertPolicyRequest.Size(m)
+}
+func (m *UpdateAlertPolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateAlertPolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateAlertPolicyRequest proto.InternalMessageInfo
+
+func (m *UpdateAlertPolicyRequest) GetUpdateMask() *field_mask.FieldMask {
+ if m != nil {
+ return m.UpdateMask
+ }
+ return nil
+}
+
+func (m *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if m != nil {
+ return m.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `DeleteAlertPolicy` request.
+type DeleteAlertPolicyRequest struct {
+ // The alerting policy to delete. The format is:
+ //
+ // projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteAlertPolicyRequest) Reset() { *m = DeleteAlertPolicyRequest{} }
+func (m *DeleteAlertPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteAlertPolicyRequest) ProtoMessage() {}
+func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c45362b2a456d1bf, []int{5}
+}
+
+func (m *DeleteAlertPolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteAlertPolicyRequest.Unmarshal(m, b)
+}
+func (m *DeleteAlertPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteAlertPolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteAlertPolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteAlertPolicyRequest.Merge(m, src)
+}
+func (m *DeleteAlertPolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteAlertPolicyRequest.Size(m)
+}
+func (m *DeleteAlertPolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteAlertPolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteAlertPolicyRequest proto.InternalMessageInfo
+
+func (m *DeleteAlertPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*CreateAlertPolicyRequest)(nil), "google.monitoring.v3.CreateAlertPolicyRequest")
+ proto.RegisterType((*GetAlertPolicyRequest)(nil), "google.monitoring.v3.GetAlertPolicyRequest")
+ proto.RegisterType((*ListAlertPoliciesRequest)(nil), "google.monitoring.v3.ListAlertPoliciesRequest")
+ proto.RegisterType((*ListAlertPoliciesResponse)(nil), "google.monitoring.v3.ListAlertPoliciesResponse")
+ proto.RegisterType((*UpdateAlertPolicyRequest)(nil), "google.monitoring.v3.UpdateAlertPolicyRequest")
+ proto.RegisterType((*DeleteAlertPolicyRequest)(nil), "google.monitoring.v3.DeleteAlertPolicyRequest")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/alert_service.proto", fileDescriptor_c45362b2a456d1bf)
+}
+
+var fileDescriptor_c45362b2a456d1bf = []byte{
+ // 656 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0xd3, 0x4c,
+ 0x10, 0x95, 0x93, 0x36, 0x5f, 0xbb, 0xfd, 0x5a, 0x94, 0x15, 0x54, 0xae, 0x0b, 0x52, 0x30, 0x2a,
+ 0x54, 0xad, 0xb0, 0xa5, 0xf8, 0x04, 0x15, 0x48, 0xa4, 0x85, 0xf6, 0x40, 0xa5, 0x28, 0x85, 0x1e,
+ 0x50, 0xa4, 0x68, 0x93, 0x4c, 0xac, 0x25, 0x8e, 0xd7, 0x78, 0x37, 0x11, 0x29, 0xea, 0x85, 0x23,
+ 0x12, 0xe2, 0xc0, 0x99, 0x03, 0x47, 0x38, 0x20, 0x7e, 0x07, 0x57, 0xfe, 0x02, 0x3f, 0x04, 0x79,
+ 0xed, 0x34, 0x76, 0x6d, 0xab, 0x16, 0xb7, 0xcc, 0xce, 0xdb, 0x99, 0xb7, 0x6f, 0xde, 0x38, 0x68,
+ 0xdb, 0x66, 0xcc, 0x76, 0xc0, 0x1c, 0x31, 0x97, 0x0a, 0xe6, 0x53, 0xd7, 0x36, 0x27, 0x96, 0x49,
+ 0x1c, 0xf0, 0x45, 0x87, 0x83, 0x3f, 0xa1, 0x3d, 0x30, 0x3c, 0x9f, 0x09, 0x86, 0xaf, 0x87, 0x48,
+ 0x63, 0x8e, 0x34, 0x26, 0x96, 0x76, 0x33, 0xba, 0x4f, 0x3c, 0x6a, 0x12, 0xd7, 0x65, 0x82, 0x08,
+ 0xca, 0x5c, 0x1e, 0xde, 0xd1, 0x6a, 0xf9, 0xd5, 0x23, 0xc4, 0x66, 0x84, 0x90, 0x51, 0x77, 0x3c,
+ 0x30, 0x61, 0xe4, 0x89, 0xe9, 0xa5, 0xeb, 0x17, 0xc9, 0x01, 0x05, 0xa7, 0xdf, 0x19, 0x11, 0x3e,
+ 0x0c, 0x11, 0xba, 0x40, 0xea, 0xbe, 0x0f, 0x44, 0xc0, 0x93, 0xa0, 0x66, 0x93, 0x39, 0xb4, 0x37,
+ 0x6d, 0xc1, 0x9b, 0x31, 0x70, 0x81, 0x31, 0x5a, 0x70, 0xc9, 0x08, 0xd4, 0x72, 0x4d, 0xd9, 0x5e,
+ 0x6e, 0xc9, 0xdf, 0xf8, 0x00, 0xfd, 0x1f, 0xbe, 0xcd, 0x93, 0x50, 0xb5, 0x54, 0x53, 0xb6, 0x57,
+ 0xea, 0xb7, 0x8d, 0xac, 0xb7, 0x19, 0xf1, 0x9a, 0x2b, 0x64, 0x1e, 0xe8, 0xbb, 0xe8, 0xc6, 0x21,
+ 0x88, 0x62, 0x2d, 0xf5, 0x2f, 0x0a, 0x52, 0x9f, 0x53, 0x1e, 0x83, 0x53, 0xe0, 0x97, 0x2f, 0x2c,
+ 0xc4, 0x38, 0xae, 0xa3, 0xca, 0x80, 0x3a, 0x02, 0x7c, 0x75, 0x51, 0x9e, 0x46, 0x11, 0xde, 0x40,
+ 0x4b, 0xcc, 0xef, 0x83, 0xdf, 0xe9, 0x4e, 0xd5, 0x8a, 0xcc, 0xfc, 0x27, 0xe3, 0xc6, 0x14, 0x6f,
+ 0xa2, 0x65, 0x8f, 0xd8, 0xd0, 0xe1, 0xf4, 0x0c, 0xe4, 0x9b, 0x16, 0x5b, 0x4b, 0xc1, 0xc1, 0x09,
+ 0x3d, 0x03, 0x7c, 0x0b, 0x21, 0x99, 0x14, 0x6c, 0x08, 0x6e, 0x44, 0x4d, 0xc2, 0x5f, 0x04, 0x07,
+ 0xfa, 0x47, 0x05, 0x6d, 0x64, 0xf0, 0xe3, 0x1e, 0x73, 0x39, 0xe0, 0x23, 0xb4, 0x16, 0x13, 0x8c,
+ 0x02, 0x57, 0xcb, 0xb5, 0x72, 0x31, 0xc9, 0x56, 0x49, 0xbc, 0x22, 0xbe, 0x8b, 0xae, 0xb9, 0xf0,
+ 0x56, 0x74, 0x62, 0x5c, 0x4a, 0x92, 0xcb, 0x6a, 0x70, 0xdc, 0xbc, 0xe0, 0x13, 0xe8, 0xf5, 0xd2,
+ 0xeb, 0x67, 0xcf, 0x74, 0x0f, 0xad, 0x8c, 0x65, 0x4e, 0x9a, 0x20, 0x1a, 0x9f, 0x36, 0xe3, 0x32,
+ 0xf3, 0x89, 0xf1, 0x2c, 0xf0, 0xc9, 0x31, 0xe1, 0xc3, 0x16, 0x0a, 0xe1, 0xc1, 0xef, 0xd4, 0xf0,
+ 0xcb, 0xff, 0x34, 0x7c, 0x03, 0xa9, 0x07, 0xe0, 0x40, 0x51, 0xcb, 0xd5, 0x7f, 0x54, 0x10, 0x8e,
+ 0x41, 0x4f, 0xc2, 0xa5, 0xc2, 0x5f, 0x15, 0x54, 0x4d, 0xc9, 0x8e, 0x8d, 0x6c, 0x32, 0x79, 0xfe,
+ 0xd1, 0xcc, 0xc2, 0xf8, 0x70, 0x9e, 0xfa, 0xee, 0xfb, 0xdf, 0x7f, 0x3e, 0x97, 0xb6, 0xf0, 0x9d,
+ 0x60, 0x11, 0xdf, 0x05, 0x04, 0x1f, 0x79, 0x3e, 0x7b, 0x0d, 0x3d, 0xc1, 0xcd, 0x9d, 0x73, 0x33,
+ 0x39, 0xb2, 0x4f, 0x0a, 0x5a, 0x4b, 0x1a, 0x1d, 0xef, 0x66, 0x37, 0xcc, 0x5c, 0x07, 0xed, 0x6a,
+ 0x69, 0xf5, 0xfb, 0x92, 0xcf, 0x3d, 0xbc, 0x95, 0xc5, 0x27, 0x49, 0xc7, 0xdc, 0x39, 0x97, 0xaa,
+ 0xa5, 0x16, 0x3e, 0x4f, 0xb5, 0xbc, 0x2f, 0x43, 0x11, 0x5e, 0x0f, 0x24, 0x2f, 0x4b, 0x2f, 0xa2,
+ 0xd3, 0xc3, 0x84, 0xad, 0xf0, 0x07, 0x05, 0x55, 0x53, 0x0e, 0xc9, 0xe3, 0x98, 0x67, 0x25, 0x6d,
+ 0x3d, 0x65, 0xea, 0xa7, 0xc1, 0x97, 0x71, 0x26, 0xd8, 0x4e, 0x41, 0xc1, 0x7e, 0x2a, 0xa8, 0x9a,
+ 0xda, 0xa6, 0x3c, 0x32, 0x79, 0x6b, 0x57, 0x44, 0xb0, 0x23, 0xc9, 0xab, 0x51, 0xaf, 0x4b, 0x5e,
+ 0x71, 0x41, 0x8c, 0xab, 0x48, 0x26, 0xf5, 0x6b, 0x7c, 0x53, 0x90, 0xda, 0x63, 0xa3, 0xcc, 0x96,
+ 0x8d, 0xaa, 0xec, 0x19, 0x2d, 0x51, 0x33, 0x90, 0xa6, 0xa9, 0xbc, 0x7a, 0x1c, 0x41, 0x6d, 0xe6,
+ 0x10, 0xd7, 0x36, 0x98, 0x6f, 0x9b, 0x36, 0xb8, 0x52, 0x38, 0x33, 0x4c, 0x11, 0x8f, 0xf2, 0xe4,
+ 0xbf, 0xd0, 0xde, 0x3c, 0xfa, 0x5e, 0xd2, 0x0e, 0xc3, 0x02, 0xfb, 0x0e, 0x1b, 0xf7, 0x8d, 0xe3,
+ 0x79, 0xc7, 0x53, 0xeb, 0xd7, 0x2c, 0xd9, 0x96, 0xc9, 0xf6, 0x3c, 0xd9, 0x3e, 0xb5, 0xba, 0x15,
+ 0xd9, 0xc4, 0xfa, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x1f, 0xe6, 0xf0, 0x47, 0x07, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// AlertPolicyServiceClient is the client API for AlertPolicyService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AlertPolicyServiceClient interface {
+ // Lists the existing alerting policies for the project.
+ ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+}
+
+type alertPolicyServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewAlertPolicyServiceClient(cc *grpc.ClientConn) AlertPolicyServiceClient {
+ return &alertPolicyServiceClient{cc}
+}
+
+func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
+ out := new(ListAlertPoliciesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AlertPolicyServiceServer is the server API for AlertPolicyService service.
+type AlertPolicyServiceServer interface {
+ // Lists the existing alerting policies for the project.
+ ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*empty.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
+}
+
+func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
+ s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
+}
+
+func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListAlertPoliciesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.AlertPolicyService",
+ HandlerType: (*AlertPolicyServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListAlertPolicies",
+ Handler: _AlertPolicyService_ListAlertPolicies_Handler,
+ },
+ {
+ MethodName: "GetAlertPolicy",
+ Handler: _AlertPolicyService_GetAlertPolicy_Handler,
+ },
+ {
+ MethodName: "CreateAlertPolicy",
+ Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
+ },
+ {
+ MethodName: "DeleteAlertPolicy",
+ Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
+ },
+ {
+ MethodName: "UpdateAlertPolicy",
+ Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/alert_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
new file mode 100644
index 00000000000..4e48c0e099e
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go
@@ -0,0 +1,898 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/common.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ distribution "google.golang.org/genproto/googleapis/api/distribution"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Specifies an ordering relationship on two arguments, here called left and
+// right.
+type ComparisonType int32
+
+const (
+ // No ordering relationship is specified.
+ ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0
+ // The left argument is greater than the right argument.
+ ComparisonType_COMPARISON_GT ComparisonType = 1
+ // The left argument is greater than or equal to the right argument.
+ ComparisonType_COMPARISON_GE ComparisonType = 2
+ // The left argument is less than the right argument.
+ ComparisonType_COMPARISON_LT ComparisonType = 3
+ // The left argument is less than or equal to the right argument.
+ ComparisonType_COMPARISON_LE ComparisonType = 4
+ // The left argument is equal to the right argument.
+ ComparisonType_COMPARISON_EQ ComparisonType = 5
+ // The left argument is not equal to the right argument.
+ ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+var ComparisonType_name = map[int32]string{
+ 0: "COMPARISON_UNSPECIFIED",
+ 1: "COMPARISON_GT",
+ 2: "COMPARISON_GE",
+ 3: "COMPARISON_LT",
+ 4: "COMPARISON_LE",
+ 5: "COMPARISON_EQ",
+ 6: "COMPARISON_NE",
+}
+
+var ComparisonType_value = map[string]int32{
+ "COMPARISON_UNSPECIFIED": 0,
+ "COMPARISON_GT": 1,
+ "COMPARISON_GE": 2,
+ "COMPARISON_LT": 3,
+ "COMPARISON_LE": 4,
+ "COMPARISON_EQ": 5,
+ "COMPARISON_NE": 6,
+}
+
+func (x ComparisonType) String() string {
+ return proto.EnumName(ComparisonType_name, int32(x))
+}
+
+func (ComparisonType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{0}
+}
+
+// The tier of service for a Stackdriver account. Please see the
+// [service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers)
+// for more details.
+type ServiceTier int32 // Deprecated: Do not use.
+const (
+ // An invalid sentinel value, used to indicate that a tier has not
+ // been provided explicitly.
+ ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+ // The Stackdriver Basic tier, a free tier of service that provides basic
+ // features, a moderate allotment of logs, and access to built-in metrics.
+ // A number of features are not available in this tier. For more details,
+ // see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+ // The Stackdriver Premium tier, a higher, more expensive tier of service
+ // that provides access to all Stackdriver features, lets you use Stackdriver
+ // with AWS accounts, and has a larger allotments for logs and metrics. For
+ // more details, see [the service tiers documentation](https://cloud.google.com/monitoring/accounts/tiers).
+ ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+var ServiceTier_name = map[int32]string{
+ 0: "SERVICE_TIER_UNSPECIFIED",
+ 1: "SERVICE_TIER_BASIC",
+ 2: "SERVICE_TIER_PREMIUM",
+}
+
+var ServiceTier_value = map[string]int32{
+ "SERVICE_TIER_UNSPECIFIED": 0,
+ "SERVICE_TIER_BASIC": 1,
+ "SERVICE_TIER_PREMIUM": 2,
+}
+
+func (x ServiceTier) String() string {
+ return proto.EnumName(ServiceTier_name, int32(x))
+}
+
+func (ServiceTier) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{1}
+}
+
+// The Aligner describes how to bring the data points in a single
+// time series into temporal alignment.
+type Aggregation_Aligner int32
+
+const (
+ // No alignment. Raw data is returned. Not valid if cross-time
+ // series reduction is requested. The value type of the result is
+ // the same as the value type of the input.
+ Aggregation_ALIGN_NONE Aggregation_Aligner = 0
+ // Align and convert to delta metric type. This alignment is valid
+ // for cumulative metrics and delta metrics. Aligning an existing
+ // delta metric to a delta metric requires that the alignment
+ // period be increased. The value type of the result is the same
+ // as the value type of the input.
+ //
+ // One can think of this aligner as a rate but without time units; that
+ // is, the output is conceptually (second_point - first_point).
+ Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
+ // Align and convert to a rate. This alignment is valid for
+ // cumulative metrics and delta metrics with numeric values. The output is a
+ // gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ //
+ // One can think of this aligner as conceptually providing the slope of
+ // the line that passes through the value at the start and end of the
+ // window. In other words, this is conceptually ((y1 - y0)/(t1 - t0)),
+ // and the output unit is one that has a "/time" dimension.
+ //
+ // If, by rate, you are looking for percentage change, see the
+ // `ALIGN_PERCENT_CHANGE` aligner option.
+ Aggregation_ALIGN_RATE Aggregation_Aligner = 2
+ // Align by interpolating between adjacent points around the
+ // period boundary. This alignment is valid for gauge
+ // metrics with numeric values. The value type of the result is the same
+ // as the value type of the input.
+ Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3
+ // Align by shifting the oldest data point before the period
+ // boundary to the boundary. This alignment is valid for gauge
+ // metrics. The value type of the result is the same as the
+ // value type of the input.
+ Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the minimum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // values. The value type of the result is the same as the value
+ // type of the input.
+ Aggregation_ALIGN_MIN Aggregation_Aligner = 10
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the maximum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // values. The value type of the result is the same as the value
+ // type of the input.
+ Aggregation_ALIGN_MAX Aggregation_Aligner = 11
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the average or arithmetic mean of all
+ // data points in the period. This alignment is valid for gauge and delta
+ // metrics with numeric values. The value type of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_MEAN Aggregation_Aligner = 12
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // or Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_ALIGN_COUNT Aggregation_Aligner = 13
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the sum of all data points in the
+ // period. This alignment is valid for gauge and delta metrics with numeric
+ // and distribution values. The value type of the output is the
+ // same as the value type of the input.
+ Aggregation_ALIGN_SUM Aggregation_Aligner = 14
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the standard deviation of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with numeric values. The value type of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of True-valued data points in the
+ // period. This alignment is valid for gauge metrics with
+ // Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the count of False-valued data points in the
+ // period. This alignment is valid for gauge metrics with
+ // Boolean values. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the fraction of True-valued data points in the
+ // period. This alignment is valid for gauge metrics with Boolean values.
+ // The output value is in the range [0, 1] and has value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 99th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 95th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 50th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20
+ // Align time series via aggregation. The resulting data point in
+ // the alignment period is the 5th percentile of all data
+ // points in the period. This alignment is valid for gauge and delta metrics
+ // with distribution values. The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
+ // Align and convert to a percentage change. This alignment is valid for
+ // gauge and delta metrics with numeric values. This alignment conceptually
+ // computes the equivalent of "((current - previous)/previous)*100"
+ // where previous value is determined based on the alignmentPeriod.
+ // In the event that previous is 0 the calculated value is infinity with the
+ // exception that if both (current - previous) and previous are 0 the
+ // calculated value is 0.
+ // A 10 minute moving mean is computed at each point of the time window
+ // prior to the above calculation to smooth the metric and prevent false
+ // positives from very short lived spikes.
+ // Only applicable for data that is >= 0. Any values < 0 are treated as
+ // no data. While delta metrics are accepted by this alignment special care
+ // should be taken that the values for the metric will always be positive.
+ // The output is a gauge metric with value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23
+)
+
+var Aggregation_Aligner_name = map[int32]string{
+ 0: "ALIGN_NONE",
+ 1: "ALIGN_DELTA",
+ 2: "ALIGN_RATE",
+ 3: "ALIGN_INTERPOLATE",
+ 4: "ALIGN_NEXT_OLDER",
+ 10: "ALIGN_MIN",
+ 11: "ALIGN_MAX",
+ 12: "ALIGN_MEAN",
+ 13: "ALIGN_COUNT",
+ 14: "ALIGN_SUM",
+ 15: "ALIGN_STDDEV",
+ 16: "ALIGN_COUNT_TRUE",
+ 24: "ALIGN_COUNT_FALSE",
+ 17: "ALIGN_FRACTION_TRUE",
+ 18: "ALIGN_PERCENTILE_99",
+ 19: "ALIGN_PERCENTILE_95",
+ 20: "ALIGN_PERCENTILE_50",
+ 21: "ALIGN_PERCENTILE_05",
+ 23: "ALIGN_PERCENT_CHANGE",
+}
+
+var Aggregation_Aligner_value = map[string]int32{
+ "ALIGN_NONE": 0,
+ "ALIGN_DELTA": 1,
+ "ALIGN_RATE": 2,
+ "ALIGN_INTERPOLATE": 3,
+ "ALIGN_NEXT_OLDER": 4,
+ "ALIGN_MIN": 10,
+ "ALIGN_MAX": 11,
+ "ALIGN_MEAN": 12,
+ "ALIGN_COUNT": 13,
+ "ALIGN_SUM": 14,
+ "ALIGN_STDDEV": 15,
+ "ALIGN_COUNT_TRUE": 16,
+ "ALIGN_COUNT_FALSE": 24,
+ "ALIGN_FRACTION_TRUE": 17,
+ "ALIGN_PERCENTILE_99": 18,
+ "ALIGN_PERCENTILE_95": 19,
+ "ALIGN_PERCENTILE_50": 20,
+ "ALIGN_PERCENTILE_05": 21,
+ "ALIGN_PERCENT_CHANGE": 23,
+}
+
+func (x Aggregation_Aligner) String() string {
+ return proto.EnumName(Aggregation_Aligner_name, int32(x))
+}
+
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{2, 0}
+}
+
+// A Reducer describes how to aggregate data points from multiple
+// time series into a single time series.
+type Aggregation_Reducer int32
+
+const (
+ // No cross-time series reduction. The output of the aligner is
+ // returned.
+ Aggregation_REDUCE_NONE Aggregation_Reducer = 0
+ // Reduce by computing the mean across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric or distribution values. The value type of the
+ // output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_REDUCE_MEAN Aggregation_Reducer = 1
+ // Reduce by computing the minimum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric values. The value type of the output
+ // is the same as the value type of the input.
+ Aggregation_REDUCE_MIN Aggregation_Reducer = 2
+ // Reduce by computing the maximum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric values. The value type of the output
+ // is the same as the value type of the input.
+ Aggregation_REDUCE_MAX Aggregation_Reducer = 3
+ // Reduce by computing the sum across time series for each
+ // alignment period. This reducer is valid for delta and
+ // gauge metrics with numeric and distribution values. The value type of
+ // the output is the same as the value type of the input.
+ Aggregation_REDUCE_SUM Aggregation_Reducer = 4
+ // Reduce by computing the standard deviation across time series
+ // for each alignment period. This reducer is valid for delta
+ // and gauge metrics with numeric or distribution values. The value type of
+ // the output is [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5
+ // Reduce by computing the count of data points across time series
+ // for each alignment period. This reducer is valid for delta
+ // and gauge metrics of numeric, Boolean, distribution, and string value
+ // type. The value type of the output is
+ // [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_REDUCE_COUNT Aggregation_Reducer = 6
+ // Reduce by computing the count of True-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The value type of
+ // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7
+ // Reduce by computing the count of False-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The value type of
+ // the output is [INT64][google.api.MetricDescriptor.ValueType.INT64].
+ Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15
+ // Reduce by computing the fraction of True-valued data points across time
+ // series for each alignment period. This reducer is valid for delta
+ // and gauge metrics of Boolean value type. The output value is in the
+ // range [0, 1] and has value type
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8
+ // Reduce by computing 99th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9
+ // Reduce by computing 95th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10
+ // Reduce by computing 50th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11
+ // Reduce by computing 5th percentile of data points across time series
+ // for each alignment period. This reducer is valid for gauge and delta
+ // metrics of numeric and distribution type. The value of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE]
+ Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12
+)
+
+var Aggregation_Reducer_name = map[int32]string{
+ 0: "REDUCE_NONE",
+ 1: "REDUCE_MEAN",
+ 2: "REDUCE_MIN",
+ 3: "REDUCE_MAX",
+ 4: "REDUCE_SUM",
+ 5: "REDUCE_STDDEV",
+ 6: "REDUCE_COUNT",
+ 7: "REDUCE_COUNT_TRUE",
+ 15: "REDUCE_COUNT_FALSE",
+ 8: "REDUCE_FRACTION_TRUE",
+ 9: "REDUCE_PERCENTILE_99",
+ 10: "REDUCE_PERCENTILE_95",
+ 11: "REDUCE_PERCENTILE_50",
+ 12: "REDUCE_PERCENTILE_05",
+}
+
+var Aggregation_Reducer_value = map[string]int32{
+ "REDUCE_NONE": 0,
+ "REDUCE_MEAN": 1,
+ "REDUCE_MIN": 2,
+ "REDUCE_MAX": 3,
+ "REDUCE_SUM": 4,
+ "REDUCE_STDDEV": 5,
+ "REDUCE_COUNT": 6,
+ "REDUCE_COUNT_TRUE": 7,
+ "REDUCE_COUNT_FALSE": 15,
+ "REDUCE_FRACTION_TRUE": 8,
+ "REDUCE_PERCENTILE_99": 9,
+ "REDUCE_PERCENTILE_95": 10,
+ "REDUCE_PERCENTILE_50": 11,
+ "REDUCE_PERCENTILE_05": 12,
+}
+
+func (x Aggregation_Reducer) String() string {
+ return proto.EnumName(Aggregation_Reducer_name, int32(x))
+}
+
+func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{2, 1}
+}
+
+// A single strongly-typed value.
+type TypedValue struct {
+ // The typed value field.
+ //
+ // Types that are valid to be assigned to Value:
+ // *TypedValue_BoolValue
+ // *TypedValue_Int64Value
+ // *TypedValue_DoubleValue
+ // *TypedValue_StringValue
+ // *TypedValue_DistributionValue
+ Value isTypedValue_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TypedValue) Reset() { *m = TypedValue{} }
+func (m *TypedValue) String() string { return proto.CompactTextString(m) }
+func (*TypedValue) ProtoMessage() {}
+func (*TypedValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{0}
+}
+
+func (m *TypedValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TypedValue.Unmarshal(m, b)
+}
+func (m *TypedValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TypedValue.Marshal(b, m, deterministic)
+}
+func (m *TypedValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TypedValue.Merge(m, src)
+}
+func (m *TypedValue) XXX_Size() int {
+ return xxx_messageInfo_TypedValue.Size(m)
+}
+func (m *TypedValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_TypedValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypedValue proto.InternalMessageInfo
+
+type isTypedValue_Value interface {
+ isTypedValue_Value()
+}
+
+type TypedValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type TypedValue_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type TypedValue_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type TypedValue_StringValue struct {
+ StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type TypedValue_DistributionValue struct {
+ DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+func (*TypedValue_BoolValue) isTypedValue_Value() {}
+
+func (*TypedValue_Int64Value) isTypedValue_Value() {}
+
+func (*TypedValue_DoubleValue) isTypedValue_Value() {}
+
+func (*TypedValue_StringValue) isTypedValue_Value() {}
+
+func (*TypedValue_DistributionValue) isTypedValue_Value() {}
+
+func (m *TypedValue) GetValue() isTypedValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *TypedValue) GetBoolValue() bool {
+ if x, ok := m.GetValue().(*TypedValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *TypedValue) GetInt64Value() int64 {
+ if x, ok := m.GetValue().(*TypedValue_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *TypedValue) GetDoubleValue() float64 {
+ if x, ok := m.GetValue().(*TypedValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *TypedValue) GetStringValue() string {
+ if x, ok := m.GetValue().(*TypedValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *TypedValue) GetDistributionValue() *distribution.Distribution {
+ if x, ok := m.GetValue().(*TypedValue_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*TypedValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _TypedValue_OneofMarshaler, _TypedValue_OneofUnmarshaler, _TypedValue_OneofSizer, []interface{}{
+ (*TypedValue_BoolValue)(nil),
+ (*TypedValue_Int64Value)(nil),
+ (*TypedValue_DoubleValue)(nil),
+ (*TypedValue_StringValue)(nil),
+ (*TypedValue_DistributionValue)(nil),
+ }
+}
+
+func _TypedValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*TypedValue)
+ // value
+ switch x := m.Value.(type) {
+ case *TypedValue_BoolValue:
+ t := uint64(0)
+ if x.BoolValue {
+ t = 1
+ }
+ b.EncodeVarint(1<<3 | proto.WireVarint)
+ b.EncodeVarint(t)
+ case *TypedValue_Int64Value:
+ b.EncodeVarint(2<<3 | proto.WireVarint)
+ b.EncodeVarint(uint64(x.Int64Value))
+ case *TypedValue_DoubleValue:
+ b.EncodeVarint(3<<3 | proto.WireFixed64)
+ b.EncodeFixed64(math.Float64bits(x.DoubleValue))
+ case *TypedValue_StringValue:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.StringValue)
+ case *TypedValue_DistributionValue:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.DistributionValue); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("TypedValue.Value has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _TypedValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*TypedValue)
+ switch tag {
+ case 1: // value.bool_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &TypedValue_BoolValue{x != 0}
+ return true, err
+ case 2: // value.int64_value
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.Value = &TypedValue_Int64Value{int64(x)}
+ return true, err
+ case 3: // value.double_value
+ if wire != proto.WireFixed64 {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeFixed64()
+ m.Value = &TypedValue_DoubleValue{math.Float64frombits(x)}
+ return true, err
+ case 4: // value.string_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Value = &TypedValue_StringValue{x}
+ return true, err
+ case 5: // value.distribution_value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(distribution.Distribution)
+ err := b.DecodeMessage(msg)
+ m.Value = &TypedValue_DistributionValue{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _TypedValue_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*TypedValue)
+ // value
+ switch x := m.Value.(type) {
+ case *TypedValue_BoolValue:
+ n += 1 // tag and wire
+ n += 1
+ case *TypedValue_Int64Value:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(x.Int64Value))
+ case *TypedValue_DoubleValue:
+ n += 1 // tag and wire
+ n += 8
+ case *TypedValue_StringValue:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.StringValue)))
+ n += len(x.StringValue)
+ case *TypedValue_DistributionValue:
+ s := proto.Size(x.DistributionValue)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// A time interval extending just after a start time through an end time.
+// If the start time is the same as the end time, then the interval
+// represents a single point in time.
+type TimeInterval struct {
+ // Required. The end of the time interval.
+ EndTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // Optional. The beginning of the time interval. The default value
+ // for the start time is the end time. The start time must not be
+ // later than the end time.
+ StartTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TimeInterval) Reset() { *m = TimeInterval{} }
+func (m *TimeInterval) String() string { return proto.CompactTextString(m) }
+func (*TimeInterval) ProtoMessage() {}
+func (*TimeInterval) Descriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{1}
+}
+
+func (m *TimeInterval) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TimeInterval.Unmarshal(m, b)
+}
+func (m *TimeInterval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TimeInterval.Marshal(b, m, deterministic)
+}
+func (m *TimeInterval) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeInterval.Merge(m, src)
+}
+func (m *TimeInterval) XXX_Size() int {
+ return xxx_messageInfo_TimeInterval.Size(m)
+}
+func (m *TimeInterval) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeInterval.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeInterval proto.InternalMessageInfo
+
+func (m *TimeInterval) GetEndTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.EndTime
+ }
+ return nil
+}
+
+func (m *TimeInterval) GetStartTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTime
+ }
+ return nil
+}
+
+// Describes how to combine multiple time series to provide different views of
+// the data. Aggregation consists of an alignment step on individual time
+// series (`alignment_period` and `per_series_aligner`) followed by an optional
+// reduction step of the data across the aligned time series
+// (`cross_series_reducer` and `group_by_fields`). For more details, see
+// [Aggregation](/monitoring/api/learn_more#aggregation).
+type Aggregation struct {
+ // The alignment period for per-[time series][google.monitoring.v3.TimeSeries]
+ // alignment. If present, `alignmentPeriod` must be at least 60
+ // seconds. After per-time series alignment, each time series will
+ // contain data points only on the period boundaries. If
+ // `perSeriesAligner` is not specified or equals `ALIGN_NONE`, then
+ // this field is ignored. If `perSeriesAligner` is specified and
+ // does not equal `ALIGN_NONE`, then this field must be defined;
+ // otherwise an error is returned.
+ AlignmentPeriod *duration.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"`
+ // The approach to be used to align individual time series. Not all
+ // alignment functions may be applied to all time series, depending
+ // on the metric type and value type of the original time
+ // series. Alignment may change the metric type or the value type of
+ // the time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"`
+ // The approach to be used to combine time series. Not all reducer
+ // functions may be applied to all time series, depending on the
+ // metric type and the value type of the original time
+ // series. Reduction may change the metric type of value type of the
+ // time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `crossSeriesReducer` is specified, then
+ // `perSeriesAligner` must be specified and not equal `ALIGN_NONE`
+ // and `alignmentPeriod` must be specified; otherwise, an error is
+ // returned.
+ CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"`
+ // The set of fields to preserve when `crossSeriesReducer` is
+ // specified. The `groupByFields` determine how the time series are
+ // partitioned into subsets prior to applying the aggregation
+ // function. Each subset contains time series that have the same
+ // value for each of the grouping fields. Each individual time
+ // series is a member of exactly one subset. The
+ // `crossSeriesReducer` is applied to each subset of time series.
+ // It is not possible to reduce across different resource types, so
+ // this field implicitly contains `resource.type`. Fields not
+ // specified in `groupByFields` are aggregated away. If
+ // `groupByFields` is not specified and all the time series have
+ // the same resource type, then the time series are aggregated into
+ // a single output time series. If `crossSeriesReducer` is not
+ // defined, this field is ignored.
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Aggregation) Reset() { *m = Aggregation{} }
+func (m *Aggregation) String() string { return proto.CompactTextString(m) }
+func (*Aggregation) ProtoMessage() {}
+func (*Aggregation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_013c57c1dcbb8d65, []int{2}
+}
+
+func (m *Aggregation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Aggregation.Unmarshal(m, b)
+}
+func (m *Aggregation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Aggregation.Marshal(b, m, deterministic)
+}
+func (m *Aggregation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Aggregation.Merge(m, src)
+}
+func (m *Aggregation) XXX_Size() int {
+ return xxx_messageInfo_Aggregation.Size(m)
+}
+func (m *Aggregation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Aggregation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Aggregation proto.InternalMessageInfo
+
+func (m *Aggregation) GetAlignmentPeriod() *duration.Duration {
+ if m != nil {
+ return m.AlignmentPeriod
+ }
+ return nil
+}
+
+func (m *Aggregation) GetPerSeriesAligner() Aggregation_Aligner {
+ if m != nil {
+ return m.PerSeriesAligner
+ }
+ return Aggregation_ALIGN_NONE
+}
+
+func (m *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer {
+ if m != nil {
+ return m.CrossSeriesReducer
+ }
+ return Aggregation_REDUCE_NONE
+}
+
+func (m *Aggregation) GetGroupByFields() []string {
+ if m != nil {
+ return m.GroupByFields
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.monitoring.v3.ComparisonType", ComparisonType_name, ComparisonType_value)
+ proto.RegisterEnum("google.monitoring.v3.ServiceTier", ServiceTier_name, ServiceTier_value)
+ proto.RegisterEnum("google.monitoring.v3.Aggregation_Aligner", Aggregation_Aligner_name, Aggregation_Aligner_value)
+ proto.RegisterEnum("google.monitoring.v3.Aggregation_Reducer", Aggregation_Reducer_name, Aggregation_Reducer_value)
+ proto.RegisterType((*TypedValue)(nil), "google.monitoring.v3.TypedValue")
+ proto.RegisterType((*TimeInterval)(nil), "google.monitoring.v3.TimeInterval")
+ proto.RegisterType((*Aggregation)(nil), "google.monitoring.v3.Aggregation")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/common.proto", fileDescriptor_013c57c1dcbb8d65) }
+
+var fileDescriptor_013c57c1dcbb8d65 = []byte{
+ // 957 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xc1, 0x6e, 0xe3, 0x44,
+ 0x18, 0xc7, 0xe3, 0x64, 0xdb, 0x34, 0x9f, 0xdb, 0x66, 0x3a, 0xdb, 0xed, 0x86, 0x68, 0x61, 0xb3,
+ 0x45, 0x42, 0x61, 0x0f, 0x4e, 0xd5, 0x12, 0xa4, 0x0a, 0x09, 0xc9, 0x75, 0xa6, 0xad, 0xa5, 0xc4,
+ 0x09, 0x13, 0xa7, 0x54, 0x50, 0xc9, 0x72, 0x9a, 0x59, 0xcb, 0x52, 0xe2, 0xb1, 0x6c, 0xa7, 0x52,
+ 0x6f, 0xdc, 0x79, 0x07, 0x2e, 0xdc, 0xb8, 0xf1, 0x1a, 0x3c, 0x0c, 0x17, 0x5e, 0x00, 0x79, 0xc6,
+ 0x59, 0x3b, 0x21, 0x08, 0x8e, 0xdf, 0xef, 0xff, 0xff, 0xbe, 0x99, 0xf9, 0x8f, 0x35, 0x86, 0x77,
+ 0x1e, 0xe7, 0xde, 0x9c, 0x75, 0x16, 0x3c, 0xf0, 0x13, 0x1e, 0xf9, 0x81, 0xd7, 0x79, 0xba, 0xe8,
+ 0x3c, 0xf2, 0xc5, 0x82, 0x07, 0x5a, 0x18, 0xf1, 0x84, 0xe3, 0x63, 0x69, 0xd1, 0x72, 0x8b, 0xf6,
+ 0x74, 0xd1, 0x7c, 0x93, 0x35, 0xba, 0xa1, 0xdf, 0x71, 0x83, 0x80, 0x27, 0x6e, 0xe2, 0xf3, 0x20,
+ 0x96, 0x3d, 0xcd, 0x4f, 0x0b, 0xea, 0xcc, 0x8f, 0x93, 0xc8, 0x9f, 0x2e, 0x53, 0x3d, 0x93, 0x3f,
+ 0xcb, 0x64, 0x51, 0x4d, 0x97, 0x1f, 0x3a, 0xb3, 0x65, 0xe4, 0x16, 0xf4, 0xb7, 0x9b, 0x7a, 0xe2,
+ 0x2f, 0x58, 0x9c, 0xb8, 0x8b, 0x50, 0x1a, 0x4e, 0xff, 0x54, 0x00, 0xec, 0xe7, 0x90, 0xcd, 0xee,
+ 0xdc, 0xf9, 0x92, 0xe1, 0xb7, 0x00, 0x53, 0xce, 0xe7, 0xce, 0x53, 0x5a, 0x35, 0x94, 0x96, 0xd2,
+ 0xde, 0xbb, 0x2d, 0xd1, 0x5a, 0xca, 0xa4, 0xe1, 0x1d, 0xa8, 0x7e, 0x90, 0x7c, 0xfd, 0x55, 0xe6,
+ 0x28, 0xb7, 0x94, 0x76, 0xe5, 0xb6, 0x44, 0x41, 0x40, 0x69, 0xf9, 0x1c, 0xf6, 0x67, 0x7c, 0x39,
+ 0x9d, 0xb3, 0xcc, 0x53, 0x69, 0x29, 0x6d, 0xe5, 0xb6, 0x44, 0x55, 0x49, 0x3f, 0x9a, 0xd2, 0xc3,
+ 0x04, 0x5e, 0x66, 0x7a, 0xd1, 0x52, 0xda, 0xb5, 0xd4, 0x24, 0xa9, 0x34, 0x99, 0x80, 0x8b, 0x67,
+ 0xce, 0xac, 0x3b, 0x2d, 0xa5, 0xad, 0x9e, 0x37, 0xb4, 0x2c, 0x4d, 0x37, 0xf4, 0xb5, 0x5e, 0xc1,
+ 0x75, 0x5b, 0xa2, 0x47, 0xc5, 0x2e, 0x31, 0xea, 0xaa, 0x0a, 0x3b, 0xa2, 0xfb, 0xf4, 0x27, 0x05,
+ 0xf6, 0x6d, 0x7f, 0xc1, 0xcc, 0x20, 0x61, 0xd1, 0x93, 0x3b, 0xc7, 0x5d, 0xd8, 0x63, 0xc1, 0xcc,
+ 0x49, 0x83, 0x11, 0xc7, 0x51, 0xcf, 0x9b, 0xab, 0xd1, 0xab, 0xd4, 0x34, 0x7b, 0x95, 0x1a, 0xad,
+ 0xb2, 0x60, 0x96, 0x56, 0xf8, 0x12, 0x20, 0x4e, 0xdc, 0x28, 0x91, 0x8d, 0xca, 0x7f, 0x36, 0xd6,
+ 0x84, 0x3b, 0xad, 0x4f, 0xff, 0xaa, 0x82, 0xaa, 0x7b, 0x5e, 0xc4, 0x3c, 0x71, 0x55, 0xb8, 0x07,
+ 0xc8, 0x9d, 0xfb, 0x5e, 0xb0, 0x60, 0x41, 0xe2, 0x84, 0x2c, 0xf2, 0xf9, 0x2c, 0x1b, 0xf8, 0xc9,
+ 0x3f, 0x06, 0xf6, 0xb2, 0xfb, 0xa5, 0xf5, 0x8f, 0x2d, 0x23, 0xd1, 0x81, 0xbf, 0x07, 0x1c, 0xb2,
+ 0xc8, 0x89, 0x59, 0xe4, 0xb3, 0xd8, 0x11, 0x2a, 0x8b, 0xc4, 0x89, 0x0e, 0xcf, 0xbf, 0xd4, 0xb6,
+ 0x7d, 0x7a, 0x5a, 0x61, 0x13, 0x9a, 0x2e, 0x1b, 0x28, 0x0a, 0x59, 0x34, 0x16, 0x33, 0x32, 0x82,
+ 0x7f, 0x84, 0xe3, 0xc7, 0x88, 0xc7, 0xf1, 0x6a, 0x74, 0xc4, 0x66, 0xcb, 0x47, 0x16, 0x89, 0x2b,
+ 0xfb, 0x5f, 0xa3, 0xa9, 0x6c, 0xa0, 0x58, 0x8c, 0x91, 0xc3, 0x33, 0x86, 0xbf, 0x80, 0xba, 0x17,
+ 0xf1, 0x65, 0xe8, 0x4c, 0x9f, 0x9d, 0x0f, 0x3e, 0x9b, 0xcf, 0xe2, 0xc6, 0x4e, 0xab, 0xd2, 0xae,
+ 0xd1, 0x03, 0x81, 0xaf, 0x9e, 0xaf, 0x05, 0x3c, 0xfd, 0xb9, 0x02, 0xd5, 0xd5, 0x86, 0x0e, 0x01,
+ 0xf4, 0xbe, 0x79, 0x63, 0x39, 0xd6, 0xd0, 0x22, 0xa8, 0x84, 0xeb, 0xa0, 0xca, 0xba, 0x47, 0xfa,
+ 0xb6, 0x8e, 0x94, 0xdc, 0x40, 0x75, 0x9b, 0xa0, 0x32, 0x7e, 0x05, 0x47, 0xb2, 0x36, 0x2d, 0x9b,
+ 0xd0, 0xd1, 0xb0, 0x9f, 0xe2, 0x0a, 0x3e, 0x06, 0x94, 0xcd, 0x21, 0xf7, 0xb6, 0x33, 0xec, 0xf7,
+ 0x08, 0x45, 0x2f, 0xf0, 0x01, 0xd4, 0x24, 0x1d, 0x98, 0x16, 0x82, 0x42, 0xa9, 0xdf, 0x23, 0x35,
+ 0x1f, 0x3d, 0x20, 0xba, 0x85, 0xf6, 0xf3, 0xb5, 0x8d, 0xe1, 0xc4, 0xb2, 0xd1, 0x41, 0xee, 0x1f,
+ 0x4f, 0x06, 0xe8, 0x10, 0x23, 0xd8, 0xcf, 0x4a, 0xbb, 0xd7, 0x23, 0x77, 0xa8, 0x9e, 0xaf, 0x2a,
+ 0x3a, 0x1c, 0x9b, 0x4e, 0x08, 0x42, 0xf9, 0x16, 0x25, 0xbd, 0xd6, 0xfb, 0x63, 0x82, 0x1a, 0xf8,
+ 0x35, 0xbc, 0x94, 0xf8, 0x9a, 0xea, 0x86, 0x6d, 0x0e, 0x2d, 0xe9, 0x3f, 0xca, 0x85, 0x11, 0xa1,
+ 0x06, 0xb1, 0x6c, 0xb3, 0x4f, 0x9c, 0xcb, 0x4b, 0x84, 0xb7, 0x0b, 0x5d, 0xf4, 0x72, 0xab, 0xd0,
+ 0x3d, 0x43, 0xc7, 0x5b, 0x85, 0xb3, 0x2e, 0x7a, 0x85, 0x1b, 0x70, 0xbc, 0x26, 0x38, 0xc6, 0xad,
+ 0x6e, 0xdd, 0x10, 0xf4, 0xfa, 0xf4, 0xf7, 0x32, 0x54, 0x57, 0x37, 0x58, 0x07, 0x95, 0x92, 0xde,
+ 0xc4, 0x20, 0x85, 0xeb, 0xc8, 0x80, 0xc8, 0x48, 0x5c, 0xc7, 0x0a, 0x98, 0x16, 0x2a, 0x17, 0x6b,
+ 0xfd, 0x1e, 0x55, 0x0a, 0x75, 0x9a, 0xd9, 0x0b, 0x7c, 0x04, 0x07, 0xab, 0x5a, 0x86, 0xb6, 0x93,
+ 0xc6, 0x98, 0x21, 0x99, 0xf3, 0x6e, 0x1a, 0x58, 0x91, 0xc8, 0x5c, 0xaa, 0xf8, 0x04, 0xf0, 0x1a,
+ 0x96, 0x41, 0xd6, 0xd3, 0xb3, 0x64, 0x7c, 0x3d, 0xc9, 0xbd, 0x82, 0xb2, 0x1e, 0x65, 0xed, 0x5f,
+ 0x94, 0x2e, 0x82, 0xed, 0x4a, 0xf7, 0x0c, 0xa9, 0xdb, 0x95, 0xb3, 0x2e, 0xda, 0x7f, 0xff, 0x8b,
+ 0x02, 0x87, 0x06, 0x5f, 0x84, 0x6e, 0xe4, 0xc7, 0x3c, 0x48, 0xdf, 0x5c, 0xdc, 0x84, 0x13, 0x63,
+ 0x38, 0x18, 0xe9, 0xd4, 0x1c, 0x0f, 0x2d, 0x67, 0x62, 0x8d, 0x47, 0xc4, 0x30, 0xaf, 0x4d, 0xd2,
+ 0x43, 0xa5, 0x34, 0x84, 0x82, 0x76, 0x63, 0x23, 0x65, 0x13, 0xa5, 0x5f, 0xf6, 0x3a, 0xea, 0xdb,
+ 0xa8, 0xb2, 0x89, 0x88, 0x0c, 0xb4, 0x80, 0xc8, 0x77, 0x68, 0x67, 0x03, 0x59, 0x04, 0xed, 0xbe,
+ 0x77, 0x41, 0x1d, 0xb3, 0xe8, 0xc9, 0x7f, 0x64, 0xb6, 0xcf, 0x22, 0xfc, 0x06, 0x1a, 0x63, 0x42,
+ 0xef, 0x4c, 0x83, 0x38, 0xb6, 0x49, 0xe8, 0xc6, 0xf6, 0x4e, 0x00, 0xaf, 0xa9, 0x57, 0xfa, 0xd8,
+ 0x34, 0x90, 0x92, 0x9e, 0x7f, 0x8d, 0x8f, 0x28, 0x19, 0x98, 0x93, 0x01, 0x2a, 0x37, 0xcb, 0x0d,
+ 0xe5, 0xea, 0x57, 0x05, 0x1a, 0x8f, 0x7c, 0xb1, 0xf5, 0xc9, 0xb8, 0x52, 0x0d, 0xf1, 0xb3, 0x1c,
+ 0xa5, 0x4f, 0xdd, 0x48, 0xf9, 0xe1, 0xdb, 0xcc, 0xe4, 0xf1, 0xb9, 0x1b, 0x78, 0x1a, 0x8f, 0xbc,
+ 0x8e, 0xc7, 0x02, 0xf1, 0x10, 0x76, 0xa4, 0xe4, 0x86, 0x7e, 0xbc, 0xfe, 0xbf, 0xfd, 0x26, 0xaf,
+ 0x7e, 0x2b, 0x37, 0x6f, 0xe4, 0x00, 0x63, 0xce, 0x97, 0x33, 0x6d, 0x90, 0xaf, 0x75, 0x77, 0xf1,
+ 0xc7, 0x4a, 0x7c, 0x10, 0xe2, 0x43, 0x2e, 0x3e, 0xdc, 0x5d, 0x4c, 0x77, 0xc5, 0x22, 0x17, 0x7f,
+ 0x07, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x78, 0xd9, 0x96, 0xd3, 0x07, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go
new file mode 100644
index 00000000000..8dfdc45f0f1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/dropped_labels.pb.go
@@ -0,0 +1,104 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/dropped_labels.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A set of (label, value) pairs which were dropped during aggregation, attached
+// to google.api.Distribution.Exemplars in google.api.Distribution values during
+// aggregation.
+//
+// These values are used in combination with the label values that remain on the
+// aggregated Distribution timeseries to construct the full label set for the
+// exemplar values. The resulting full label set may be used to identify the
+// specific task/job/instance (for example) which may be contributing to a
+// long-tail, while allowing the storage savings of only storing aggregated
+// distribution values for a large group.
+//
+// Note that there are no guarantees on ordering of the labels from
+// exemplar-to-exemplar and from distribution-to-distribution in the same
+// stream, and there may be duplicates. It is up to clients to resolve any
+// ambiguities.
+type DroppedLabels struct {
+ // Map from label to its value, for all labels dropped in any aggregation.
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DroppedLabels) Reset() { *m = DroppedLabels{} }
+func (m *DroppedLabels) String() string { return proto.CompactTextString(m) }
+func (*DroppedLabels) ProtoMessage() {}
+func (*DroppedLabels) Descriptor() ([]byte, []int) {
+ return fileDescriptor_15749142c06d7f43, []int{0}
+}
+
+func (m *DroppedLabels) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DroppedLabels.Unmarshal(m, b)
+}
+func (m *DroppedLabels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DroppedLabels.Marshal(b, m, deterministic)
+}
+func (m *DroppedLabels) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DroppedLabels.Merge(m, src)
+}
+func (m *DroppedLabels) XXX_Size() int {
+ return xxx_messageInfo_DroppedLabels.Size(m)
+}
+func (m *DroppedLabels) XXX_DiscardUnknown() {
+ xxx_messageInfo_DroppedLabels.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DroppedLabels proto.InternalMessageInfo
+
+func (m *DroppedLabels) GetLabel() map[string]string {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*DroppedLabels)(nil), "google.monitoring.v3.DroppedLabels")
+ proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.DroppedLabels.LabelEntry")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/dropped_labels.proto", fileDescriptor_15749142c06d7f43)
+}
+
+var fileDescriptor_15749142c06d7f43 = []byte{
+ // 219 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33,
+ 0xd6, 0x4f, 0x29, 0xca, 0x2f, 0x28, 0x48, 0x4d, 0x89, 0xcf, 0x49, 0x4c, 0x4a, 0xcd, 0x29, 0xd6,
+ 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd5, 0x43, 0x28, 0xd5, 0x2b, 0x33, 0x96,
+ 0x92, 0x81, 0x1a, 0x90, 0x58, 0x90, 0xa9, 0x9f, 0x98, 0x97, 0x97, 0x5f, 0x92, 0x58, 0x92, 0x99,
+ 0x9f, 0x07, 0xd5, 0xa3, 0xd4, 0xcf, 0xc8, 0xc5, 0xeb, 0x02, 0x31, 0xcc, 0x07, 0x6c, 0x96, 0x90,
+ 0x0b, 0x17, 0x2b, 0xd8, 0x54, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x3d, 0x3d, 0x6c, 0xa6,
+ 0xea, 0xa1, 0xe8, 0xd1, 0x03, 0x53, 0xae, 0x79, 0x25, 0x45, 0x95, 0x41, 0x10, 0xcd, 0x52, 0x16,
+ 0x5c, 0x5c, 0x08, 0x41, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d,
+ 0xce, 0x20, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c,
+ 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, 0x39, 0x44, 0xd9, 0x41, 0x6d, 0x4c, 0xcf, 0xcf, 0x49,
+ 0xcc, 0x4b, 0xd7, 0xcb, 0x2f, 0x4a, 0xd7, 0x4f, 0x4f, 0xcd, 0x03, 0xbb, 0x57, 0x1f, 0x22, 0x95,
+ 0x58, 0x90, 0x59, 0x8c, 0x1a, 0x22, 0xd6, 0x08, 0x5e, 0x12, 0x1b, 0x58, 0xa9, 0x31, 0x20, 0x00,
+ 0x00, 0xff, 0xff, 0x7e, 0x29, 0xf8, 0x00, 0x3b, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
new file mode 100644
index 00000000000..b63597853f2
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group.pb.go
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/group.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The description of a dynamic collection of monitored resources. Each group
+// has a filter that is matched against monitored resources and their associated
+// metadata. If a group's filter matches an available monitored resource, then
+// that resource is a member of that group. Groups can contain any number of
+// monitored resources, and each monitored resource can be a member of any
+// number of groups.
+//
+// Groups can be nested in parent-child hierarchies. The `parentName` field
+// identifies an optional parent for each group. If a group has a parent, then
+// the only monitored resources available to be matched by the group's filter
+// are the resources contained in the parent group. In other words, a group
+// contains the monitored resources that match its filter and the filters of all
+// the group's ancestors. A group without a parent can contain any monitored
+// resource.
+//
+// For example, consider an infrastructure running a set of instances with two
+// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
+// `environment="production"`. A child of that parent group has a filter,
+// `role="transcoder"`. The parent group contains all instances in the
+// production environment, regardless of their roles. The child group contains
+// instances that have the transcoder role *and* are in the production
+// environment.
+//
+// The monitored resources contained in a group can change at any moment,
+// depending on what resources exist and what filters are associated with the
+// group and its ancestors.
+type Group struct {
+ // Output only. The name of this group. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // When creating a group, this field is ignored and a new name is created
+ // consisting of the project specified in the call to `CreateGroup`
+ // and a unique `{group_id}` that is generated automatically.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A user-assigned name for this group, used only for display purposes.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The name of the group's parent, if it has one.
+ // The format is `"projects/{project_id_or_number}/groups/{group_id}"`.
+ // For groups with no parent, `parentName` is the empty string, `""`.
+ ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
+ // The filter used to determine which monitored resources belong to this group.
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // If true, the members of this group are considered to be a cluster.
+ // The system can perform additional analysis on groups that are clusters.
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Group) Reset() { *m = Group{} }
+func (m *Group) String() string { return proto.CompactTextString(m) }
+func (*Group) ProtoMessage() {}
+func (*Group) Descriptor() ([]byte, []int) {
+ return fileDescriptor_907e30c1f087271d, []int{0}
+}
+
+func (m *Group) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Group.Unmarshal(m, b)
+}
+func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Group.Marshal(b, m, deterministic)
+}
+func (m *Group) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Group.Merge(m, src)
+}
+func (m *Group) XXX_Size() int {
+ return xxx_messageInfo_Group.Size(m)
+}
+func (m *Group) XXX_DiscardUnknown() {
+ xxx_messageInfo_Group.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Group proto.InternalMessageInfo
+
+func (m *Group) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Group) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *Group) GetParentName() string {
+ if m != nil {
+ return m.ParentName
+ }
+ return ""
+}
+
+func (m *Group) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *Group) GetIsCluster() bool {
+ if m != nil {
+ return m.IsCluster
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterType((*Group)(nil), "google.monitoring.v3.Group")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/group.proto", fileDescriptor_907e30c1f087271d) }
+
+var fileDescriptor_907e30c1f087271d = []byte{
+ // 261 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x4a, 0x2b, 0x31,
+ 0x14, 0x87, 0x49, 0xef, 0xed, 0x60, 0x4f, 0x5d, 0x0d, 0x22, 0x83, 0x20, 0x8e, 0xae, 0xba, 0xca,
+ 0x2c, 0xb2, 0x14, 0x5c, 0xb4, 0x8b, 0xae, 0x94, 0xd2, 0x45, 0x17, 0x32, 0x50, 0x62, 0x1b, 0x43,
+ 0x20, 0x93, 0x13, 0x92, 0x99, 0x82, 0x2f, 0xe2, 0x03, 0xb8, 0xf4, 0x51, 0x7c, 0x2a, 0x99, 0x93,
+ 0x91, 0x41, 0x70, 0x97, 0xf3, 0xfb, 0x3e, 0x72, 0xfe, 0x40, 0xa9, 0x11, 0xb5, 0x55, 0x55, 0x83,
+ 0xce, 0xb4, 0x18, 0x8c, 0xd3, 0xd5, 0x49, 0x54, 0x3a, 0x60, 0xe7, 0xb9, 0x0f, 0xd8, 0x62, 0x7e,
+ 0x91, 0x0c, 0x3e, 0x1a, 0xfc, 0x24, 0xee, 0xde, 0x19, 0x4c, 0xd7, 0xbd, 0x95, 0xe7, 0xf0, 0xdf,
+ 0xc9, 0x46, 0x15, 0xac, 0x64, 0x8b, 0xd9, 0x96, 0xde, 0xf9, 0x2d, 0x9c, 0x1f, 0x4d, 0xf4, 0x56,
+ 0xbe, 0xed, 0x89, 0x4d, 0x88, 0xcd, 0x87, 0xec, 0xa9, 0x57, 0x6e, 0x60, 0xee, 0x65, 0x50, 0xae,
+ 0x4d, 0xc6, 0x3f, 0x32, 0x20, 0x45, 0x24, 0x5c, 0x42, 0xf6, 0x6a, 0x6c, 0xab, 0x42, 0x31, 0x25,
+ 0x36, 0x54, 0xf9, 0x35, 0x80, 0x89, 0xfb, 0x83, 0xed, 0x62, 0xcf, 0xb2, 0x92, 0x2d, 0xce, 0xb6,
+ 0x33, 0x13, 0x57, 0x29, 0x58, 0x7e, 0x30, 0x28, 0x0e, 0xd8, 0xf0, 0xbf, 0xa6, 0x5e, 0x02, 0x8d,
+ 0xbc, 0xe9, 0xf7, 0xda, 0xb0, 0xe7, 0x87, 0xc1, 0xd1, 0x68, 0xa5, 0xd3, 0x1c, 0x83, 0xae, 0xb4,
+ 0x72, 0xb4, 0x75, 0x95, 0x90, 0xf4, 0x26, 0xfe, 0x3e, 0xcd, 0xfd, 0x58, 0x7d, 0x4e, 0xae, 0xd6,
+ 0xe9, 0x83, 0x95, 0xc5, 0xee, 0xc8, 0x1f, 0xc7, 0x56, 0x3b, 0xf1, 0xf5, 0x03, 0x6b, 0x82, 0xf5,
+ 0x08, 0xeb, 0x9d, 0x78, 0xc9, 0xa8, 0x89, 0xf8, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x95, 0xd1, 0xa1,
+ 0x34, 0x7e, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
new file mode 100644
index 00000000000..4289273d9a8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/group_service.pb.go
@@ -0,0 +1,948 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/group_service.proto
+
+package monitoring
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The `ListGroup` request.
+type ListGroupsRequest struct {
+ // The project whose groups are to be listed. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional filter consisting of a single group name. The filters limit the
+ // groups returned based on their parent-child relationship with the specified
+ // group. If no filter is specified, all groups are returned.
+ //
+ // Types that are valid to be assigned to Filter:
+ // *ListGroupsRequest_ChildrenOfGroup
+ // *ListGroupsRequest_AncestorsOfGroup
+ // *ListGroupsRequest_DescendantsOfGroup
+ Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListGroupsRequest) Reset() { *m = ListGroupsRequest{} }
+func (m *ListGroupsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListGroupsRequest) ProtoMessage() {}
+func (*ListGroupsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{0}
+}
+
+func (m *ListGroupsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListGroupsRequest.Unmarshal(m, b)
+}
+func (m *ListGroupsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListGroupsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListGroupsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListGroupsRequest.Merge(m, src)
+}
+func (m *ListGroupsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListGroupsRequest.Size(m)
+}
+func (m *ListGroupsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListGroupsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListGroupsRequest proto.InternalMessageInfo
+
+func (m *ListGroupsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type isListGroupsRequest_Filter interface {
+ isListGroupsRequest_Filter()
+}
+
+type ListGroupsRequest_ChildrenOfGroup struct {
+ ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_AncestorsOfGroup struct {
+ AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_DescendantsOfGroup struct {
+ DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"`
+}
+
+func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {}
+
+func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *ListGroupsRequest) GetChildrenOfGroup() string {
+ if x, ok := m.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok {
+ return x.ChildrenOfGroup
+ }
+ return ""
+}
+
+func (m *ListGroupsRequest) GetAncestorsOfGroup() string {
+ if x, ok := m.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok {
+ return x.AncestorsOfGroup
+ }
+ return ""
+}
+
+func (m *ListGroupsRequest) GetDescendantsOfGroup() string {
+ if x, ok := m.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok {
+ return x.DescendantsOfGroup
+ }
+ return ""
+}
+
+func (m *ListGroupsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListGroupsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ListGroupsRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ListGroupsRequest_OneofMarshaler, _ListGroupsRequest_OneofUnmarshaler, _ListGroupsRequest_OneofSizer, []interface{}{
+ (*ListGroupsRequest_ChildrenOfGroup)(nil),
+ (*ListGroupsRequest_AncestorsOfGroup)(nil),
+ (*ListGroupsRequest_DescendantsOfGroup)(nil),
+ }
+}
+
+func _ListGroupsRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ListGroupsRequest)
+ // filter
+ switch x := m.Filter.(type) {
+ case *ListGroupsRequest_ChildrenOfGroup:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.ChildrenOfGroup)
+ case *ListGroupsRequest_AncestorsOfGroup:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.AncestorsOfGroup)
+ case *ListGroupsRequest_DescendantsOfGroup:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ b.EncodeStringBytes(x.DescendantsOfGroup)
+ case nil:
+ default:
+ return fmt.Errorf("ListGroupsRequest.Filter has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ListGroupsRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ListGroupsRequest)
+ switch tag {
+ case 2: // filter.children_of_group
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Filter = &ListGroupsRequest_ChildrenOfGroup{x}
+ return true, err
+ case 3: // filter.ancestors_of_group
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Filter = &ListGroupsRequest_AncestorsOfGroup{x}
+ return true, err
+ case 4: // filter.descendants_of_group
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Filter = &ListGroupsRequest_DescendantsOfGroup{x}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ListGroupsRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ListGroupsRequest)
+ // filter
+ switch x := m.Filter.(type) {
+ case *ListGroupsRequest_ChildrenOfGroup:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.ChildrenOfGroup)))
+ n += len(x.ChildrenOfGroup)
+ case *ListGroupsRequest_AncestorsOfGroup:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.AncestorsOfGroup)))
+ n += len(x.AncestorsOfGroup)
+ case *ListGroupsRequest_DescendantsOfGroup:
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(len(x.DescendantsOfGroup)))
+ n += len(x.DescendantsOfGroup)
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// The `ListGroups` response.
+type ListGroupsResponse struct {
+ // The groups that match the specified filters.
+ Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListGroupsResponse) Reset() { *m = ListGroupsResponse{} }
+func (m *ListGroupsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListGroupsResponse) ProtoMessage() {}
+func (*ListGroupsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{1}
+}
+
+func (m *ListGroupsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListGroupsResponse.Unmarshal(m, b)
+}
+func (m *ListGroupsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListGroupsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListGroupsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListGroupsResponse.Merge(m, src)
+}
+func (m *ListGroupsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListGroupsResponse.Size(m)
+}
+func (m *ListGroupsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListGroupsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListGroupsResponse proto.InternalMessageInfo
+
+func (m *ListGroupsResponse) GetGroup() []*Group {
+ if m != nil {
+ return m.Group
+ }
+ return nil
+}
+
+func (m *ListGroupsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetGroup` request.
+type GetGroupRequest struct {
+ // The group to retrieve. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetGroupRequest) Reset() { *m = GetGroupRequest{} }
+func (m *GetGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*GetGroupRequest) ProtoMessage() {}
+func (*GetGroupRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{2}
+}
+
+func (m *GetGroupRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetGroupRequest.Unmarshal(m, b)
+}
+func (m *GetGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetGroupRequest.Marshal(b, m, deterministic)
+}
+func (m *GetGroupRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetGroupRequest.Merge(m, src)
+}
+func (m *GetGroupRequest) XXX_Size() int {
+ return xxx_messageInfo_GetGroupRequest.Size(m)
+}
+func (m *GetGroupRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetGroupRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetGroupRequest proto.InternalMessageInfo
+
+func (m *GetGroupRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `CreateGroup` request.
+type CreateGroupRequest struct {
+ // The project in which to create the group. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // A group definition. It is an error to define the `name` field because
+ // the system assigns the name.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not create the group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateGroupRequest) Reset() { *m = CreateGroupRequest{} }
+func (m *CreateGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateGroupRequest) ProtoMessage() {}
+func (*CreateGroupRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{3}
+}
+
+func (m *CreateGroupRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateGroupRequest.Unmarshal(m, b)
+}
+func (m *CreateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateGroupRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateGroupRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateGroupRequest.Merge(m, src)
+}
+func (m *CreateGroupRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateGroupRequest.Size(m)
+}
+func (m *CreateGroupRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateGroupRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateGroupRequest proto.InternalMessageInfo
+
+func (m *CreateGroupRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateGroupRequest) GetGroup() *Group {
+ if m != nil {
+ return m.Group
+ }
+ return nil
+}
+
+func (m *CreateGroupRequest) GetValidateOnly() bool {
+ if m != nil {
+ return m.ValidateOnly
+ }
+ return false
+}
+
+// The `UpdateGroup` request.
+type UpdateGroupRequest struct {
+ // The new definition of the group. All fields of the existing group,
+ // excepting `name`, are replaced with the corresponding fields of this group.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not update the existing group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateGroupRequest) Reset() { *m = UpdateGroupRequest{} }
+func (m *UpdateGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateGroupRequest) ProtoMessage() {}
+func (*UpdateGroupRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{4}
+}
+
+func (m *UpdateGroupRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateGroupRequest.Unmarshal(m, b)
+}
+func (m *UpdateGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateGroupRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateGroupRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateGroupRequest.Merge(m, src)
+}
+func (m *UpdateGroupRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateGroupRequest.Size(m)
+}
+func (m *UpdateGroupRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateGroupRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateGroupRequest proto.InternalMessageInfo
+
+func (m *UpdateGroupRequest) GetGroup() *Group {
+ if m != nil {
+ return m.Group
+ }
+ return nil
+}
+
+func (m *UpdateGroupRequest) GetValidateOnly() bool {
+ if m != nil {
+ return m.ValidateOnly
+ }
+ return false
+}
+
+// The `DeleteGroup` request. You can only delete a group if it has no children.
+type DeleteGroupRequest struct {
+ // The group to delete. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteGroupRequest) Reset() { *m = DeleteGroupRequest{} }
+func (m *DeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteGroupRequest) ProtoMessage() {}
+func (*DeleteGroupRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{5}
+}
+
+func (m *DeleteGroupRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteGroupRequest.Unmarshal(m, b)
+}
+func (m *DeleteGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteGroupRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteGroupRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteGroupRequest.Merge(m, src)
+}
+func (m *DeleteGroupRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteGroupRequest.Size(m)
+}
+func (m *DeleteGroupRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteGroupRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteGroupRequest proto.InternalMessageInfo
+
+func (m *DeleteGroupRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `ListGroupMembers` request.
+type ListGroupMembersRequest struct {
+ // The group whose members are listed. The format is
+ // `"projects/{project_id_or_number}/groups/{group_id}"`.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // An optional [list filter](/monitoring/api/learn_more#filtering) describing
+ // the members to be returned. The filter may reference the type, labels, and
+ // metadata of monitored resources that comprise the group.
+ // For example, to return only resources representing Compute Engine VM
+ // instances, use this filter:
+ //
+ // resource.type = "gce_instance"
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // An optional time interval for which results should be returned. Only
+ // members that were part of the group during the specified interval are
+ // included in the response. If no interval is provided then the group
+ // membership over the last minute is returned.
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListGroupMembersRequest) Reset() { *m = ListGroupMembersRequest{} }
+func (m *ListGroupMembersRequest) String() string { return proto.CompactTextString(m) }
+func (*ListGroupMembersRequest) ProtoMessage() {}
+func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{6}
+}
+
+func (m *ListGroupMembersRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListGroupMembersRequest.Unmarshal(m, b)
+}
+func (m *ListGroupMembersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListGroupMembersRequest.Marshal(b, m, deterministic)
+}
+func (m *ListGroupMembersRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListGroupMembersRequest.Merge(m, src)
+}
+func (m *ListGroupMembersRequest) XXX_Size() int {
+ return xxx_messageInfo_ListGroupMembersRequest.Size(m)
+}
+func (m *ListGroupMembersRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListGroupMembersRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListGroupMembersRequest proto.InternalMessageInfo
+
+func (m *ListGroupMembersRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListGroupMembersRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListGroupMembersRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+func (m *ListGroupMembersRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListGroupMembersRequest) GetInterval() *TimeInterval {
+ if m != nil {
+ return m.Interval
+ }
+ return nil
+}
+
+// The `ListGroupMembers` response.
+type ListGroupMembersResponse struct {
+ // A set of monitored resources in the group.
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"`
+ // If there are more results than have been returned, then this field is
+ // set to a non-empty value. To see the additional results, use that value as
+ // `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of elements matching this request.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListGroupMembersResponse) Reset() { *m = ListGroupMembersResponse{} }
+func (m *ListGroupMembersResponse) String() string { return proto.CompactTextString(m) }
+func (*ListGroupMembersResponse) ProtoMessage() {}
+func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_21ad21d0ed55c55a, []int{7}
+}
+
+func (m *ListGroupMembersResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListGroupMembersResponse.Unmarshal(m, b)
+}
+func (m *ListGroupMembersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListGroupMembersResponse.Marshal(b, m, deterministic)
+}
+func (m *ListGroupMembersResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListGroupMembersResponse.Merge(m, src)
+}
+func (m *ListGroupMembersResponse) XXX_Size() int {
+ return xxx_messageInfo_ListGroupMembersResponse.Size(m)
+}
+func (m *ListGroupMembersResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListGroupMembersResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListGroupMembersResponse proto.InternalMessageInfo
+
+func (m *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+func (m *ListGroupMembersResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+func (m *ListGroupMembersResponse) GetTotalSize() int32 {
+ if m != nil {
+ return m.TotalSize
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*ListGroupsRequest)(nil), "google.monitoring.v3.ListGroupsRequest")
+ proto.RegisterType((*ListGroupsResponse)(nil), "google.monitoring.v3.ListGroupsResponse")
+ proto.RegisterType((*GetGroupRequest)(nil), "google.monitoring.v3.GetGroupRequest")
+ proto.RegisterType((*CreateGroupRequest)(nil), "google.monitoring.v3.CreateGroupRequest")
+ proto.RegisterType((*UpdateGroupRequest)(nil), "google.monitoring.v3.UpdateGroupRequest")
+ proto.RegisterType((*DeleteGroupRequest)(nil), "google.monitoring.v3.DeleteGroupRequest")
+ proto.RegisterType((*ListGroupMembersRequest)(nil), "google.monitoring.v3.ListGroupMembersRequest")
+ proto.RegisterType((*ListGroupMembersResponse)(nil), "google.monitoring.v3.ListGroupMembersResponse")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/group_service.proto", fileDescriptor_21ad21d0ed55c55a)
+}
+
+var fileDescriptor_21ad21d0ed55c55a = []byte{
+ // 826 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x4c,
+ 0x10, 0x7e, 0xdd, 0xa4, 0x69, 0xb2, 0x69, 0xd5, 0x76, 0x55, 0xf5, 0x8d, 0xdc, 0x0f, 0x05, 0xf7,
+ 0x83, 0xa8, 0x50, 0x5b, 0x24, 0x07, 0x24, 0x10, 0x3d, 0xb4, 0xa0, 0x82, 0x44, 0xd5, 0xca, 0x2d,
+ 0x3d, 0xa0, 0x4a, 0x91, 0x9b, 0x4c, 0x8c, 0xc1, 0xde, 0x35, 0xf6, 0x26, 0xd0, 0xa2, 0x4a, 0x80,
+ 0xc4, 0x81, 0x33, 0x37, 0x6e, 0x1c, 0xe1, 0x2f, 0x70, 0xe2, 0xca, 0x95, 0xbf, 0xc0, 0xff, 0x00,
+ 0x79, 0xbd, 0x9b, 0x38, 0x9f, 0xed, 0x85, 0x5b, 0xb2, 0xf3, 0x8c, 0x9f, 0x67, 0x66, 0x9f, 0x99,
+ 0x45, 0x25, 0x9b, 0x52, 0xdb, 0x05, 0xc3, 0xa3, 0xc4, 0x61, 0x34, 0x70, 0x88, 0x6d, 0xb4, 0x2a,
+ 0x86, 0x1d, 0xd0, 0xa6, 0x5f, 0x0d, 0x21, 0x68, 0x39, 0x35, 0xd0, 0xfd, 0x80, 0x32, 0x8a, 0xe7,
+ 0x62, 0xa4, 0xde, 0x41, 0xea, 0xad, 0x8a, 0xba, 0x28, 0xf2, 0x2d, 0xdf, 0x31, 0x2c, 0x42, 0x28,
+ 0xb3, 0x98, 0x43, 0x49, 0x18, 0xe7, 0xa8, 0x2b, 0x89, 0xa8, 0xc8, 0x83, 0x7a, 0x35, 0x80, 0x90,
+ 0x36, 0x03, 0xf9, 0x61, 0xf5, 0xda, 0x40, 0x09, 0x35, 0xea, 0x79, 0x94, 0x08, 0x48, 0x71, 0xb8,
+ 0x4a, 0x81, 0x58, 0x10, 0x08, 0xfe, 0xef, 0xb4, 0xd9, 0x30, 0xc0, 0xf3, 0xd9, 0x59, 0x1c, 0xd4,
+ 0xfe, 0x28, 0x68, 0xf6, 0xb1, 0x13, 0xb2, 0xdd, 0x28, 0x21, 0x34, 0xe1, 0x65, 0x13, 0x42, 0x86,
+ 0x31, 0x4a, 0x13, 0xcb, 0x83, 0xc2, 0x44, 0x51, 0x29, 0xe5, 0x4c, 0xfe, 0x1b, 0xdf, 0x44, 0xb3,
+ 0xb5, 0x67, 0x8e, 0x5b, 0x0f, 0x80, 0x54, 0x69, 0xa3, 0xca, 0x19, 0x0a, 0x63, 0x11, 0xe0, 0xe1,
+ 0x7f, 0xe6, 0xb4, 0x0c, 0xed, 0x37, 0xf8, 0x97, 0xb0, 0x8e, 0xb0, 0x45, 0x6a, 0x10, 0x32, 0x1a,
+ 0x84, 0x1d, 0x78, 0x4a, 0xc0, 0x67, 0xda, 0x31, 0x89, 0x2f, 0xa3, 0xb9, 0x3a, 0x84, 0x35, 0x20,
+ 0x75, 0x8b, 0xb0, 0x44, 0x46, 0x5a, 0x64, 0xe0, 0x44, 0x54, 0xe6, 0x2c, 0xa0, 0x9c, 0x6f, 0xd9,
+ 0x50, 0x0d, 0x9d, 0x73, 0x28, 0x8c, 0x17, 0x95, 0xd2, 0xb8, 0x99, 0x8d, 0x0e, 0x0e, 0x9d, 0x73,
+ 0xc0, 0x4b, 0x08, 0xf1, 0x20, 0xa3, 0x2f, 0x80, 0x14, 0x32, 0xbc, 0x10, 0x0e, 0x3f, 0x8a, 0x0e,
+ 0xb6, 0xb3, 0x28, 0xd3, 0x70, 0x5c, 0x06, 0x81, 0x46, 0x11, 0x4e, 0x36, 0x20, 0xf4, 0x29, 0x09,
+ 0x01, 0xdf, 0x42, 0xe3, 0xb1, 0x00, 0xa5, 0x98, 0x2a, 0xe5, 0xcb, 0x0b, 0xfa, 0xa0, 0x2b, 0xd6,
+ 0x79, 0x92, 0x19, 0x23, 0xf1, 0x3a, 0x9a, 0x26, 0xf0, 0x9a, 0x55, 0x13, 0xb4, 0xbc, 0x3d, 0xe6,
+ 0x54, 0x74, 0x7c, 0x20, 0xa9, 0xb5, 0x35, 0x34, 0xbd, 0x0b, 0x31, 0x5f, 0x6f, 0xbf, 0x53, 0x9d,
+ 0x7e, 0x6b, 0x6f, 0x15, 0x84, 0x77, 0x02, 0xb0, 0x18, 0x0c, 0x84, 0xa6, 0x13, 0x57, 0xd3, 0x16,
+ 0x1b, 0xf1, 0x5d, 0x4d, 0xec, 0x0a, 0x9a, 0x6a, 0x59, 0xae, 0x53, 0xb7, 0x18, 0x54, 0x29, 0x71,
+ 0xcf, 0x38, 0x75, 0xd6, 0x9c, 0x94, 0x87, 0xfb, 0xc4, 0x3d, 0xd3, 0x5c, 0x84, 0x9f, 0xf8, 0xf5,
+ 0x5e, 0x05, 0xff, 0x8a, 0xad, 0x84, 0xf0, 0x7d, 0x70, 0x61, 0x48, 0xbd, 0xc9, 0xd6, 0xfc, 0x50,
+ 0xd0, 0xff, 0xed, 0x3b, 0xdb, 0x03, 0xef, 0x14, 0x82, 0x91, 0xd6, 0xed, 0x32, 0x4a, 0x6a, 0xa4,
+ 0x51, 0xd2, 0x3d, 0x46, 0xc1, 0xf3, 0xd2, 0x28, 0xdc, 0x61, 0x39, 0x53, 0xfc, 0xc3, 0x5b, 0x28,
+ 0xeb, 0x10, 0x06, 0x41, 0xcb, 0x72, 0xb9, 0xbb, 0xf2, 0x65, 0x6d, 0x70, 0x23, 0x8e, 0x1c, 0x0f,
+ 0x1e, 0x09, 0xa4, 0xd9, 0xce, 0xd1, 0x3e, 0x2b, 0xa8, 0xd0, 0x5f, 0x83, 0x70, 0xdf, 0x6d, 0x34,
+ 0xe1, 0xc5, 0x47, 0xc2, 0x7f, 0x4b, 0xf2, 0xdb, 0x96, 0xef, 0xe8, 0x7b, 0x72, 0x5d, 0x98, 0x62,
+ 0x5b, 0x98, 0x12, 0x7d, 0x55, 0x0f, 0x46, 0x45, 0x33, 0xca, 0x2c, 0x37, 0xd9, 0x92, 0x1c, 0x3f,
+ 0x89, 0x7a, 0x52, 0xfe, 0x9e, 0x41, 0x93, 0x5c, 0xd8, 0x61, 0xbc, 0xe7, 0xf0, 0x07, 0x05, 0xa1,
+ 0xce, 0x94, 0xe0, 0xeb, 0x83, 0x4b, 0xed, 0x5b, 0x24, 0x6a, 0xe9, 0x72, 0x60, 0x5c, 0xb2, 0xb6,
+ 0xfa, 0xfe, 0xd7, 0xef, 0x4f, 0x63, 0xcb, 0x78, 0x31, 0x5a, 0x5f, 0x6f, 0xa2, 0x6b, 0xbb, 0xe7,
+ 0x07, 0xf4, 0x39, 0xd4, 0x58, 0x68, 0x6c, 0x5c, 0xc4, 0x0b, 0x2d, 0xc4, 0x2d, 0x94, 0x95, 0xb3,
+ 0x83, 0xd7, 0x86, 0x18, 0xaf, 0x7b, 0xb6, 0xd4, 0x51, 0xfe, 0xd4, 0xd6, 0x39, 0x6b, 0x11, 0x2f,
+ 0x0f, 0x62, 0x15, 0xa4, 0xc6, 0xc6, 0x05, 0x7e, 0xa7, 0xa0, 0x7c, 0x62, 0x18, 0xf1, 0x90, 0xba,
+ 0xfa, 0xe7, 0x75, 0x34, 0xfd, 0x0d, 0x4e, 0xbf, 0xa6, 0x8d, 0x2c, 0xfa, 0x8e, 0x18, 0xa2, 0x8f,
+ 0x0a, 0xca, 0x27, 0xc6, 0x71, 0x98, 0x86, 0xfe, 0x89, 0x1d, 0xad, 0xa1, 0xc2, 0x35, 0x6c, 0xaa,
+ 0xab, 0x5c, 0x43, 0xfc, 0x70, 0x0c, 0x6d, 0x84, 0xd4, 0xf2, 0x0a, 0xe5, 0x13, 0xb3, 0x3a, 0x4c,
+ 0x4a, 0xff, 0x38, 0xab, 0xf3, 0x12, 0x29, 0x5f, 0x23, 0xfd, 0x41, 0xf4, 0x1a, 0xc9, 0x8b, 0xd8,
+ 0xb8, 0xec, 0x22, 0xbe, 0x28, 0x68, 0xa6, 0x77, 0x6c, 0xf0, 0xe6, 0x25, 0x2e, 0xeb, 0x5e, 0x11,
+ 0xaa, 0x7e, 0x55, 0xb8, 0xb0, 0xa6, 0xce, 0xb5, 0x95, 0xf0, 0xfa, 0x68, 0x6d, 0x86, 0x18, 0xc2,
+ 0xed, 0xaf, 0x0a, 0x2a, 0xd4, 0xa8, 0x37, 0x90, 0x65, 0x7b, 0x36, 0x39, 0x57, 0x07, 0x51, 0x13,
+ 0x0e, 0x94, 0xa7, 0x5b, 0x02, 0x6a, 0x53, 0xd7, 0x22, 0xb6, 0x4e, 0x03, 0xdb, 0xb0, 0x81, 0xf0,
+ 0x16, 0x19, 0x71, 0xc8, 0xf2, 0x9d, 0xb0, 0xfb, 0x8d, 0xbf, 0xdb, 0xf9, 0xf7, 0x6d, 0x4c, 0xdd,
+ 0x8d, 0x3f, 0xb0, 0xe3, 0xd2, 0x66, 0x5d, 0x2e, 0x88, 0x88, 0xf1, 0xb8, 0xf2, 0x53, 0x06, 0x4f,
+ 0x78, 0xf0, 0xa4, 0x13, 0x3c, 0x39, 0xae, 0x9c, 0x66, 0x38, 0x49, 0xe5, 0x6f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x86, 0x94, 0xf2, 0xde, 0xed, 0x08, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// GroupServiceClient is the client API for GroupService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GroupServiceClient interface {
+ // Lists the existing groups.
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Creates a new group.
+ CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error)
+}
+
+type groupServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewGroupServiceClient(cc *grpc.ClientConn) GroupServiceClient {
+ return &groupServiceClient{cc}
+}
+
+func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) {
+ out := new(ListGroupsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) {
+ out := new(ListGroupMembersResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GroupServiceServer is the server API for GroupService service.
+type GroupServiceServer interface {
+ // Lists the existing groups.
+ ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(context.Context, *GetGroupRequest) (*Group, error)
+ // Creates a new group.
+ CreateGroup(context.Context, *CreateGroupRequest) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(context.Context, *DeleteGroupRequest) (*empty.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error)
+}
+
+func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) {
+ s.RegisterService(&_GroupService_serviceDesc, srv)
+}
+
+func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroups(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroups",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).GetGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/GetGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).CreateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/CreateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupMembersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _GroupService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.GroupService",
+ HandlerType: (*GroupServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListGroups",
+ Handler: _GroupService_ListGroups_Handler,
+ },
+ {
+ MethodName: "GetGroup",
+ Handler: _GroupService_GetGroup_Handler,
+ },
+ {
+ MethodName: "CreateGroup",
+ Handler: _GroupService_CreateGroup_Handler,
+ },
+ {
+ MethodName: "UpdateGroup",
+ Handler: _GroupService_UpdateGroup_Handler,
+ },
+ {
+ MethodName: "DeleteGroup",
+ Handler: _GroupService_DeleteGroup_Handler,
+ },
+ {
+ MethodName: "ListGroupMembers",
+ Handler: _GroupService_ListGroupMembers_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/group_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
new file mode 100644
index 00000000000..e4929901df9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric.pb.go
@@ -0,0 +1,234 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/metric.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ _ "google.golang.org/genproto/googleapis/api/label"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A single data point in a time series.
+type Point struct {
+ // The time interval to which the data point applies. For `GAUGE` metrics,
+ // only the end time of the interval is used. For `DELTA` metrics, the start
+ // and end time should specify a non-zero interval, with subsequent points
+ // specifying contiguous and non-overlapping intervals. For `CUMULATIVE`
+ // metrics, the start and end time should specify a non-zero interval, with
+ // subsequent points specifying the same start time and increasing end times,
+ // until an event resets the cumulative value to zero and sets a new start
+ // time for the following points.
+ Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
+ // The value of the data point.
+ Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Point) Reset() { *m = Point{} }
+func (m *Point) String() string { return proto.CompactTextString(m) }
+func (*Point) ProtoMessage() {}
+func (*Point) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c76199a3d2c4c21e, []int{0}
+}
+
+func (m *Point) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Point.Unmarshal(m, b)
+}
+func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Point.Marshal(b, m, deterministic)
+}
+func (m *Point) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Point.Merge(m, src)
+}
+func (m *Point) XXX_Size() int {
+ return xxx_messageInfo_Point.Size(m)
+}
+func (m *Point) XXX_DiscardUnknown() {
+ xxx_messageInfo_Point.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Point proto.InternalMessageInfo
+
+func (m *Point) GetInterval() *TimeInterval {
+ if m != nil {
+ return m.Interval
+ }
+ return nil
+}
+
+func (m *Point) GetValue() *TypedValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric. A time series is identified by a combination of a
+// fully-specified monitored resource and a fully-specified metric.
+// This type is used for both listing and creating time series.
+type TimeSeries struct {
+ // The associated metric. A fully-specified metric used to identify the time
+ // series.
+ Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"`
+ // The associated monitored resource. Custom metrics can use only certain
+ // monitored resource types in their time series data.
+ Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Output only. The associated monitored resource metadata. When reading a
+ // a timeseries, this field will include metadata labels that are explicitly
+ // named in the reduction. When creating a timeseries, this field is ignored.
+ Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The metric kind of the time series. When listing time series, this metric
+ // kind might be different from the metric kind of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the metric kind of the associated metric. If the associated
+ // metric's descriptor must be auto-created, then this field specifies the
+ // metric kind of the new descriptor and must be either `GAUGE` (the default)
+ // or `CUMULATIVE`.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The value type of the time series. When listing time series, this value
+ // type might be different from the value type of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the type of the data in the `points` field.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The data points of this time series. When listing time series, points are
+ // returned in reverse time order.
+ //
+ // When creating a time series, this field must contain exactly one point and
+ // the point's type must be the same as the value type of the associated
+ // metric. If the associated metric's descriptor must be auto-created, then
+ // the value type of the descriptor is determined by the point's type, which
+ // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
+ Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TimeSeries) Reset() { *m = TimeSeries{} }
+func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c76199a3d2c4c21e, []int{1}
+}
+
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
+}
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+}
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
+}
+func (m *TimeSeries) XXX_Size() int {
+ return xxx_messageInfo_TimeSeries.Size(m)
+}
+func (m *TimeSeries) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+
+func (m *TimeSeries) GetMetric() *metric.Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetResource() *monitoredres.MonitoredResource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind {
+ if m != nil {
+ return m.MetricKind
+ }
+ return metric.MetricDescriptor_METRIC_KIND_UNSPECIFIED
+}
+
+func (m *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType {
+ if m != nil {
+ return m.ValueType
+ }
+ return metric.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+}
+
+func (m *TimeSeries) GetPoints() []*Point {
+ if m != nil {
+ return m.Points
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Point)(nil), "google.monitoring.v3.Point")
+ proto.RegisterType((*TimeSeries)(nil), "google.monitoring.v3.TimeSeries")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/metric.proto", fileDescriptor_c76199a3d2c4c21e) }
+
+var fileDescriptor_c76199a3d2c4c21e = []byte{
+ // 441 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x51, 0xab, 0xd3, 0x30,
+ 0x14, 0xc7, 0xe9, 0xae, 0x9b, 0x33, 0x03, 0x1f, 0x82, 0x68, 0x99, 0x0a, 0x73, 0xa2, 0x0e, 0x1f,
+ 0x5a, 0x58, 0x41, 0x10, 0xe1, 0x82, 0x57, 0x45, 0x45, 0x2e, 0x8c, 0x28, 0x7b, 0x90, 0xc1, 0xc8,
+ 0x6d, 0x0f, 0x25, 0xd8, 0xe4, 0x84, 0x34, 0x2b, 0xdc, 0x27, 0x3f, 0x8c, 0x6f, 0x7e, 0x14, 0x3f,
+ 0x93, 0x0f, 0xd2, 0x24, 0xdd, 0x76, 0xb1, 0xf7, 0xbe, 0xb5, 0xf9, 0xff, 0xfe, 0xe7, 0x7f, 0x72,
+ 0x72, 0xc8, 0x93, 0x12, 0xb1, 0xac, 0x20, 0x95, 0xa8, 0x84, 0x45, 0x23, 0x54, 0x99, 0x36, 0x59,
+ 0x2a, 0xc1, 0x1a, 0x91, 0x27, 0xda, 0xa0, 0x45, 0x7a, 0xcf, 0x23, 0xc9, 0x01, 0x49, 0x9a, 0x6c,
+ 0xfa, 0x28, 0x18, 0xb9, 0x16, 0x29, 0x57, 0x0a, 0x2d, 0xb7, 0x02, 0x55, 0xed, 0x3d, 0xd3, 0xfb,
+ 0x47, 0x6a, 0xc5, 0x2f, 0xa0, 0x0a, 0xe7, 0x0f, 0x8e, 0xce, 0x8f, 0x43, 0xa6, 0x4f, 0x8f, 0x05,
+ 0x1f, 0x04, 0xc5, 0xd6, 0x40, 0x8d, 0x3b, 0x93, 0x43, 0x80, 0xfa, 0x9b, 0xcd, 0x51, 0x4a, 0x54,
+ 0x1e, 0x99, 0xff, 0x24, 0xc3, 0x15, 0x0a, 0x65, 0xe9, 0x29, 0x19, 0x0b, 0x65, 0xc1, 0x34, 0xbc,
+ 0x8a, 0xa3, 0x59, 0xb4, 0x98, 0x2c, 0xe7, 0x49, 0xdf, 0x45, 0x92, 0x6f, 0x42, 0xc2, 0xe7, 0x40,
+ 0xb2, 0xbd, 0x87, 0xbe, 0x22, 0xc3, 0x86, 0x57, 0x3b, 0x88, 0x07, 0xce, 0x3c, 0xbb, 0xc6, 0x7c,
+ 0xa9, 0xa1, 0x58, 0xb7, 0x1c, 0xf3, 0xf8, 0xfc, 0xef, 0x80, 0x90, 0xb6, 0xe4, 0x57, 0x30, 0x02,
+ 0x6a, 0xfa, 0x92, 0x8c, 0xfc, 0x3d, 0x43, 0x13, 0xb4, 0xab, 0xc3, 0xb5, 0x48, 0xce, 0x9d, 0xc2,
+ 0x02, 0x41, 0x5f, 0x93, 0x71, 0x77, 0xe1, 0x90, 0xfa, 0xf8, 0x0a, 0xdd, 0x8d, 0x85, 0x05, 0x88,
+ 0xed, 0x71, 0xfa, 0x96, 0x8c, 0x25, 0x58, 0x5e, 0x70, 0xcb, 0xe3, 0xdb, 0xce, 0xfa, 0xec, 0x46,
+ 0xeb, 0x79, 0x80, 0xd9, 0xde, 0x46, 0x3f, 0x91, 0x89, 0xef, 0x63, 0xfb, 0x43, 0xa8, 0x22, 0x3e,
+ 0x99, 0x45, 0x8b, 0xbb, 0xcb, 0x17, 0xff, 0xb7, 0xfb, 0x1e, 0xea, 0xdc, 0x08, 0x6d, 0xd1, 0x84,
+ 0x83, 0x2f, 0x42, 0x15, 0x8c, 0xc8, 0xfd, 0x37, 0xfd, 0x40, 0x88, 0x9b, 0xc5, 0xd6, 0x5e, 0x6a,
+ 0x88, 0x6f, 0xb9, 0x42, 0xcf, 0x6f, 0x2c, 0xe4, 0x26, 0xd8, 0xce, 0x92, 0xdd, 0x69, 0xba, 0x4f,
+ 0x9a, 0x91, 0x91, 0x6e, 0x9f, 0xb2, 0x8e, 0x87, 0xb3, 0x93, 0xc5, 0x64, 0xf9, 0xb0, 0xff, 0x09,
+ 0xdc, 0x73, 0xb3, 0x80, 0x9e, 0xfd, 0x8a, 0x48, 0x9c, 0xa3, 0xec, 0x45, 0xcf, 0x26, 0x3e, 0x78,
+ 0xd5, 0x6e, 0xca, 0x2a, 0xfa, 0x7e, 0x1a, 0xa0, 0x12, 0x2b, 0xae, 0xca, 0x04, 0x4d, 0x99, 0x96,
+ 0xa0, 0xdc, 0x1e, 0xa5, 0x5e, 0xe2, 0x5a, 0xd4, 0x57, 0xb7, 0xed, 0xcd, 0xe1, 0xef, 0xf7, 0x60,
+ 0xfa, 0xd1, 0x17, 0x78, 0x57, 0xe1, 0xae, 0xe8, 0x86, 0xdc, 0x66, 0xad, 0xb3, 0x3f, 0x9d, 0xb8,
+ 0x71, 0xe2, 0xe6, 0x20, 0x6e, 0xd6, 0xd9, 0xc5, 0xc8, 0x85, 0x64, 0xff, 0x02, 0x00, 0x00, 0xff,
+ 0xff, 0x5a, 0x88, 0xc9, 0x0b, 0x7e, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
new file mode 100644
index 00000000000..6fdd5daa1cd
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/metric_service.pb.go
@@ -0,0 +1,1222 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/metric_service.proto
+
+package monitoring
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ _ "github.com/golang/protobuf/ptypes/duration"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Controls which fields are returned by `ListTimeSeries`.
+type ListTimeSeriesRequest_TimeSeriesView int32
+
+const (
+ // Returns the identity of the metric(s), the time series,
+ // and the time series data.
+ ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0
+ // Returns the identity of the metric and the time series resource,
+ // but not the time series data.
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1
+)
+
+var ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{
+ 0: "FULL",
+ 1: "HEADERS",
+}
+
+var ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{
+ "FULL": 0,
+ "HEADERS": 1,
+}
+
+func (x ListTimeSeriesRequest_TimeSeriesView) String() string {
+ return proto.EnumName(ListTimeSeriesRequest_TimeSeriesView_name, int32(x))
+}
+
+func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{8, 0}
+}
+
+// The `ListMonitoredResourceDescriptors` request.
+type ListMonitoredResourceDescriptorsRequest struct {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional [filter](/monitoring/api/v3/filters) describing
+ // the descriptors to be returned. The filter can reference
+ // the descriptor's type and labels. For example, the
+ // following filter returns only Google Compute Engine descriptors
+ // that have an `id` label:
+ //
+ // resource.type = starts_with("gce_") AND resource.label:id
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) Reset() {
+ *m = ListMonitoredResourceDescriptorsRequest{}
+}
+func (m *ListMonitoredResourceDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {}
+func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{0}
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Unmarshal(m, b)
+}
+func (m *ListMonitoredResourceDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListMonitoredResourceDescriptorsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Merge(m, src)
+}
+func (m *ListMonitoredResourceDescriptorsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.Size(m)
+}
+func (m *ListMonitoredResourceDescriptorsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListMonitoredResourceDescriptorsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListMonitoredResourceDescriptorsRequest proto.InternalMessageInfo
+
+func (m *ListMonitoredResourceDescriptorsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListMonitoredResourceDescriptorsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListMonitoredResourceDescriptors` response.
+type ListMonitoredResourceDescriptorsResponse struct {
+ // The monitored resource descriptors that are available to this project
+ // and that match `filter`, if present.
+ ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListMonitoredResourceDescriptorsResponse) Reset() {
+ *m = ListMonitoredResourceDescriptorsResponse{}
+}
+func (m *ListMonitoredResourceDescriptorsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {}
+func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{1}
+}
+
+func (m *ListMonitoredResourceDescriptorsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Unmarshal(m, b)
+}
+func (m *ListMonitoredResourceDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListMonitoredResourceDescriptorsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Merge(m, src)
+}
+func (m *ListMonitoredResourceDescriptorsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.Size(m)
+}
+func (m *ListMonitoredResourceDescriptorsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListMonitoredResourceDescriptorsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListMonitoredResourceDescriptorsResponse proto.InternalMessageInfo
+
+func (m *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor {
+ if m != nil {
+ return m.ResourceDescriptors
+ }
+ return nil
+}
+
+func (m *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetMonitoredResourceDescriptor` request.
+type GetMonitoredResourceDescriptorRequest struct {
+ // The monitored resource descriptor to get. The format is
+ // `"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}"`.
+ // The `{resource_type}` is a predefined type, such as
+ // `cloudsql_database`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetMonitoredResourceDescriptorRequest) Reset() { *m = GetMonitoredResourceDescriptorRequest{} }
+func (m *GetMonitoredResourceDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {}
+func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{2}
+}
+
+func (m *GetMonitoredResourceDescriptorRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Unmarshal(m, b)
+}
+func (m *GetMonitoredResourceDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Marshal(b, m, deterministic)
+}
+func (m *GetMonitoredResourceDescriptorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Merge(m, src)
+}
+func (m *GetMonitoredResourceDescriptorRequest) XXX_Size() int {
+ return xxx_messageInfo_GetMonitoredResourceDescriptorRequest.Size(m)
+}
+func (m *GetMonitoredResourceDescriptorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetMonitoredResourceDescriptorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetMonitoredResourceDescriptorRequest proto.InternalMessageInfo
+
+func (m *GetMonitoredResourceDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `ListMetricDescriptors` request.
+type ListMetricDescriptorsRequest struct {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // If this field is empty, all custom and
+ // system-defined metric descriptors are returned.
+ // Otherwise, the [filter](/monitoring/api/v3/filters)
+ // specifies which metric descriptors are to be
+ // returned. For example, the following filter matches all
+ // [custom metrics](/monitoring/custom-metrics):
+ //
+ // metric.type = starts_with("custom.googleapis.com/")
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} }
+func (m *ListMetricDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListMetricDescriptorsRequest) ProtoMessage() {}
+func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{3}
+}
+
+func (m *ListMetricDescriptorsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListMetricDescriptorsRequest.Unmarshal(m, b)
+}
+func (m *ListMetricDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListMetricDescriptorsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListMetricDescriptorsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListMetricDescriptorsRequest.Merge(m, src)
+}
+func (m *ListMetricDescriptorsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListMetricDescriptorsRequest.Size(m)
+}
+func (m *ListMetricDescriptorsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListMetricDescriptorsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListMetricDescriptorsRequest proto.InternalMessageInfo
+
+func (m *ListMetricDescriptorsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListMetricDescriptorsRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListMetricDescriptorsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListMetricDescriptorsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListMetricDescriptors` response.
+type ListMetricDescriptorsResponse struct {
+ // The metric descriptors that are available to the project
+ // and that match the value of `filter`, if present.
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+// NOTE(review): protoc-gen-go generated boilerplate for ListMetricDescriptorsResponse
+// (Reset/String/Descriptor, reflection-based XXX_* marshal hooks, nil-safe getters).
+// Code left byte-identical: hand edits would diverge from the generator output and
+// corrupt this vendored patch's hunk line counts.
+func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} }
+func (m *ListMetricDescriptorsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListMetricDescriptorsResponse) ProtoMessage() {}
+func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{4}
+}
+
+func (m *ListMetricDescriptorsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListMetricDescriptorsResponse.Unmarshal(m, b)
+}
+func (m *ListMetricDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListMetricDescriptorsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListMetricDescriptorsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListMetricDescriptorsResponse.Merge(m, src)
+}
+func (m *ListMetricDescriptorsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListMetricDescriptorsResponse.Size(m)
+}
+func (m *ListMetricDescriptorsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListMetricDescriptorsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListMetricDescriptorsResponse proto.InternalMessageInfo
+
+// Getters return the zero value on a nil receiver, per protoc-gen-go convention.
+func (m *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor {
+ if m != nil {
+ return m.MetricDescriptors
+ }
+ return nil
+}
+
+func (m *ListMetricDescriptorsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// NOTE(review): generated request message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `GetMetricDescriptor` request.
+type GetMetricDescriptorRequest struct {
+ // The metric descriptor on which to execute the request. The format is
+ // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`.
+ // An example value of `{metric_id}` is
+ // `"compute.googleapis.com/instance/disk/read_bytes_count"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetMetricDescriptorRequest) Reset() { *m = GetMetricDescriptorRequest{} }
+func (m *GetMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*GetMetricDescriptorRequest) ProtoMessage() {}
+func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{5}
+}
+
+func (m *GetMetricDescriptorRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetMetricDescriptorRequest.Unmarshal(m, b)
+}
+func (m *GetMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetMetricDescriptorRequest.Marshal(b, m, deterministic)
+}
+func (m *GetMetricDescriptorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetMetricDescriptorRequest.Merge(m, src)
+}
+func (m *GetMetricDescriptorRequest) XXX_Size() int {
+ return xxx_messageInfo_GetMetricDescriptorRequest.Size(m)
+}
+func (m *GetMetricDescriptorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetMetricDescriptorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetMetricDescriptorRequest proto.InternalMessageInfo
+
+// Nil-safe getter, per protoc-gen-go convention.
+func (m *GetMetricDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// NOTE(review): generated request message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `CreateMetricDescriptor` request.
+type CreateMetricDescriptorRequest struct {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The new [custom metric](/monitoring/custom-metrics)
+ // descriptor.
+ MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateMetricDescriptorRequest) Reset() { *m = CreateMetricDescriptorRequest{} }
+func (m *CreateMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateMetricDescriptorRequest) ProtoMessage() {}
+func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{6}
+}
+
+func (m *CreateMetricDescriptorRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateMetricDescriptorRequest.Unmarshal(m, b)
+}
+func (m *CreateMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateMetricDescriptorRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateMetricDescriptorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateMetricDescriptorRequest.Merge(m, src)
+}
+func (m *CreateMetricDescriptorRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateMetricDescriptorRequest.Size(m)
+}
+func (m *CreateMetricDescriptorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateMetricDescriptorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateMetricDescriptorRequest proto.InternalMessageInfo
+
+// Nil-safe getters, per protoc-gen-go convention.
+func (m *CreateMetricDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor {
+ if m != nil {
+ return m.MetricDescriptor
+ }
+ return nil
+}
+
+// NOTE(review): generated request message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `DeleteMetricDescriptor` request.
+type DeleteMetricDescriptorRequest struct {
+ // The metric descriptor on which to execute the request. The format is
+ // `"projects/{project_id_or_number}/metricDescriptors/{metric_id}"`.
+ // An example of `{metric_id}` is:
+ // `"custom.googleapis.com/my_test_metric"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteMetricDescriptorRequest) Reset() { *m = DeleteMetricDescriptorRequest{} }
+func (m *DeleteMetricDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteMetricDescriptorRequest) ProtoMessage() {}
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{7}
+}
+
+func (m *DeleteMetricDescriptorRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteMetricDescriptorRequest.Unmarshal(m, b)
+}
+func (m *DeleteMetricDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteMetricDescriptorRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteMetricDescriptorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteMetricDescriptorRequest.Merge(m, src)
+}
+func (m *DeleteMetricDescriptorRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteMetricDescriptorRequest.Size(m)
+}
+func (m *DeleteMetricDescriptorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteMetricDescriptorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteMetricDescriptorRequest proto.InternalMessageInfo
+
+// Nil-safe getter, per protoc-gen-go convention.
+func (m *DeleteMetricDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// NOTE(review): generated request message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `ListTimeSeries` request.
+type ListTimeSeriesRequest struct {
+ // The project on which to execute the request. The format is
+ // "projects/{project_id_or_number}".
+ Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"`
+ // A [monitoring filter](/monitoring/api/v3/filters) that specifies which time
+ // series should be returned. The filter must specify a single metric type,
+ // and can additionally specify metric labels and other information. For
+ // example:
+ //
+ //     metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+ //     metric.label.instance_name = "my-instance-name"
+ //
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // The time interval for which results should be returned. Only time series
+ // that contain data points in the specified interval are included
+ // in the response.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // By default, the raw time series data is returned.
+ // Use this field to combine multiple time series for different
+ // views of the data.
+ Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"`
+ // Unsupported: must be left blank. The points in each time series are
+ // returned in reverse time order.
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Specifies which information is returned about the time series.
+ View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"`
+ // A positive number that is the maximum number of results to return. If
+ // `page_size` is empty or more than 100,000 results, the effective
+ // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the
+ // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is
+ // the maximum number of `TimeSeries` returned.
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListTimeSeriesRequest) Reset() { *m = ListTimeSeriesRequest{} }
+func (m *ListTimeSeriesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListTimeSeriesRequest) ProtoMessage() {}
+func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{8}
+}
+
+func (m *ListTimeSeriesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListTimeSeriesRequest.Unmarshal(m, b)
+}
+func (m *ListTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListTimeSeriesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListTimeSeriesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListTimeSeriesRequest.Merge(m, src)
+}
+func (m *ListTimeSeriesRequest) XXX_Size() int {
+ return xxx_messageInfo_ListTimeSeriesRequest.Size(m)
+}
+func (m *ListTimeSeriesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListTimeSeriesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListTimeSeriesRequest proto.InternalMessageInfo
+
+// Getters return the zero value on a nil receiver, per protoc-gen-go convention;
+// GetView falls back to the enum's zero value (FULL).
+func (m *ListTimeSeriesRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListTimeSeriesRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListTimeSeriesRequest) GetInterval() *TimeInterval {
+ if m != nil {
+ return m.Interval
+ }
+ return nil
+}
+
+func (m *ListTimeSeriesRequest) GetAggregation() *Aggregation {
+ if m != nil {
+ return m.Aggregation
+ }
+ return nil
+}
+
+func (m *ListTimeSeriesRequest) GetOrderBy() string {
+ if m != nil {
+ return m.OrderBy
+ }
+ return ""
+}
+
+func (m *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView {
+ if m != nil {
+ return m.View
+ }
+ return ListTimeSeriesRequest_FULL
+}
+
+func (m *ListTimeSeriesRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListTimeSeriesRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// NOTE(review): generated response message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `ListTimeSeries` response.
+type ListTimeSeriesResponse struct {
+ // One or more time series that match the filter included in the request.
+ TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `pageToken` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete.
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListTimeSeriesResponse) Reset() { *m = ListTimeSeriesResponse{} }
+func (m *ListTimeSeriesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListTimeSeriesResponse) ProtoMessage() {}
+func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{9}
+}
+
+func (m *ListTimeSeriesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListTimeSeriesResponse.Unmarshal(m, b)
+}
+func (m *ListTimeSeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListTimeSeriesResponse.Marshal(b, m, deterministic)
+}
+func (m *ListTimeSeriesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListTimeSeriesResponse.Merge(m, src)
+}
+func (m *ListTimeSeriesResponse) XXX_Size() int {
+ return xxx_messageInfo_ListTimeSeriesResponse.Size(m)
+}
+func (m *ListTimeSeriesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListTimeSeriesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListTimeSeriesResponse proto.InternalMessageInfo
+
+// Nil-safe getters, per protoc-gen-go convention.
+func (m *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries {
+ if m != nil {
+ return m.TimeSeries
+ }
+ return nil
+}
+
+func (m *ListTimeSeriesResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+func (m *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status {
+ if m != nil {
+ return m.ExecutionErrors
+ }
+ return nil
+}
+
+// NOTE(review): generated request message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// The `CreateTimeSeries` request.
+type CreateTimeSeriesRequest struct {
+ // The project on which to execute the request. The format is
+ // `"projects/{project_id_or_number}"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The new data to be added to a list of time series.
+ // Adds at most one data point to each of several time series. The new data
+ // point must be more recent than any other point in its time series. Each
+ // `TimeSeries` value must fully specify a unique time series by supplying
+ // all label values for the metric and the monitored resource.
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateTimeSeriesRequest) Reset() { *m = CreateTimeSeriesRequest{} }
+func (m *CreateTimeSeriesRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateTimeSeriesRequest) ProtoMessage() {}
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{10}
+}
+
+func (m *CreateTimeSeriesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateTimeSeriesRequest.Unmarshal(m, b)
+}
+func (m *CreateTimeSeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateTimeSeriesRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateTimeSeriesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateTimeSeriesRequest.Merge(m, src)
+}
+func (m *CreateTimeSeriesRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateTimeSeriesRequest.Size(m)
+}
+func (m *CreateTimeSeriesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateTimeSeriesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTimeSeriesRequest proto.InternalMessageInfo
+
+// Nil-safe getters, per protoc-gen-go convention.
+func (m *CreateTimeSeriesRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries {
+ if m != nil {
+ return m.TimeSeries
+ }
+ return nil
+}
+
+// NOTE(review): generated error-detail message + boilerplate; left byte-identical
+// (protoc-gen-go output inside a vendored patch — do not hand-edit).
+// Describes the result of a failed request to write data to a time series.
+type CreateTimeSeriesError struct {
+ // The time series, including the `Metric`, `MonitoredResource`,
+ // and `Point`s (including timestamp and value) that resulted
+ // in the error. This field provides all of the context that
+ // would be needed to retry the operation.
+ TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // The status of the requested write operation.
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateTimeSeriesError) Reset() { *m = CreateTimeSeriesError{} }
+func (m *CreateTimeSeriesError) String() string { return proto.CompactTextString(m) }
+func (*CreateTimeSeriesError) ProtoMessage() {}
+func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7b3d47b45a293957, []int{11}
+}
+
+func (m *CreateTimeSeriesError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateTimeSeriesError.Unmarshal(m, b)
+}
+func (m *CreateTimeSeriesError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateTimeSeriesError.Marshal(b, m, deterministic)
+}
+func (m *CreateTimeSeriesError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateTimeSeriesError.Merge(m, src)
+}
+func (m *CreateTimeSeriesError) XXX_Size() int {
+ return xxx_messageInfo_CreateTimeSeriesError.Size(m)
+}
+func (m *CreateTimeSeriesError) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateTimeSeriesError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateTimeSeriesError proto.InternalMessageInfo
+
+// Nil-safe getters, per protoc-gen-go convention.
+func (m *CreateTimeSeriesError) GetTimeSeries() *TimeSeries {
+ if m != nil {
+ return m.TimeSeries
+ }
+ return nil
+}
+
+func (m *CreateTimeSeriesError) GetStatus() *status.Status {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// NOTE(review): generated init hooks — register the enum, message types, and the
+// raw file descriptor with the proto runtime at package load. Left byte-identical.
+func init() {
+ proto.RegisterEnum("google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView", ListTimeSeriesRequest_TimeSeriesView_name, ListTimeSeriesRequest_TimeSeriesView_value)
+ proto.RegisterType((*ListMonitoredResourceDescriptorsRequest)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsRequest")
+ proto.RegisterType((*ListMonitoredResourceDescriptorsResponse)(nil), "google.monitoring.v3.ListMonitoredResourceDescriptorsResponse")
+ proto.RegisterType((*GetMonitoredResourceDescriptorRequest)(nil), "google.monitoring.v3.GetMonitoredResourceDescriptorRequest")
+ proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "google.monitoring.v3.ListMetricDescriptorsRequest")
+ proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "google.monitoring.v3.ListMetricDescriptorsResponse")
+ proto.RegisterType((*GetMetricDescriptorRequest)(nil), "google.monitoring.v3.GetMetricDescriptorRequest")
+ proto.RegisterType((*CreateMetricDescriptorRequest)(nil), "google.monitoring.v3.CreateMetricDescriptorRequest")
+ proto.RegisterType((*DeleteMetricDescriptorRequest)(nil), "google.monitoring.v3.DeleteMetricDescriptorRequest")
+ proto.RegisterType((*ListTimeSeriesRequest)(nil), "google.monitoring.v3.ListTimeSeriesRequest")
+ proto.RegisterType((*ListTimeSeriesResponse)(nil), "google.monitoring.v3.ListTimeSeriesResponse")
+ proto.RegisterType((*CreateTimeSeriesError)(nil), "google.monitoring.v3.CreateTimeSeriesError")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/metric_service.proto", fileDescriptor_7b3d47b45a293957)
+}
+
+// NOTE(review): opaque generated payload — the gzipped FileDescriptorProto for
+// metric_service.proto, consumed by proto.RegisterFile above. Must never be
+// hand-edited; bytes kept exactly as emitted by protoc-gen-go.
+var fileDescriptor_7b3d47b45a293957 = []byte{
+ // 1049 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
+ 0x14, 0x67, 0xe2, 0x34, 0x71, 0x9e, 0xd5, 0xd4, 0x9d, 0xb6, 0xae, 0xd9, 0x26, 0x95, 0xbb, 0xa8,
+ 0xc4, 0x75, 0xcb, 0x6e, 0x65, 0x57, 0x1c, 0x92, 0x26, 0x52, 0xfe, 0x51, 0x2a, 0x02, 0x8a, 0xd6,
+ 0x25, 0x87, 0x2a, 0x92, 0xb5, 0xb1, 0xa7, 0xab, 0x01, 0xef, 0xce, 0x32, 0x3b, 0x76, 0x9b, 0xa2,
+ 0x70, 0xe0, 0xd0, 0x3b, 0x02, 0x24, 0xf8, 0x0a, 0x39, 0x80, 0xf8, 0x0a, 0x88, 0x13, 0x57, 0xce,
+ 0xdc, 0xf8, 0x0a, 0xdc, 0xd1, 0xce, 0xee, 0xc6, 0xf6, 0x7a, 0x77, 0x6d, 0x73, 0xe9, 0xcd, 0x3b,
+ 0xef, 0xcd, 0x7b, 0xbf, 0xf7, 0x9b, 0x79, 0xef, 0x37, 0x86, 0x7b, 0x16, 0x63, 0x56, 0x97, 0xe8,
+ 0x36, 0x73, 0xa8, 0x60, 0x9c, 0x3a, 0x96, 0xde, 0x6f, 0xe8, 0x36, 0x11, 0x9c, 0xb6, 0x5b, 0x1e,
+ 0xe1, 0x7d, 0xda, 0x26, 0x9a, 0xcb, 0x99, 0x60, 0xf8, 0x7a, 0xe0, 0xaa, 0x0d, 0x5c, 0xb5, 0x7e,
+ 0x43, 0x59, 0x09, 0x03, 0x98, 0x2e, 0xd5, 0x4d, 0xc7, 0x61, 0xc2, 0x14, 0x94, 0x39, 0x5e, 0xb0,
+ 0x47, 0xb9, 0x39, 0x64, 0x0d, 0x82, 0x86, 0x86, 0xf7, 0x86, 0x0d, 0x41, 0x40, 0xd2, 0x69, 0x71,
+ 0xe2, 0xb1, 0x1e, 0x8f, 0x32, 0x2a, 0x77, 0x12, 0xc1, 0xb5, 0x99, 0x6d, 0x33, 0x27, 0xd3, 0x65,
+ 0x24, 0xd5, 0xed, 0xd0, 0x45, 0x7e, 0x9d, 0xf4, 0x5e, 0xe8, 0x9d, 0x1e, 0x97, 0x20, 0x43, 0xfb,
+ 0xad, 0xb8, 0x9d, 0xd8, 0xae, 0x38, 0x8d, 0x15, 0xc0, 0xdd, 0xb6, 0xee, 0x09, 0x53, 0xf4, 0xc2,
+ 0xca, 0xd4, 0xef, 0x10, 0xac, 0x1d, 0x50, 0x4f, 0x7c, 0x1a, 0x81, 0x37, 0x42, 0xec, 0x7b, 0xc4,
+ 0x6b, 0x73, 0xea, 0x0a, 0xc6, 0x3d, 0x83, 0x7c, 0xd5, 0x23, 0x9e, 0xc0, 0x18, 0xe6, 0x1d, 0xd3,
+ 0x26, 0xe5, 0x4b, 0x15, 0x54, 0x5d, 0x32, 0xe4, 0x6f, 0x5c, 0x82, 0x85, 0x17, 0xb4, 0x2b, 0x08,
+ 0x2f, 0xcf, 0xc9, 0xd5, 0xf0, 0x0b, 0xdf, 0x82, 0x25, 0xd7, 0xb4, 0x48, 0xcb, 0xa3, 0xaf, 0x49,
+ 0x39, 0x57, 0x41, 0xd5, 0x4b, 0x46, 0xde, 0x5f, 0x68, 0xd2, 0xd7, 0x04, 0xaf, 0x02, 0x48, 0xa3,
+ 0x60, 0x5f, 0x12, 0xa7, 0x3c, 0x2f, 0x37, 0x4a, 0xf7, 0x67, 0xfe, 0x82, 0xfa, 0x0b, 0x82, 0xea,
+ 0x64, 0x4c, 0x9e, 0xcb, 0x1c, 0x8f, 0xe0, 0xe7, 0x70, 0x3d, 0xa2, 0xbb, 0xd5, 0x19, 0xd8, 0xcb,
+ 0xa8, 0x92, 0xab, 0x16, 0xea, 0x6b, 0x5a, 0x78, 0xda, 0xa6, 0x4b, 0xb5, 0x8c, 0x78, 0xc6, 0x35,
+ 0x3e, 0x9e, 0x03, 0xbf, 0x0f, 0x57, 0x1c, 0xf2, 0x4a, 0xb4, 0x86, 0xc0, 0x06, 0x55, 0x5e, 0xf6,
+ 0x97, 0x0f, 0x2f, 0x00, 0x6f, 0xc0, 0xdd, 0x27, 0x24, 0x0b, 0x6e, 0x9c, 0xc1, 0xdc, 0x80, 0x41,
+ 0xf5, 0x0d, 0x82, 0x15, 0x59, 0xad, 0x3c, 0xec, 0xb7, 0x48, 0xfb, 0x0f, 0x08, 0x56, 0x53, 0x80,
+ 0x84, 0x5c, 0x7f, 0x02, 0x38, 0x6c, 0xa9, 0x71, 0xa6, 0x57, 0x46, 0x98, 0x8e, 0x85, 0x30, 0xae,
+ 0xda, 0xf1, 0xa0, 0x53, 0x93, 0xfb, 0x10, 0x14, 0x9f, 0xdc, 0x78, 0xc4, 0x0c, 0x46, 0xbf, 0x81,
+ 0xd5, 0x5d, 0x4e, 0x4c, 0x41, 0x66, 0xd8, 0x84, 0x9f, 0xc2, 0xd5, 0xb1, 0xda, 0x24, 0xa0, 0x49,
+ 0xa5, 0x15, 0xe3, 0xa5, 0xa9, 0x0d, 0x58, 0xdd, 0x23, 0x5d, 0x32, 0x53, 0x7e, 0xf5, 0xa7, 0x1c,
+ 0xdc, 0xf0, 0xd9, 0x7f, 0x46, 0x6d, 0xd2, 0x24, 0x9c, 0x92, 0xb1, 0xf3, 0x87, 0x29, 0xce, 0x7f,
+ 0x0b, 0xf2, 0xd4, 0x11, 0x84, 0xf7, 0xcd, 0xae, 0x3c, 0xe0, 0x42, 0x5d, 0xd5, 0x92, 0xe6, 0x9d,
+ 0xe6, 0xa7, 0x79, 0x1a, 0x7a, 0x1a, 0x17, 0x7b, 0xf0, 0x2e, 0x14, 0x4c, 0xcb, 0xe2, 0xc4, 0x92,
+ 0x93, 0x45, 0x5e, 0xb9, 0x42, 0xfd, 0x4e, 0x72, 0x88, 0xed, 0x81, 0xa3, 0x31, 0xbc, 0x0b, 0xbf,
+ 0x0b, 0x79, 0xc6, 0x3b, 0x84, 0xb7, 0x4e, 0x4e, 0xcb, 0x0b, 0x12, 0xde, 0xa2, 0xfc, 0xde, 0x39,
+ 0xc5, 0x9f, 0xc1, 0x7c, 0x9f, 0x92, 0x97, 0xe5, 0xc5, 0x0a, 0xaa, 0x2e, 0xd7, 0xd7, 0x93, 0x03,
+ 0x27, 0xd2, 0xa0, 0x0d, 0x56, 0x8e, 0x28, 0x79, 0x69, 0xc8, 0x38, 0xa3, 0xf7, 0x3d, 0x9f, 0x79,
+ 0xdf, 0x97, 0xe2, 0xf7, 0x7d, 0x0d, 0x96, 0x47, 0x63, 0xe2, 0x3c, 0xcc, 0x7f, 0xf4, 0xf9, 0xc1,
+ 0x41, 0xf1, 0x1d, 0x5c, 0x80, 0xc5, 0x8f, 0xf7, 0xb7, 0xf7, 0xf6, 0x8d, 0x66, 0x11, 0xa9, 0xbf,
+ 0x23, 0x28, 0xc5, 0x31, 0x85, 0x1d, 0xb1, 0x0d, 0x05, 0x41, 0x6d, 0xe2, 0x4b, 0x0c, 0x25, 0x51,
+ 0x2b, 0x54, 0xd2, 0x29, 0x0f, 0xb7, 0x83, 0xb8, 0xf8, 0x3d, 0x6d, 0x1f, 0xe0, 0x4d, 0x28, 0x92,
+ 0x57, 0xa4, 0xdd, 0xf3, 0x29, 0x6e, 0x11, 0xce, 0xfd, 0xd6, 0xcb, 0xc9, 0x7c, 0x38, 0xca, 0xc7,
+ 0xdd, 0xb6, 0xd6, 0x94, 0xd3, 0xdd, 0xb8, 0x72, 0xe1, 0xbb, 0x2f, 0x5d, 0x55, 0x17, 0x6e, 0x06,
+ 0x4d, 0x91, 0x7e, 0xc1, 0x86, 0xdb, 0x21, 0x56, 0xd8, 0xdc, 0xec, 0x85, 0xf9, 0x83, 0xed, 0x46,
+ 0x3c, 0xa5, 0x04, 0x33, 0xce, 0x1a, 0x9a, 0x99, 0xb5, 0x1a, 0x2c, 0x04, 0x3a, 0x16, 0xf6, 0x68,
+ 0x12, 0x07, 0xa1, 0x47, 0xfd, 0x5f, 0x80, 0xcb, 0x41, 0x2b, 0x36, 0x83, 0x97, 0x00, 0xfe, 0x1b,
+ 0x41, 0x65, 0x92, 0xc2, 0xe0, 0xcd, 0xf4, 0xdb, 0x39, 0x85, 0x5a, 0x2a, 0x5b, 0xff, 0x77, 0x7b,
+ 0x70, 0xb5, 0xd4, 0xf5, 0x6f, 0xff, 0xfa, 0xe7, 0xfb, 0xb9, 0x47, 0xb8, 0xee, 0xbf, 0x04, 0xbe,
+ 0xf6, 0x0f, 0x65, 0xd3, 0xe5, 0xec, 0x0b, 0xd2, 0x16, 0x9e, 0x5e, 0x3b, 0x1b, 0xbc, 0x36, 0x92,
+ 0xa0, 0xff, 0x81, 0xe0, 0x76, 0xb6, 0x22, 0xe1, 0x8d, 0x64, 0x78, 0x53, 0xe9, 0x98, 0x32, 0xad,
+ 0xac, 0xaa, 0x8f, 0x65, 0x11, 0x1f, 0xe2, 0x47, 0x49, 0x45, 0x64, 0xd6, 0xa0, 0xd7, 0xce, 0xf0,
+ 0x6f, 0x28, 0x98, 0x89, 0x63, 0x8a, 0x84, 0xeb, 0x19, 0xe4, 0xa6, 0xe8, 0xa8, 0xd2, 0x98, 0x69,
+ 0x4f, 0x78, 0x0a, 0xba, 0x2c, 0xe0, 0x1e, 0x5e, 0x4b, 0x39, 0x85, 0x31, 0x64, 0x3f, 0x23, 0xb8,
+ 0x96, 0xa0, 0x57, 0xf8, 0x61, 0x3a, 0xdf, 0xc9, 0x2a, 0xa1, 0x64, 0xca, 0x8e, 0x5a, 0x97, 0xc0,
+ 0x1e, 0xe0, 0x5a, 0x32, 0xb3, 0x71, 0x5c, 0x7a, 0xad, 0x76, 0x86, 0x7f, 0x45, 0x50, 0x4a, 0x56,
+ 0x46, 0x9c, 0x42, 0x4e, 0xa6, 0x8e, 0x4e, 0x40, 0xb8, 0x23, 0x11, 0x3e, 0x56, 0xa7, 0xa5, 0x6e,
+ 0x7d, 0x5c, 0x80, 0x7d, 0x36, 0x4b, 0xc9, 0x5a, 0x9a, 0x86, 0x38, 0x53, 0x79, 0x95, 0x52, 0xb4,
+ 0x29, 0x7a, 0x25, 0x6b, 0xfb, 0xfe, 0x2b, 0x39, 0x62, 0xb3, 0x36, 0x0b, 0x9b, 0x3f, 0x22, 0x58,
+ 0x1e, 0x95, 0x05, 0x7c, 0x7f, 0x06, 0x41, 0x53, 0x1e, 0x4c, 0xe7, 0x1c, 0x5e, 0xc4, 0xaa, 0x44,
+ 0xa8, 0xe2, 0x4a, 0x32, 0x9b, 0x43, 0xa3, 0xf1, 0x0d, 0x82, 0x62, 0x7c, 0xee, 0xe2, 0x0f, 0xb2,
+ 0xce, 0x77, 0x1c, 0x5b, 0x1a, 0x4f, 0xf7, 0x25, 0x8a, 0xbb, 0xea, 0x44, 0x14, 0xeb, 0xa8, 0xb6,
+ 0x73, 0x8e, 0xa0, 0xdc, 0x66, 0x76, 0x62, 0xe6, 0x1d, 0x3c, 0x32, 0x91, 0x0f, 0xfd, 0x34, 0x87,
+ 0xe8, 0xf9, 0x56, 0xe8, 0x6b, 0xb1, 0xae, 0xe9, 0x58, 0x1a, 0xe3, 0x96, 0x6e, 0x11, 0x47, 0x82,
+ 0xd0, 0x03, 0x93, 0xe9, 0x52, 0x6f, 0xf4, 0x6f, 0xd2, 0xc6, 0xe0, 0xeb, 0x7c, 0x4e, 0x79, 0x12,
+ 0x04, 0xd8, 0xed, 0xb2, 0x5e, 0x27, 0x1a, 0x4d, 0x7e, 0xca, 0xa3, 0xc6, 0x9f, 0x91, 0xf1, 0x58,
+ 0x1a, 0x8f, 0x07, 0xc6, 0xe3, 0xa3, 0xc6, 0xc9, 0x82, 0x4c, 0xd2, 0xf8, 0x2f, 0x00, 0x00, 0xff,
+ 0xff, 0x79, 0x2b, 0x3b, 0x90, 0x4a, 0x0e, 0x00, 0x00,
+}
+
+// NOTE(review): generated guard declarations; left byte-identical.
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// NOTE(review): generated gRPC client interface, unexported implementation, and
+// constructor; left byte-identical (protoc-gen-go/grpc output — do not hand-edit).
+// MetricServiceClient is the client API for MetricService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricServiceClient interface {
+ // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
+ GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
+ ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor. This method does not require a Stackdriver account.
+ GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // User-created metric descriptors define
+ // [custom metrics](/monitoring/custom-metrics).
+ CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](/monitoring/custom-metrics) can be deleted.
+ DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Lists time series that match a filter. This method does not require a Stackdriver account.
+ ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+}
+
+type metricServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+// NewMetricServiceClient wraps an existing *grpc.ClientConn; it does not dial.
+func NewMetricServiceClient(cc *grpc.ClientConn) MetricServiceClient {
+ return &metricServiceClient{cc}
+}
+
+// NOTE(review): generated unary-RPC client stubs — each allocates the response,
+// invokes the full method path on the shared ClientConn, and returns (nil, err)
+// on failure. Left byte-identical (protoc-gen-go/grpc output).
+func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
+ out := new(ListMonitoredResourceDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) {
+ out := new(monitoredres.MonitoredResourceDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) {
+ out := new(ListMetricDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) {
+ out := new(ListTimeSeriesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NOTE(review): generated server-side interface; implementations are registered
+// via RegisterMetricServiceServer below. Left byte-identical.
+// MetricServiceServer is the server API for MetricService service.
+type MetricServiceServer interface {
+ // Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.
+ ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor. This method does not require a Stackdriver account.
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter. This method does not require a Stackdriver account.
+ ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor. This method does not require a Stackdriver account.
+ GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // User-created metric descriptors define
+ // [custom metrics](/monitoring/custom-metrics).
+ CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](/monitoring/custom-metrics) can be deleted.
+ DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*empty.Empty, error)
+ // Lists time series that match a filter. This method does not require a Stackdriver account.
+ ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*empty.Empty, error)
+}
+
+func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) {
+ s.RegisterService(&_MetricService_serviceDesc, srv)
+}
+
+func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMonitoredResourceDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMonitoredResourceDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMetricDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _MetricService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.MetricService",
+ HandlerType: (*MetricServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListMonitoredResourceDescriptors",
+ Handler: _MetricService_ListMonitoredResourceDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMonitoredResourceDescriptor",
+ Handler: _MetricService_GetMonitoredResourceDescriptor_Handler,
+ },
+ {
+ MethodName: "ListMetricDescriptors",
+ Handler: _MetricService_ListMetricDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMetricDescriptor",
+ Handler: _MetricService_GetMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "CreateMetricDescriptor",
+ Handler: _MetricService_CreateMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "DeleteMetricDescriptor",
+ Handler: _MetricService_DeleteMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "ListTimeSeries",
+ Handler: _MetricService_ListTimeSeries_Handler,
+ },
+ {
+ MethodName: "CreateTimeSeries",
+ Handler: _MetricService_CreateTimeSeries_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/metric_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
new file mode 100644
index 00000000000..a4dcb16c1ea
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go
@@ -0,0 +1,100 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/mutation_record.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Describes a change made to a configuration.
+type MutationRecord struct {
+ // When the change occurred.
+ MutateTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"`
+ // The email address of the user making the change.
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MutationRecord) Reset() { *m = MutationRecord{} }
+func (m *MutationRecord) String() string { return proto.CompactTextString(m) }
+func (*MutationRecord) ProtoMessage() {}
+func (*MutationRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_83c24e690bdb9101, []int{0}
+}
+
+func (m *MutationRecord) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MutationRecord.Unmarshal(m, b)
+}
+func (m *MutationRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MutationRecord.Marshal(b, m, deterministic)
+}
+func (m *MutationRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MutationRecord.Merge(m, src)
+}
+func (m *MutationRecord) XXX_Size() int {
+ return xxx_messageInfo_MutationRecord.Size(m)
+}
+func (m *MutationRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_MutationRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MutationRecord proto.InternalMessageInfo
+
+func (m *MutationRecord) GetMutateTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.MutateTime
+ }
+ return nil
+}
+
+func (m *MutationRecord) GetMutatedBy() string {
+ if m != nil {
+ return m.MutatedBy
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*MutationRecord)(nil), "google.monitoring.v3.MutationRecord")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/mutation_record.proto", fileDescriptor_83c24e690bdb9101)
+}
+
+var fileDescriptor_83c24e690bdb9101 = []byte{
+ // 251 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33,
+ 0xd6, 0xcf, 0x2d, 0x2d, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a,
+ 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0xa8, 0xd5, 0x43, 0xa8, 0xd5, 0x2b, 0x33,
+ 0x96, 0x92, 0x87, 0x9a, 0x00, 0x56, 0x93, 0x54, 0x9a, 0xa6, 0x5f, 0x92, 0x99, 0x9b, 0x5a, 0x5c,
+ 0x92, 0x98, 0x5b, 0x00, 0xd1, 0xa6, 0x94, 0xc3, 0xc5, 0xe7, 0x0b, 0x35, 0x2f, 0x08, 0x6c, 0x9c,
+ 0x90, 0x35, 0x17, 0x37, 0xd8, 0x86, 0xd4, 0x78, 0x90, 0x5a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e,
+ 0x23, 0x29, 0x3d, 0xa8, 0xf1, 0x30, 0x83, 0xf4, 0x42, 0x60, 0x06, 0x05, 0x71, 0x41, 0x94, 0x83,
+ 0x04, 0x84, 0x64, 0xb9, 0xa0, 0xbc, 0x94, 0xf8, 0xa4, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce,
+ 0x20, 0x4e, 0xa8, 0x88, 0x53, 0xa5, 0xd3, 0x6a, 0x46, 0x2e, 0x89, 0xe4, 0xfc, 0x5c, 0x3d, 0x6c,
+ 0x6e, 0x75, 0x12, 0x46, 0x75, 0x48, 0x00, 0xc8, 0xa6, 0x00, 0xc6, 0x28, 0x3b, 0xa8, 0xe2, 0xf4,
+ 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x3b, 0xf4,
+ 0x21, 0x52, 0x89, 0x05, 0x99, 0xc5, 0xa8, 0x61, 0x64, 0x8d, 0xe0, 0xad, 0x62, 0x92, 0x72, 0x87,
+ 0x18, 0xe0, 0x9c, 0x93, 0x5f, 0x9a, 0xa2, 0xe7, 0x8b, 0xb0, 0x33, 0xcc, 0xf8, 0x14, 0x4c, 0x32,
+ 0x06, 0x2c, 0x19, 0x83, 0x90, 0x8c, 0x09, 0x33, 0x4e, 0x62, 0x03, 0x5b, 0x62, 0x0c, 0x08, 0x00,
+ 0x00, 0xff, 0xff, 0x95, 0xa7, 0xf3, 0xbd, 0x87, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
new file mode 100644
index 00000000000..6147a91d7bc
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go
@@ -0,0 +1,374 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/notification.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ label "google.golang.org/genproto/googleapis/api/label"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Indicates whether the channel has been verified or not. It is illegal
+// to specify this field in a
+// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
+// or an
+// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+// operation.
+type NotificationChannel_VerificationStatus int32
+
+const (
+ // Sentinel value used to indicate that the state is unknown, omitted, or
+ // is not applicable (as in the case of channels that neither support
+ // nor require verification in order to function).
+ NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
+ // The channel has yet to be verified and requires verification to function.
+ // Note that this state also applies to the case where the verification
+ // process has been initiated by sending a verification code but where
+ // the verification code has not been submitted to complete the process.
+ NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
+ // It has been proven that notifications can be received on this
+ // notification channel and that someone on the project has access
+ // to messages that are delivered to that channel.
+ NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
+)
+
+var NotificationChannel_VerificationStatus_name = map[int32]string{
+ 0: "VERIFICATION_STATUS_UNSPECIFIED",
+ 1: "UNVERIFIED",
+ 2: "VERIFIED",
+}
+
+var NotificationChannel_VerificationStatus_value = map[string]int32{
+ "VERIFICATION_STATUS_UNSPECIFIED": 0,
+ "UNVERIFIED": 1,
+ "VERIFIED": 2,
+}
+
+func (x NotificationChannel_VerificationStatus) String() string {
+ return proto.EnumName(NotificationChannel_VerificationStatus_name, int32(x))
+}
+
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_4399f1e4bc1a75ef, []int{1, 0}
+}
+
+// A description of a notification channel. The descriptor includes
+// the properties of the channel and the set of labels or fields that
+// must be specified to configure channels of a given type.
+type NotificationChannelDescriptor struct {
+ // The full REST resource name for this descriptor. The syntax is:
+ //
+ // projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE]
+ //
+ // In the above, `[TYPE]` is the value of the `type` field.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // The type of notification channel, such as "email", "sms", etc.
+ // Notification channel types are globally unique.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // A human-readable name for the notification channel type. This
+ // form of the name is suitable for a user interface.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // A human-readable description of the notification channel
+ // type. The description may include a description of the properties
+ // of the channel and pointers to external documentation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // The set of labels that must be defined to identify a particular
+ // channel of the corresponding type. Each label includes a
+ // description for how that field should be populated.
+ Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ // The tiers that support this notification channel; the project service tier
+ // must be one of the supported_tiers.
+ SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"` // Deprecated: Do not use.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NotificationChannelDescriptor) Reset() { *m = NotificationChannelDescriptor{} }
+func (m *NotificationChannelDescriptor) String() string { return proto.CompactTextString(m) }
+func (*NotificationChannelDescriptor) ProtoMessage() {}
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4399f1e4bc1a75ef, []int{0}
+}
+
+func (m *NotificationChannelDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NotificationChannelDescriptor.Unmarshal(m, b)
+}
+func (m *NotificationChannelDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NotificationChannelDescriptor.Marshal(b, m, deterministic)
+}
+func (m *NotificationChannelDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NotificationChannelDescriptor.Merge(m, src)
+}
+func (m *NotificationChannelDescriptor) XXX_Size() int {
+ return xxx_messageInfo_NotificationChannelDescriptor.Size(m)
+}
+func (m *NotificationChannelDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_NotificationChannelDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NotificationChannelDescriptor proto.InternalMessageInfo
+
+func (m *NotificationChannelDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (m *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
+ if m != nil {
+ return m.SupportedTiers
+ }
+ return nil
+}
+
+// A `NotificationChannel` is a medium through which an alert is
+// delivered when a policy violation is detected. Examples of channels
+// include email, SMS, and third-party messaging applications. Fields
+// containing sensitive information like authentication tokens or
+// contact info are only partially populated on retrieval.
+type NotificationChannel struct {
+ // The type of the notification channel. This field matches the
+ // value of the [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type] field.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The full REST resource name for this channel. The syntax is:
+ //
+ // projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
+ //
+ // The `[CHANNEL_ID]` is automatically assigned by the server on creation.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional human-readable name for this notification channel. It is
+ // recommended that you specify a non-empty and unique name in order to
+ // make it easier to identify the channels in your project, though this is
+ // not enforced. The display name is limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // An optional human-readable description of this notification channel. This
+ // description may provide additional details, beyond the display
+ // name, for the channel. This may not exceeed 1024 Unicode characters.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ // Configuration fields that define the channel and its behavior. The
+ // permissible and required labels are specified in the
+ // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels] of the
+ // `NotificationChannelDescriptor` corresponding to the `type` field.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // User-supplied key/value data that does not need to conform to
+ // the corresponding `NotificationChannelDescriptor`'s schema, unlike
+ // the `labels` field. This field is intended to be used for organizing
+ // and identifying the `NotificationChannel` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Indicates whether this channel has been verified or not. On a
+ // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // or
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation, this field is expected to be populated.
+ //
+ // If the value is `UNVERIFIED`, then it indicates that the channel is
+ // non-functioning (it both requires verification and lacks verification);
+ // otherwise, it is assumed that the channel works.
+ //
+ // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
+ // the channel is of a type that does not require verification or that
+ // this specific channel has been exempted from verification because it was
+ // created prior to verification being required for channels of this type.
+ //
+ // This field cannot be modified using a standard
+ // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+ // operation. To change the value of this field, you must call
+ // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
+ VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
+ // Whether notifications are forwarded to the described channel. This makes
+ // it possible to disable delivery of notifications to a particular channel
+ // without removing the channel from all alerting policies that reference
+ // the channel. This is a more convenient approach when the change is
+ // temporary and you want to receive notifications from the same set
+ // of alerting policies on the channel at some point in the future.
+ Enabled *wrappers.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NotificationChannel) Reset() { *m = NotificationChannel{} }
+func (m *NotificationChannel) String() string { return proto.CompactTextString(m) }
+func (*NotificationChannel) ProtoMessage() {}
+func (*NotificationChannel) Descriptor() ([]byte, []int) {
+ return fileDescriptor_4399f1e4bc1a75ef, []int{1}
+}
+
+func (m *NotificationChannel) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NotificationChannel.Unmarshal(m, b)
+}
+func (m *NotificationChannel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NotificationChannel.Marshal(b, m, deterministic)
+}
+func (m *NotificationChannel) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NotificationChannel.Merge(m, src)
+}
+func (m *NotificationChannel) XXX_Size() int {
+ return xxx_messageInfo_NotificationChannel.Size(m)
+}
+func (m *NotificationChannel) XXX_DiscardUnknown() {
+ xxx_messageInfo_NotificationChannel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NotificationChannel proto.InternalMessageInfo
+
+func (m *NotificationChannel) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *NotificationChannel) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *NotificationChannel) GetUserLabels() map[string]string {
+ if m != nil {
+ return m.UserLabels
+ }
+ return nil
+}
+
+func (m *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
+ if m != nil {
+ return m.VerificationStatus
+ }
+ return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
+}
+
+func (m *NotificationChannel) GetEnabled() *wrappers.BoolValue {
+ if m != nil {
+ return m.Enabled
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.monitoring.v3.NotificationChannel_VerificationStatus", NotificationChannel_VerificationStatus_name, NotificationChannel_VerificationStatus_value)
+ proto.RegisterType((*NotificationChannelDescriptor)(nil), "google.monitoring.v3.NotificationChannelDescriptor")
+ proto.RegisterType((*NotificationChannel)(nil), "google.monitoring.v3.NotificationChannel")
+ proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.LabelsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.NotificationChannel.UserLabelsEntry")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/notification.proto", fileDescriptor_4399f1e4bc1a75ef)
+}
+
+var fileDescriptor_4399f1e4bc1a75ef = []byte{
+ // 602 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x6d, 0x6b, 0xdb, 0x3c,
+ 0x14, 0x7d, 0x9c, 0x34, 0x7d, 0x5a, 0xb9, 0xa4, 0x9d, 0x5a, 0x86, 0xf1, 0xde, 0xd2, 0xee, 0xc3,
+ 0xf2, 0xc9, 0x86, 0x64, 0x83, 0x75, 0x6f, 0xd0, 0xa4, 0xe9, 0x08, 0xac, 0x59, 0xc9, 0xdb, 0xa0,
+ 0x14, 0x82, 0x92, 0xa8, 0x9e, 0x98, 0x2d, 0x19, 0x49, 0xf6, 0xc8, 0xcf, 0xd8, 0x8f, 0xd8, 0x87,
+ 0xed, 0xa7, 0xec, 0x57, 0x0d, 0xcb, 0x8a, 0xed, 0xb5, 0x86, 0x75, 0xdf, 0x74, 0xcf, 0x3d, 0xe7,
+ 0xdc, 0x7b, 0x4f, 0x4c, 0xc0, 0x33, 0x8f, 0x31, 0xcf, 0xc7, 0x6e, 0xc0, 0x28, 0x91, 0x8c, 0x13,
+ 0xea, 0xb9, 0x71, 0xdb, 0xa5, 0x4c, 0x92, 0x6b, 0xb2, 0x40, 0x92, 0x30, 0xea, 0x84, 0x9c, 0x49,
+ 0x06, 0x0f, 0x52, 0xa2, 0x93, 0x13, 0x9d, 0xb8, 0x6d, 0x3f, 0xd4, 0x72, 0x14, 0x12, 0x17, 0x51,
+ 0xca, 0xa4, 0x92, 0x88, 0x54, 0x63, 0xdf, 0x2f, 0x74, 0x7d, 0x34, 0xc7, 0xbe, 0xc6, 0x0f, 0x4b,
+ 0x87, 0x2e, 0x58, 0x10, 0xac, 0xc7, 0xd9, 0x8f, 0x35, 0x45, 0x55, 0xf3, 0xe8, 0xda, 0xfd, 0xca,
+ 0x51, 0x18, 0x62, 0xae, 0xad, 0x8f, 0xbe, 0x55, 0xc0, 0xa3, 0x41, 0x61, 0xcb, 0xee, 0x67, 0x44,
+ 0x29, 0xf6, 0x4f, 0xb1, 0x58, 0x70, 0x12, 0x4a, 0xc6, 0x21, 0x04, 0x1b, 0x14, 0x05, 0xd8, 0xda,
+ 0x6c, 0x18, 0xcd, 0xed, 0xa1, 0x7a, 0x27, 0x98, 0x5c, 0x85, 0xd8, 0x32, 0x52, 0x2c, 0x79, 0xc3,
+ 0x43, 0xb0, 0xb3, 0x24, 0x22, 0xf4, 0xd1, 0x6a, 0xa6, 0xf8, 0x15, 0xd5, 0x33, 0x35, 0x36, 0x48,
+ 0x64, 0x0d, 0x60, 0x2e, 0xb5, 0x31, 0x61, 0xd4, 0xaa, 0x6a, 0x46, 0x0e, 0xc1, 0x36, 0xd8, 0x54,
+ 0x07, 0x0a, 0x6b, 0xa3, 0x51, 0x6d, 0x9a, 0xad, 0x07, 0x8e, 0x8e, 0x0b, 0x85, 0xc4, 0xf9, 0x90,
+ 0x74, 0xf2, 0xcd, 0x86, 0x9a, 0x0a, 0x07, 0x60, 0x57, 0x44, 0x61, 0xc8, 0xb8, 0xc4, 0xcb, 0x99,
+ 0x24, 0x98, 0x0b, 0xab, 0xd6, 0xa8, 0x36, 0xeb, 0xad, 0x43, 0xa7, 0x2c, 0x6c, 0x67, 0x84, 0x79,
+ 0x4c, 0x16, 0x78, 0x4c, 0x30, 0xef, 0x54, 0x2c, 0x63, 0x58, 0xcf, 0xd4, 0x09, 0x24, 0x8e, 0xbe,
+ 0xd7, 0xc0, 0x7e, 0x49, 0x26, 0xa5, 0x57, 0x97, 0xa5, 0x73, 0x33, 0x89, 0xea, 0x5f, 0x93, 0xd8,
+ 0xb8, 0x9d, 0xc4, 0x79, 0x96, 0x44, 0x4d, 0x25, 0xf1, 0xa2, 0xfc, 0x96, 0x92, 0x3d, 0xd3, 0x9c,
+ 0x44, 0x8f, 0x4a, 0xbe, 0xca, 0x32, 0xba, 0x04, 0x66, 0x24, 0x30, 0x9f, 0x69, 0xcf, 0x2d, 0xe5,
+ 0x79, 0x7c, 0x77, 0xcf, 0x89, 0xc0, 0xbc, 0xe8, 0x0b, 0xa2, 0x0c, 0x80, 0x01, 0xd8, 0x8f, 0x31,
+ 0xcf, 0x24, 0x33, 0x21, 0x91, 0x8c, 0x84, 0xb5, 0xdd, 0x30, 0x9a, 0xf5, 0xd6, 0x9b, 0xbb, 0xcf,
+ 0x98, 0x16, 0x4c, 0x46, 0xca, 0x63, 0x08, 0xe3, 0x5b, 0x18, 0x7c, 0x0e, 0xfe, 0xc7, 0x14, 0xcd,
+ 0x7d, 0xbc, 0xb4, 0xcc, 0x86, 0xd1, 0x34, 0x5b, 0xf6, 0x7a, 0xc4, 0xfa, 0x23, 0x77, 0x3a, 0x8c,
+ 0xf9, 0x53, 0xe4, 0x47, 0x78, 0xb8, 0xa6, 0xda, 0xc7, 0xc0, 0x2c, 0xec, 0x0f, 0xf7, 0x40, 0xf5,
+ 0x0b, 0x5e, 0xe9, 0x9f, 0x32, 0x79, 0xc2, 0x03, 0x50, 0x8b, 0x13, 0x89, 0xfe, 0x70, 0xd3, 0xe2,
+ 0x55, 0xe5, 0xa5, 0x61, 0xbf, 0x05, 0xbb, 0x37, 0xce, 0xff, 0x17, 0xf9, 0xd1, 0x27, 0x00, 0x6f,
+ 0x5f, 0x06, 0x9f, 0x82, 0x27, 0xd3, 0xde, 0xb0, 0x7f, 0xd6, 0xef, 0x9e, 0x8c, 0xfb, 0x1f, 0x07,
+ 0xb3, 0xd1, 0xf8, 0x64, 0x3c, 0x19, 0xcd, 0x26, 0x83, 0xd1, 0x45, 0xaf, 0xdb, 0x3f, 0xeb, 0xf7,
+ 0x4e, 0xf7, 0xfe, 0x83, 0x75, 0x00, 0x26, 0x83, 0x94, 0xd6, 0x3b, 0xdd, 0x33, 0xe0, 0x0e, 0xd8,
+ 0xca, 0xaa, 0x4a, 0xe7, 0x87, 0x01, 0xac, 0x05, 0x0b, 0x4a, 0x03, 0xee, 0xdc, 0x2b, 0x26, 0x7c,
+ 0x91, 0x04, 0x73, 0x61, 0x5c, 0xbe, 0xd3, 0x54, 0x8f, 0xf9, 0x88, 0x7a, 0x0e, 0xe3, 0x9e, 0xeb,
+ 0x61, 0xaa, 0x62, 0x73, 0xd3, 0x16, 0x0a, 0x89, 0xf8, 0xf3, 0xff, 0xe4, 0x75, 0x5e, 0xfd, 0xac,
+ 0xd8, 0xef, 0x53, 0x83, 0xae, 0xcf, 0xa2, 0xa5, 0x73, 0x9e, 0x4f, 0x9c, 0xb6, 0x7f, 0xad, 0x9b,
+ 0x57, 0xaa, 0x79, 0x95, 0x37, 0xaf, 0xa6, 0xed, 0xf9, 0xa6, 0x1a, 0xd2, 0xfe, 0x1d, 0x00, 0x00,
+ 0xff, 0xff, 0xf7, 0x1b, 0x09, 0x21, 0x28, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
new file mode 100644
index 00000000000..35c25b7d2b9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go
@@ -0,0 +1,1319 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/notification_service.proto
+
+package monitoring
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ field_mask "google.golang.org/genproto/protobuf/field_mask"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The `ListNotificationChannelDescriptors` request.
+type ListNotificationChannelDescriptorsRequest struct {
+ // The REST resource name of the parent from which to retrieve
+ // the notification channel descriptors. The expected syntax is:
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this names the parent container in which to look for the
+ // descriptors; to retrieve a single descriptor by name, use the
+ // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) Reset() {
+ *m = ListNotificationChannelDescriptorsRequest{}
+}
+func (m *ListNotificationChannelDescriptorsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{0}
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Merge(m, src)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsRequest.Size(m)
+}
+func (m *ListNotificationChannelDescriptorsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNotificationChannelDescriptorsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelDescriptorsRequest proto.InternalMessageInfo
+
+func (m *ListNotificationChannelDescriptorsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannelDescriptors` response.
+type ListNotificationChannelDescriptorsResponse struct {
+ // The monitored resource descriptors supported for the specified
+ // project, optionally filtered.
+ ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) Reset() {
+ *m = ListNotificationChannelDescriptorsResponse{}
+}
+func (m *ListNotificationChannelDescriptorsResponse) String() string {
+ return proto.CompactTextString(m)
+}
+func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{1}
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Merge(m, src)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListNotificationChannelDescriptorsResponse.Size(m)
+}
+func (m *ListNotificationChannelDescriptorsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNotificationChannelDescriptorsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelDescriptorsResponse proto.InternalMessageInfo
+
+func (m *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
+ if m != nil {
+ return m.ChannelDescriptors
+ }
+ return nil
+}
+
+func (m *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannelDescriptor` response.
+type GetNotificationChannelDescriptorRequest struct {
+ // The channel type for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNotificationChannelDescriptorRequest) Reset() {
+ *m = GetNotificationChannelDescriptorRequest{}
+}
+func (m *GetNotificationChannelDescriptorRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {}
+func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{2}
+}
+
+func (m *GetNotificationChannelDescriptorRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Unmarshal(m, b)
+}
+func (m *GetNotificationChannelDescriptorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNotificationChannelDescriptorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNotificationChannelDescriptorRequest.Merge(m, src)
+}
+func (m *GetNotificationChannelDescriptorRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNotificationChannelDescriptorRequest.Size(m)
+}
+func (m *GetNotificationChannelDescriptorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNotificationChannelDescriptorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNotificationChannelDescriptorRequest proto.InternalMessageInfo
+
+func (m *GetNotificationChannelDescriptorRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `CreateNotificationChannel` request.
+type CreateNotificationChannelRequest struct {
+ // The project on which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID]
+ //
+ // Note that this names the container into which the channel will be
+ // written. This does not name the newly created channel. The resulting
+ // channel's name will have a normalized version of this field as a prefix,
+ // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The definition of the `NotificationChannel` to create.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateNotificationChannelRequest) Reset() { *m = CreateNotificationChannelRequest{} }
+func (m *CreateNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateNotificationChannelRequest) ProtoMessage() {}
+func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{3}
+}
+
+func (m *CreateNotificationChannelRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateNotificationChannelRequest.Unmarshal(m, b)
+}
+func (m *CreateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateNotificationChannelRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateNotificationChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateNotificationChannelRequest.Merge(m, src)
+}
+func (m *CreateNotificationChannelRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateNotificationChannelRequest.Size(m)
+}
+func (m *CreateNotificationChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateNotificationChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateNotificationChannelRequest proto.InternalMessageInfo
+
+func (m *CreateNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if m != nil {
+ return m.NotificationChannel
+ }
+ return nil
+}
+
+// The `ListNotificationChannels` request.
+type ListNotificationChannelsRequest struct {
+ // The project on which to execute the request. The format is
+ // `projects/[PROJECT_ID]`. That is, this names the container
+ // in which to look for the notification channels; it does not name a
+ // specific channel. To query a specific channel by REST resource name, use
+ // the
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel] operation.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // notification channels to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A comma-separated list of fields by which to sort the result. Supports
+ // the same set of fields as in `filter`. Entries can be prefixed with
+ // a minus sign to sort in descending rather than ascending order.
+ //
+ // For more details, see [sorting and
+ // filtering](/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNotificationChannelsRequest) Reset() { *m = ListNotificationChannelsRequest{} }
+func (m *ListNotificationChannelsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelsRequest) ProtoMessage() {}
+func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{4}
+}
+
+func (m *ListNotificationChannelsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNotificationChannelsRequest.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNotificationChannelsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNotificationChannelsRequest.Merge(m, src)
+}
+func (m *ListNotificationChannelsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListNotificationChannelsRequest.Size(m)
+}
+func (m *ListNotificationChannelsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNotificationChannelsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelsRequest proto.InternalMessageInfo
+
+func (m *ListNotificationChannelsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetFilter() string {
+ if m != nil {
+ return m.Filter
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetOrderBy() string {
+ if m != nil {
+ return m.OrderBy
+ }
+ return ""
+}
+
+func (m *ListNotificationChannelsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListNotificationChannelsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannels` response.
+type ListNotificationChannelsResponse struct {
+ // The notification channels defined for the specified project.
+ NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNotificationChannelsResponse) Reset() { *m = ListNotificationChannelsResponse{} }
+func (m *ListNotificationChannelsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListNotificationChannelsResponse) ProtoMessage() {}
+func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{5}
+}
+
+func (m *ListNotificationChannelsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNotificationChannelsResponse.Unmarshal(m, b)
+}
+func (m *ListNotificationChannelsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNotificationChannelsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListNotificationChannelsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNotificationChannelsResponse.Merge(m, src)
+}
+func (m *ListNotificationChannelsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListNotificationChannelsResponse.Size(m)
+}
+func (m *ListNotificationChannelsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNotificationChannelsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNotificationChannelsResponse proto.InternalMessageInfo
+
+func (m *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel {
+ if m != nil {
+ return m.NotificationChannels
+ }
+ return nil
+}
+
+func (m *ListNotificationChannelsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannel` request.
+type GetNotificationChannelRequest struct {
+ // The channel for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNotificationChannelRequest) Reset() { *m = GetNotificationChannelRequest{} }
+func (m *GetNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNotificationChannelRequest) ProtoMessage() {}
+func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{6}
+}
+
+func (m *GetNotificationChannelRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNotificationChannelRequest.Unmarshal(m, b)
+}
+func (m *GetNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNotificationChannelRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNotificationChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNotificationChannelRequest.Merge(m, src)
+}
+func (m *GetNotificationChannelRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNotificationChannelRequest.Size(m)
+}
+func (m *GetNotificationChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNotificationChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNotificationChannelRequest proto.InternalMessageInfo
+
+func (m *GetNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `UpdateNotificationChannel` request.
+type UpdateNotificationChannelRequest struct {
+ // The fields to update.
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // A description of the changes to be applied to the specified
+ // notification channel. The description must provide a definition for
+ // fields to be updated; the names of these fields should also be
+ // included in the `update_mask`.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateNotificationChannelRequest) Reset() { *m = UpdateNotificationChannelRequest{} }
+func (m *UpdateNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateNotificationChannelRequest) ProtoMessage() {}
+func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{7}
+}
+
+func (m *UpdateNotificationChannelRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateNotificationChannelRequest.Unmarshal(m, b)
+}
+func (m *UpdateNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateNotificationChannelRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateNotificationChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateNotificationChannelRequest.Merge(m, src)
+}
+func (m *UpdateNotificationChannelRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateNotificationChannelRequest.Size(m)
+}
+func (m *UpdateNotificationChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateNotificationChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateNotificationChannelRequest proto.InternalMessageInfo
+
+func (m *UpdateNotificationChannelRequest) GetUpdateMask() *field_mask.FieldMask {
+ if m != nil {
+ return m.UpdateMask
+ }
+ return nil
+}
+
+func (m *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if m != nil {
+ return m.NotificationChannel
+ }
+ return nil
+}
+
+// The `DeleteNotificationChannel` request.
+type DeleteNotificationChannelRequest struct {
+ // The channel for which to execute the request. The format is
+ // `projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // If true, the notification channel will be deleted regardless of its
+ // use in alert policies (the policies will be updated to remove the
+ // channel). If false, channels that are still referenced by an existing
+ // alerting policy will fail to be deleted in a delete operation.
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteNotificationChannelRequest) Reset() { *m = DeleteNotificationChannelRequest{} }
+func (m *DeleteNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteNotificationChannelRequest) ProtoMessage() {}
+func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{8}
+}
+
+func (m *DeleteNotificationChannelRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteNotificationChannelRequest.Unmarshal(m, b)
+}
+func (m *DeleteNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteNotificationChannelRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteNotificationChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteNotificationChannelRequest.Merge(m, src)
+}
+func (m *DeleteNotificationChannelRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteNotificationChannelRequest.Size(m)
+}
+func (m *DeleteNotificationChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteNotificationChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteNotificationChannelRequest proto.InternalMessageInfo
+
+func (m *DeleteNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *DeleteNotificationChannelRequest) GetForce() bool {
+ if m != nil {
+ return m.Force
+ }
+ return false
+}
+
+// The `SendNotificationChannelVerificationCode` request.
+type SendNotificationChannelVerificationCodeRequest struct {
+ // The notification channel to which to send a verification code.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SendNotificationChannelVerificationCodeRequest) Reset() {
+ *m = SendNotificationChannelVerificationCodeRequest{}
+}
+func (m *SendNotificationChannelVerificationCodeRequest) String() string {
+ return proto.CompactTextString(m)
+}
+func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{9}
+}
+
+func (m *SendNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Unmarshal(m, b)
+}
+func (m *SendNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic)
+}
+func (m *SendNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Merge(m, src)
+}
+func (m *SendNotificationChannelVerificationCodeRequest) XXX_Size() int {
+ return xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.Size(m)
+}
+func (m *SendNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SendNotificationChannelVerificationCodeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SendNotificationChannelVerificationCodeRequest proto.InternalMessageInfo
+
+func (m *SendNotificationChannelVerificationCodeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeRequest struct {
+ // The notification channel for which a verification code is to be generated
+ // and retrieved. This must name a channel that is already verified; if
+ // the specified channel is not verified, the request will fail.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The desired expiration time. If specified, the API will guarantee that
+ // the returned code will not be valid after the specified timestamp;
+ // however, the API cannot guarantee that the returned code will be
+ // valid for at least as long as the requested time (the API puts an upper
+ // bound on the amount of time for which a code may be valid). If omitted,
+ // a default expiration will be used, which may be less than the max
+ // permissible expiration (so specifying an expiration may extend the
+ // code's lifetime over omitting an expiration, even though the API does
+ // impose an upper limit on the maximum expiration that is permitted).
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) Reset() {
+ *m = GetNotificationChannelVerificationCodeRequest{}
+}
+func (m *GetNotificationChannelVerificationCodeRequest) String() string {
+ return proto.CompactTextString(m)
+}
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{10}
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Unmarshal(m, b)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Merge(m, src)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.Size(m)
+}
+func (m *GetNotificationChannelVerificationCodeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNotificationChannelVerificationCodeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNotificationChannelVerificationCodeRequest proto.InternalMessageInfo
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.ExpireTime
+ }
+ return nil
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeResponse struct {
+ // The verification code, which may be used to verify other channels
+ // that have an equivalent identity (i.e. other channels of the same
+ // type with the same fingerprint such as other email channels with
+ // the same email address or other sms channels with the same number).
+ Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
+ // The expiration time associated with the code that was returned. If
+ // an expiration was provided in the request, this is the minimum of the
+ // requested expiration in the request and the max permitted expiration.
+ ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) Reset() {
+ *m = GetNotificationChannelVerificationCodeResponse{}
+}
+func (m *GetNotificationChannelVerificationCodeResponse) String() string {
+ return proto.CompactTextString(m)
+}
+func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {}
+func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{11}
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Unmarshal(m, b)
+}
+func (m *GetNotificationChannelVerificationCodeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Marshal(b, m, deterministic)
+}
+func (m *GetNotificationChannelVerificationCodeResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Merge(m, src)
+}
+func (m *GetNotificationChannelVerificationCodeResponse) XXX_Size() int {
+ return xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.Size(m)
+}
+func (m *GetNotificationChannelVerificationCodeResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNotificationChannelVerificationCodeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNotificationChannelVerificationCodeResponse proto.InternalMessageInfo
+
+func (m *GetNotificationChannelVerificationCodeResponse) GetCode() string {
+ if m != nil {
+ return m.Code
+ }
+ return ""
+}
+
+func (m *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.ExpireTime
+ }
+ return nil
+}
+
+// The `VerifyNotificationChannel` request.
+type VerifyNotificationChannelRequest struct {
+ // The notification channel to verify.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The verification code that was delivered to the channel as
+ // a result of invoking the `SendNotificationChannelVerificationCode` API
+ // method or that was retrieved from a verified channel via
+ // `GetNotificationChannelVerificationCode`. For example, one might have
+ // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
+ // guaranteed that the code is valid UTF-8; one should not
+ // make any assumptions regarding the structure or format of the code).
+ Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VerifyNotificationChannelRequest) Reset() { *m = VerifyNotificationChannelRequest{} }
+func (m *VerifyNotificationChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*VerifyNotificationChannelRequest) ProtoMessage() {}
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7e2bcd7194b305fe, []int{12}
+}
+
+func (m *VerifyNotificationChannelRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VerifyNotificationChannelRequest.Unmarshal(m, b)
+}
+func (m *VerifyNotificationChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VerifyNotificationChannelRequest.Marshal(b, m, deterministic)
+}
+func (m *VerifyNotificationChannelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VerifyNotificationChannelRequest.Merge(m, src)
+}
+func (m *VerifyNotificationChannelRequest) XXX_Size() int {
+ return xxx_messageInfo_VerifyNotificationChannelRequest.Size(m)
+}
+func (m *VerifyNotificationChannelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_VerifyNotificationChannelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VerifyNotificationChannelRequest proto.InternalMessageInfo
+
+func (m *VerifyNotificationChannelRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *VerifyNotificationChannelRequest) GetCode() string {
+ if m != nil {
+ return m.Code
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*ListNotificationChannelDescriptorsRequest)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsRequest")
+ proto.RegisterType((*ListNotificationChannelDescriptorsResponse)(nil), "google.monitoring.v3.ListNotificationChannelDescriptorsResponse")
+ proto.RegisterType((*GetNotificationChannelDescriptorRequest)(nil), "google.monitoring.v3.GetNotificationChannelDescriptorRequest")
+ proto.RegisterType((*CreateNotificationChannelRequest)(nil), "google.monitoring.v3.CreateNotificationChannelRequest")
+ proto.RegisterType((*ListNotificationChannelsRequest)(nil), "google.monitoring.v3.ListNotificationChannelsRequest")
+ proto.RegisterType((*ListNotificationChannelsResponse)(nil), "google.monitoring.v3.ListNotificationChannelsResponse")
+ proto.RegisterType((*GetNotificationChannelRequest)(nil), "google.monitoring.v3.GetNotificationChannelRequest")
+ proto.RegisterType((*UpdateNotificationChannelRequest)(nil), "google.monitoring.v3.UpdateNotificationChannelRequest")
+ proto.RegisterType((*DeleteNotificationChannelRequest)(nil), "google.monitoring.v3.DeleteNotificationChannelRequest")
+ proto.RegisterType((*SendNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.SendNotificationChannelVerificationCodeRequest")
+ proto.RegisterType((*GetNotificationChannelVerificationCodeRequest)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeRequest")
+ proto.RegisterType((*GetNotificationChannelVerificationCodeResponse)(nil), "google.monitoring.v3.GetNotificationChannelVerificationCodeResponse")
+ proto.RegisterType((*VerifyNotificationChannelRequest)(nil), "google.monitoring.v3.VerifyNotificationChannelRequest")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/notification_service.proto", fileDescriptor_7e2bcd7194b305fe)
+}
+
+var fileDescriptor_7e2bcd7194b305fe = []byte{
+ // 1011 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x41, 0x6f, 0xdc, 0x44,
+ 0x14, 0xd6, 0xec, 0x26, 0x69, 0xfa, 0x22, 0x04, 0x9a, 0x86, 0xc8, 0xbb, 0xa5, 0xaa, 0xe5, 0x43,
+ 0x93, 0xae, 0x8a, 0x2d, 0xad, 0x4b, 0x84, 0x52, 0x52, 0xda, 0x64, 0xdb, 0x22, 0x48, 0x51, 0xb4,
+ 0x29, 0x91, 0x40, 0x11, 0x2b, 0xc7, 0x7e, 0x6b, 0x4c, 0x76, 0x67, 0x8c, 0x3d, 0x89, 0x9a, 0x56,
+ 0x95, 0x0a, 0x7f, 0x01, 0xfe, 0x00, 0x12, 0xa7, 0x1e, 0x10, 0x67, 0x50, 0x39, 0x23, 0xae, 0x08,
+ 0xae, 0x5c, 0xe0, 0x7f, 0x20, 0x8f, 0xbd, 0xd9, 0xcd, 0x66, 0xbc, 0x6b, 0x37, 0xdc, 0x3c, 0xf3,
+ 0xde, 0xbc, 0xf7, 0xbd, 0xef, 0x7d, 0x9e, 0x67, 0x83, 0xe5, 0x73, 0xee, 0xf7, 0xd0, 0xea, 0x73,
+ 0x16, 0x08, 0x1e, 0x05, 0xcc, 0xb7, 0x8e, 0x6c, 0x8b, 0x71, 0x11, 0x74, 0x03, 0xd7, 0x11, 0x01,
+ 0x67, 0x9d, 0x18, 0xa3, 0xa3, 0xc0, 0x45, 0x33, 0x8c, 0xb8, 0xe0, 0x74, 0x31, 0x3d, 0x60, 0x0e,
+ 0x0f, 0x98, 0x47, 0x76, 0xfd, 0xad, 0x2c, 0x8c, 0x13, 0x06, 0x96, 0xc3, 0x18, 0x17, 0xf2, 0x68,
+ 0x9c, 0x9e, 0xa9, 0x2f, 0x4f, 0x4d, 0x92, 0x39, 0x5e, 0xce, 0x1c, 0xe5, 0x6a, 0xff, 0xb0, 0x6b,
+ 0x61, 0x3f, 0x14, 0xc7, 0x99, 0x51, 0x1f, 0x37, 0x76, 0x03, 0xec, 0x79, 0x9d, 0xbe, 0x13, 0x1f,
+ 0x64, 0x1e, 0x57, 0xc7, 0x3d, 0x44, 0xd0, 0xc7, 0x58, 0x38, 0xfd, 0x30, 0x75, 0x30, 0x9e, 0xc2,
+ 0xf5, 0xad, 0x20, 0x16, 0x1f, 0x8f, 0x64, 0xde, 0xfc, 0xc2, 0x61, 0x0c, 0x7b, 0x2d, 0x8c, 0xdd,
+ 0x28, 0x08, 0x05, 0x8f, 0xe2, 0x36, 0x7e, 0x75, 0x88, 0xb1, 0xa0, 0x14, 0x66, 0x98, 0xd3, 0x47,
+ 0x6d, 0x46, 0x27, 0x2b, 0x17, 0xdb, 0xf2, 0x99, 0x5e, 0x86, 0x8b, 0xa1, 0xe3, 0x63, 0x27, 0x0e,
+ 0x9e, 0xa0, 0x56, 0xd1, 0xc9, 0xca, 0x6c, 0x7b, 0x3e, 0xd9, 0xd8, 0x09, 0x9e, 0x20, 0xbd, 0x02,
+ 0x20, 0x8d, 0x82, 0x1f, 0x20, 0xd3, 0xaa, 0xf2, 0x98, 0x74, 0x7f, 0x94, 0x6c, 0x18, 0x3f, 0x13,
+ 0x68, 0x14, 0xc9, 0x1e, 0x87, 0x9c, 0xc5, 0x48, 0x3d, 0xb8, 0xe4, 0xa6, 0xd6, 0x8e, 0x37, 0x34,
+ 0x6b, 0x44, 0xaf, 0xae, 0x2c, 0x34, 0x6d, 0x53, 0xd5, 0x06, 0x73, 0x62, 0xe8, 0x36, 0x75, 0xcf,
+ 0x64, 0xa3, 0xd7, 0xe0, 0x75, 0x86, 0x8f, 0x45, 0x67, 0x04, 0x78, 0x45, 0x02, 0x7f, 0x2d, 0xd9,
+ 0xde, 0x3e, 0x01, 0xbf, 0x0e, 0xcb, 0x0f, 0x70, 0x32, 0xf4, 0x71, 0xde, 0xaa, 0x43, 0xde, 0x8c,
+ 0xef, 0x08, 0xe8, 0x9b, 0x11, 0x3a, 0x02, 0x15, 0x21, 0x26, 0x1c, 0xa4, 0x7b, 0xb0, 0x78, 0x4a,
+ 0x8c, 0x59, 0x09, 0x12, 0xe4, 0x42, 0xf3, 0x7a, 0x61, 0x1a, 0xda, 0x97, 0xd8, 0xd9, 0x4d, 0xe3,
+ 0x07, 0x02, 0x57, 0x73, 0x5a, 0x72, 0x46, 0x06, 0xb3, 0x23, 0xa8, 0x96, 0x60, 0xae, 0x1b, 0xf4,
+ 0x04, 0x46, 0xda, 0x9c, 0xdc, 0xcd, 0x56, 0xb4, 0x06, 0xf3, 0x3c, 0xf2, 0x30, 0xea, 0xec, 0x1f,
+ 0x6b, 0x17, 0xa4, 0xe5, 0x82, 0x5c, 0x6f, 0x1c, 0x9f, 0x56, 0x4e, 0x75, 0xa2, 0x72, 0x66, 0xc6,
+ 0x95, 0xf3, 0x82, 0x80, 0x9e, 0x0f, 0x33, 0xd3, 0xcb, 0xe7, 0xf0, 0xa6, 0x8a, 0xa9, 0x58, 0xab,
+ 0x4a, 0xc5, 0x94, 0xa0, 0x6a, 0x51, 0x41, 0x55, 0x71, 0xa5, 0xd8, 0x70, 0x45, 0xad, 0x94, 0x49,
+ 0xfa, 0x78, 0x49, 0x40, 0xff, 0x24, 0xf4, 0x26, 0xeb, 0xe3, 0x16, 0x2c, 0x1c, 0x4a, 0x1f, 0xf9,
+ 0xce, 0x67, 0x12, 0xa8, 0x0f, 0xea, 0x1a, 0xbc, 0xf4, 0xe6, 0xfd, 0xe4, 0x5a, 0x78, 0xe8, 0xc4,
+ 0x07, 0x6d, 0x48, 0xdd, 0x93, 0xe7, 0x5c, 0x21, 0x55, 0xff, 0x17, 0x21, 0x6d, 0x81, 0xde, 0xc2,
+ 0x1e, 0x96, 0x96, 0xf7, 0x22, 0xcc, 0x76, 0x79, 0xe4, 0xa6, 0xea, 0x9a, 0x6f, 0xa7, 0x0b, 0xa3,
+ 0x05, 0xe6, 0x0e, 0x32, 0x4f, 0x11, 0x6b, 0x17, 0xa3, 0xe1, 0x16, 0xf7, 0x70, 0x3c, 0x36, 0x19,
+ 0xe1, 0xf4, 0x39, 0x81, 0xb7, 0xd5, 0x9d, 0x28, 0x11, 0x25, 0x21, 0x1d, 0x1f, 0x87, 0x41, 0x84,
+ 0x9d, 0xe4, 0x32, 0xcd, 0x25, 0xfd, 0xd1, 0xe0, 0xa6, 0x6d, 0x43, 0xea, 0x9e, 0x6c, 0x18, 0x5f,
+ 0x13, 0x30, 0x8b, 0x42, 0xc8, 0x64, 0x4c, 0x61, 0xc6, 0xe5, 0xde, 0x09, 0x86, 0xe4, 0xf9, 0x7c,
+ 0x18, 0x3e, 0x04, 0x5d, 0x26, 0x3b, 0x2e, 0xd0, 0x9a, 0xd1, 0xc2, 0x07, 0x40, 0x2a, 0x43, 0x20,
+ 0xcd, 0x5f, 0xde, 0x80, 0xba, 0x22, 0xcc, 0x4e, 0x3a, 0x21, 0xe9, 0xbf, 0x04, 0x8c, 0xe9, 0x37,
+ 0x3c, 0x7d, 0x5f, 0x2d, 0xb6, 0xc2, 0x93, 0xa9, 0x7e, 0xe7, 0xd5, 0x03, 0xa4, 0x2c, 0x1b, 0xef,
+ 0x7d, 0xf3, 0xc7, 0x3f, 0xdf, 0x56, 0x56, 0xe9, 0xcd, 0x64, 0x10, 0x3f, 0x4d, 0xea, 0x5d, 0x0f,
+ 0x23, 0xfe, 0x25, 0xba, 0x22, 0xb6, 0x1a, 0xcf, 0x2c, 0x36, 0xb9, 0x80, 0xbf, 0x08, 0xe8, 0xd3,
+ 0xa6, 0x01, 0x5d, 0x57, 0x83, 0x2c, 0x38, 0x45, 0xea, 0xaf, 0x32, 0xe1, 0x8c, 0xdb, 0xb2, 0xac,
+ 0x77, 0xe9, 0xaa, 0xaa, 0xac, 0x29, 0x55, 0x59, 0x8d, 0x67, 0xf4, 0x25, 0x01, 0x2d, 0xef, 0xa2,
+ 0xa5, 0xef, 0x94, 0x62, 0xfd, 0xa4, 0x59, 0xab, 0x65, 0x8f, 0x65, 0x2d, 0x6a, 0xca, 0x5a, 0x6e,
+ 0xd0, 0x46, 0xe1, 0x16, 0xc5, 0xf4, 0x47, 0x02, 0x4b, 0x6a, 0x82, 0xa9, 0x5d, 0xa6, 0x1d, 0x03,
+ 0xec, 0xc5, 0xaf, 0x45, 0xe3, 0xa6, 0x84, 0x6b, 0xd2, 0x1b, 0x45, 0xa9, 0x97, 0x84, 0xff, 0x46,
+ 0xa0, 0x96, 0xfb, 0x5d, 0x40, 0x73, 0xa8, 0x9b, 0xf6, 0x21, 0x51, 0x06, 0xf6, 0x07, 0x12, 0xf6,
+ 0x86, 0x51, 0x82, 0xe5, 0x35, 0xe5, 0x20, 0xa1, 0x7f, 0x13, 0xa8, 0xe5, 0x8e, 0xb0, 0xbc, 0x52,
+ 0xa6, 0xcd, 0xbc, 0x32, 0xa5, 0x74, 0x64, 0x29, 0x9f, 0x36, 0xef, 0xa6, 0xa5, 0x28, 0x30, 0x9a,
+ 0x05, 0xdb, 0x92, 0x53, 0xe1, 0xf7, 0x04, 0x6a, 0xb9, 0x53, 0x2e, 0xaf, 0xc2, 0x69, 0x63, 0xb1,
+ 0xbe, 0x74, 0xe6, 0x1e, 0xbf, 0x97, 0x7c, 0xf4, 0x0f, 0x04, 0xd5, 0x28, 0x27, 0xa8, 0x3f, 0x09,
+ 0x2c, 0x17, 0x9c, 0x9d, 0xb4, 0xa5, 0x46, 0x5c, 0x6e, 0xf4, 0xe6, 0xe2, 0xdf, 0x92, 0xf8, 0xef,
+ 0x1b, 0x77, 0xcb, 0xe0, 0x5f, 0x8b, 0x91, 0x79, 0xe3, 0x99, 0xd6, 0x48, 0x83, 0x3e, 0xaf, 0xc0,
+ 0xb5, 0x62, 0x93, 0x94, 0x6e, 0x96, 0x79, 0xd3, 0xf3, 0xaa, 0x6a, 0x9d, 0x2f, 0x48, 0x76, 0x87,
+ 0x7d, 0x24, 0x39, 0xb8, 0x67, 0xdc, 0x29, 0xc5, 0x81, 0x8f, 0x42, 0x45, 0xc1, 0xaf, 0x04, 0x6a,
+ 0xb9, 0x93, 0x3c, 0x4f, 0x7e, 0xd3, 0x46, 0x7f, 0x99, 0x17, 0x2c, 0x9b, 0x2e, 0x86, 0x5d, 0xaa,
+ 0x9a, 0x23, 0x89, 0x60, 0x8d, 0x34, 0x36, 0x7e, 0x22, 0xa0, 0xb9, 0xbc, 0xaf, 0x4c, 0xb8, 0xa1,
+ 0x8d, 0x66, 0xcc, 0x3e, 0x28, 0xb6, 0x13, 0x45, 0x6d, 0x93, 0xcf, 0x6e, 0x67, 0x27, 0x7c, 0xde,
+ 0x73, 0x98, 0x6f, 0xf2, 0xc8, 0xb7, 0x7c, 0x64, 0x52, 0x6f, 0xd9, 0xff, 0xbb, 0x13, 0x06, 0xf1,
+ 0xe9, 0xdf, 0xeb, 0x5b, 0xc3, 0xd5, 0x8b, 0x4a, 0xfd, 0x41, 0x1a, 0x60, 0xb3, 0xc7, 0x0f, 0x3d,
+ 0xf3, 0xe1, 0x30, 0xf1, 0xae, 0xfd, 0xfb, 0xc0, 0xb8, 0x27, 0x8d, 0x7b, 0x43, 0xe3, 0xde, 0xae,
+ 0xbd, 0x3f, 0x27, 0x93, 0xd8, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x3b, 0xf3, 0x96, 0xf5, 0x27,
+ 0x10, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// NotificationChannelServiceClient is the client API for NotificationChannelService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NotificationChannelServiceClient interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or pagerduty service.
+ CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+}
+
+type notificationChannelServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewNotificationChannelServiceClient(cc *grpc.ClientConn) NotificationChannelServiceClient {
+ return ¬ificationChannelServiceClient{cc}
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
+ out := new(ListNotificationChannelDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) {
+ out := new(NotificationChannelDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) {
+ out := new(ListNotificationChannelsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
+ out := new(GetNotificationChannelVerificationCodeResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NotificationChannelServiceServer is the server API for NotificationChannelService service.
+type NotificationChannelServiceServer interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or pagerduty service.
+ CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*empty.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*empty.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error)
+}
+
+func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) {
+ s.RegisterService(&_NotificationChannelService_serviceDesc, srv)
+}
+
+func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VerifyNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.NotificationChannelService",
+ HandlerType: (*NotificationChannelServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListNotificationChannelDescriptors",
+ Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelDescriptor",
+ Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler,
+ },
+ {
+ MethodName: "ListNotificationChannels",
+ Handler: _NotificationChannelService_ListNotificationChannels_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannel",
+ Handler: _NotificationChannelService_GetNotificationChannel_Handler,
+ },
+ {
+ MethodName: "CreateNotificationChannel",
+ Handler: _NotificationChannelService_CreateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "UpdateNotificationChannel",
+ Handler: _NotificationChannelService_UpdateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "DeleteNotificationChannel",
+ Handler: _NotificationChannelService_DeleteNotificationChannel_Handler,
+ },
+ {
+ MethodName: "SendNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "VerifyNotificationChannel",
+ Handler: _NotificationChannelService_VerifyNotificationChannel_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/notification_service.proto",
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go
new file mode 100644
index 00000000000..b42b8d9f728
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/span_context.pb.go
@@ -0,0 +1,99 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/span_context.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The context of a span, attached to google.api.Distribution.Exemplars
+// in google.api.Distribution values during aggregation.
+//
+// It contains the name of a span with format:
+// projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
+type SpanContext struct {
+ // The resource name of the span in the following format:
+ //
+ // projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
+ //
+ // [TRACE_ID] is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array.
+ //
+ // [SPAN_ID] is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array.
+ SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SpanContext) Reset() { *m = SpanContext{} }
+func (m *SpanContext) String() string { return proto.CompactTextString(m) }
+func (*SpanContext) ProtoMessage() {}
+func (*SpanContext) Descriptor() ([]byte, []int) {
+ return fileDescriptor_933032e252f1c5e4, []int{0}
+}
+
+func (m *SpanContext) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SpanContext.Unmarshal(m, b)
+}
+func (m *SpanContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SpanContext.Marshal(b, m, deterministic)
+}
+func (m *SpanContext) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SpanContext.Merge(m, src)
+}
+func (m *SpanContext) XXX_Size() int {
+ return xxx_messageInfo_SpanContext.Size(m)
+}
+func (m *SpanContext) XXX_DiscardUnknown() {
+ xxx_messageInfo_SpanContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SpanContext proto.InternalMessageInfo
+
+func (m *SpanContext) GetSpanName() string {
+ if m != nil {
+ return m.SpanName
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*SpanContext)(nil), "google.monitoring.v3.SpanContext")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/span_context.proto", fileDescriptor_933032e252f1c5e4)
+}
+
+var fileDescriptor_933032e252f1c5e4 = []byte{
+ // 197 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0xcf, 0xcd, 0xcf, 0xcb, 0x2c, 0xc9, 0x2f, 0xca, 0xcc, 0x4b, 0xd7, 0x2f, 0x33,
+ 0xd6, 0x2f, 0x2e, 0x48, 0xcc, 0x8b, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28,
+ 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x81, 0x28, 0xd4, 0x43, 0x28, 0xd4, 0x2b, 0x33, 0x56, 0xd2, 0xe2,
+ 0xe2, 0x0e, 0x2e, 0x48, 0xcc, 0x73, 0x86, 0x28, 0x15, 0x92, 0xe6, 0xe2, 0x04, 0x6b, 0xcd, 0x4b,
+ 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6,
+ 0x3a, 0xad, 0x60, 0xe4, 0x92, 0x48, 0xce, 0xcf, 0xd5, 0xc3, 0x66, 0x90, 0x93, 0x00, 0x92, 0x31,
+ 0x01, 0x20, 0x0b, 0x03, 0x18, 0xa3, 0xec, 0xa0, 0x2a, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5,
+ 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0xce, 0xd1, 0x87, 0x48, 0x25, 0x16, 0x64, 0x16,
+ 0xa3, 0x3a, 0xdd, 0x1a, 0xc1, 0x5b, 0xc5, 0x24, 0xe5, 0x0e, 0x31, 0xc0, 0x39, 0x27, 0xbf, 0x34,
+ 0x45, 0xcf, 0x17, 0x61, 0x61, 0x98, 0xf1, 0x29, 0x98, 0x64, 0x0c, 0x58, 0x32, 0x06, 0x21, 0x19,
+ 0x13, 0x66, 0x9c, 0xc4, 0x06, 0xb6, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x19, 0x01,
+ 0xcb, 0x1e, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
new file mode 100644
index 00000000000..f7dd41a4385
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go
@@ -0,0 +1,969 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/uptime.proto
+
+package monitoring
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ duration "github.com/golang/protobuf/ptypes/duration"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The regions from which an uptime check can be run.
+type UptimeCheckRegion int32
+
+const (
+ // Default value if no region is specified. Will result in uptime checks
+ // running from all regions.
+ UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0
+ // Allows checks to run from locations within the United States of America.
+ UptimeCheckRegion_USA UptimeCheckRegion = 1
+ // Allows checks to run from locations within the continent of Europe.
+ UptimeCheckRegion_EUROPE UptimeCheckRegion = 2
+ // Allows checks to run from locations within the continent of South
+ // America.
+ UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3
+ // Allows checks to run from locations within the Asia Pacific area (ex:
+ // Singapore).
+ UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4
+)
+
+var UptimeCheckRegion_name = map[int32]string{
+ 0: "REGION_UNSPECIFIED",
+ 1: "USA",
+ 2: "EUROPE",
+ 3: "SOUTH_AMERICA",
+ 4: "ASIA_PACIFIC",
+}
+
+var UptimeCheckRegion_value = map[string]int32{
+ "REGION_UNSPECIFIED": 0,
+ "USA": 1,
+ "EUROPE": 2,
+ "SOUTH_AMERICA": 3,
+ "ASIA_PACIFIC": 4,
+}
+
+func (x UptimeCheckRegion) String() string {
+ return proto.EnumName(UptimeCheckRegion_name, int32(x))
+}
+
+func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{0}
+}
+
+// The supported resource types that can be used as values of
+// `group_resource.resource_type`.
+// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types.
+// The resource types `gae_app` and `uptime_url` are not valid here because
+// group checks on App Engine modules and URLs are not allowed.
+type GroupResourceType int32
+
+const (
+ // Default value (not valid).
+ GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0
+ // A group of instances from Google Cloud Platform (GCP) or
+ // Amazon Web Services (AWS).
+ GroupResourceType_INSTANCE GroupResourceType = 1
+ // A group of Amazon ELB load balancers.
+ GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2
+)
+
+var GroupResourceType_name = map[int32]string{
+ 0: "RESOURCE_TYPE_UNSPECIFIED",
+ 1: "INSTANCE",
+ 2: "AWS_ELB_LOAD_BALANCER",
+}
+
+var GroupResourceType_value = map[string]int32{
+ "RESOURCE_TYPE_UNSPECIFIED": 0,
+ "INSTANCE": 1,
+ "AWS_ELB_LOAD_BALANCER": 2,
+}
+
+func (x GroupResourceType) String() string {
+ return proto.EnumName(GroupResourceType_name, int32(x))
+}
+
+func (GroupResourceType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1}
+}
+
+// Nimbus InternalCheckers.
+type InternalChecker struct {
+ // The GCP project ID. Not necessarily the same as the project_id for the
+ // config.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The internal network to perform this uptime check on.
+ Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"`
+ // The GCP zone the uptime check should egress from. Only respected for
+ // internal uptime checks, where internal_network is specified.
+ GcpZone string `protobuf:"bytes,3,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"`
+ // The checker ID.
+ CheckerId string `protobuf:"bytes,4,opt,name=checker_id,json=checkerId,proto3" json:"checker_id,omitempty"`
+ // The checker's human-readable name.
+ DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InternalChecker) Reset() { *m = InternalChecker{} }
+func (m *InternalChecker) String() string { return proto.CompactTextString(m) }
+func (*InternalChecker) ProtoMessage() {}
+func (*InternalChecker) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{0}
+}
+
+func (m *InternalChecker) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InternalChecker.Unmarshal(m, b)
+}
+func (m *InternalChecker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InternalChecker.Marshal(b, m, deterministic)
+}
+func (m *InternalChecker) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalChecker.Merge(m, src)
+}
+func (m *InternalChecker) XXX_Size() int {
+ return xxx_messageInfo_InternalChecker.Size(m)
+}
+func (m *InternalChecker) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalChecker.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalChecker proto.InternalMessageInfo
+
+func (m *InternalChecker) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+func (m *InternalChecker) GetNetwork() string {
+ if m != nil {
+ return m.Network
+ }
+ return ""
+}
+
+func (m *InternalChecker) GetGcpZone() string {
+ if m != nil {
+ return m.GcpZone
+ }
+ return ""
+}
+
+func (m *InternalChecker) GetCheckerId() string {
+ if m != nil {
+ return m.CheckerId
+ }
+ return ""
+}
+
+func (m *InternalChecker) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+// This message configures which resources and services to monitor for
+// availability.
+type UptimeCheckConfig struct {
+ // A unique resource name for this UptimeCheckConfig. The format is:
+ //
+ //
+ // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
+ //
+ // This field should be omitted when creating the uptime check configuration;
+ // on create, the resource name is assigned by the server and included in the
+ // response.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A human-friendly name for the uptime check configuration. The display name
+ // should be unique within a Stackdriver Account in order to make it easier
+ // to identify; however, uniqueness is not enforced. Required.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The resource the check is checking. Required.
+ //
+ // Types that are valid to be assigned to Resource:
+ // *UptimeCheckConfig_MonitoredResource
+ // *UptimeCheckConfig_ResourceGroup_
+ Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"`
+ // The type of uptime check request.
+ //
+ // Types that are valid to be assigned to CheckRequestType:
+ // *UptimeCheckConfig_HttpCheck_
+ // *UptimeCheckConfig_TcpCheck_
+ CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"`
+ // How often, in seconds, the uptime check is performed.
+ // Currently, the only supported values are `60s` (1 minute), `300s`
+ // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional,
+ // defaults to `300s`.
+ Period *duration.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"`
+ // The maximum amount of time to wait for the request to complete (must be
+ // between 1 and 60 seconds). Required.
+ Timeout *duration.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // The expected content on the page the check is run against.
+ // Currently, only the first entry in the list is supported, and other entries
+ // will be ignored. The server will look for an exact match of the string in
+ // the page response's content. This field is optional and should only be
+ // specified if a content match is required.
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"`
+ // The list of regions from which the check will be run.
+ // If this field is specified, enough regions to include a minimum of
+ // 3 locations must be provided, or an error message is returned.
+ // Not specifying this field will result in uptime checks running from all
+ // regions.
+ SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"`
+ // Denotes whether this is a check that egresses from InternalCheckers.
+ IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"`
+ // The internal checkers that this check will egress from. If `is_internal` is
+ // true and this list is empty, the check will egress from all
+ // InternalCheckers configured for the project that owns this CheckConfig.
+ InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} }
+func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig) ProtoMessage() {}
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1}
+}
+
+func (m *UptimeCheckConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig.Merge(m, src)
+}
+func (m *UptimeCheckConfig) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig.Size(m)
+}
+func (m *UptimeCheckConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+type isUptimeCheckConfig_Resource interface {
+ isUptimeCheckConfig_Resource()
+}
+
+type UptimeCheckConfig_MonitoredResource struct {
+ MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"`
+}
+
+type UptimeCheckConfig_ResourceGroup_ struct {
+ ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {}
+
+func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource {
+ if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok {
+ return x.MonitoredResource
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup {
+ if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok {
+ return x.ResourceGroup
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_CheckRequestType interface {
+ isUptimeCheckConfig_CheckRequestType()
+}
+
+type UptimeCheckConfig_HttpCheck_ struct {
+ HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"`
+}
+
+type UptimeCheckConfig_TcpCheck_ struct {
+ TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType {
+ if m != nil {
+ return m.CheckRequestType
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck {
+ if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok {
+ return x.HttpCheck
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck {
+ if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok {
+ return x.TcpCheck
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetPeriod() *duration.Duration {
+ if m != nil {
+ return m.Period
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetTimeout() *duration.Duration {
+ if m != nil {
+ return m.Timeout
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher {
+ if m != nil {
+ return m.ContentMatchers
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion {
+ if m != nil {
+ return m.SelectedRegions
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetIsInternal() bool {
+ if m != nil {
+ return m.IsInternal
+ }
+ return false
+}
+
+func (m *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker {
+ if m != nil {
+ return m.InternalCheckers
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*UptimeCheckConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _UptimeCheckConfig_OneofMarshaler, _UptimeCheckConfig_OneofUnmarshaler, _UptimeCheckConfig_OneofSizer, []interface{}{
+ (*UptimeCheckConfig_MonitoredResource)(nil),
+ (*UptimeCheckConfig_ResourceGroup_)(nil),
+ (*UptimeCheckConfig_HttpCheck_)(nil),
+ (*UptimeCheckConfig_TcpCheck_)(nil),
+ }
+}
+
+func _UptimeCheckConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*UptimeCheckConfig)
+ // resource
+ switch x := m.Resource.(type) {
+ case *UptimeCheckConfig_MonitoredResource:
+ b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.MonitoredResource); err != nil {
+ return err
+ }
+ case *UptimeCheckConfig_ResourceGroup_:
+ b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResourceGroup); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("UptimeCheckConfig.Resource has unexpected type %T", x)
+ }
+ // check_request_type
+ switch x := m.CheckRequestType.(type) {
+ case *UptimeCheckConfig_HttpCheck_:
+ b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.HttpCheck); err != nil {
+ return err
+ }
+ case *UptimeCheckConfig_TcpCheck_:
+ b.EncodeVarint(6<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.TcpCheck); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("UptimeCheckConfig.CheckRequestType has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _UptimeCheckConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*UptimeCheckConfig)
+ switch tag {
+ case 3: // resource.monitored_resource
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(monitoredres.MonitoredResource)
+ err := b.DecodeMessage(msg)
+ m.Resource = &UptimeCheckConfig_MonitoredResource{msg}
+ return true, err
+ case 4: // resource.resource_group
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(UptimeCheckConfig_ResourceGroup)
+ err := b.DecodeMessage(msg)
+ m.Resource = &UptimeCheckConfig_ResourceGroup_{msg}
+ return true, err
+ case 5: // check_request_type.http_check
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(UptimeCheckConfig_HttpCheck)
+ err := b.DecodeMessage(msg)
+ m.CheckRequestType = &UptimeCheckConfig_HttpCheck_{msg}
+ return true, err
+ case 6: // check_request_type.tcp_check
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(UptimeCheckConfig_TcpCheck)
+ err := b.DecodeMessage(msg)
+ m.CheckRequestType = &UptimeCheckConfig_TcpCheck_{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _UptimeCheckConfig_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*UptimeCheckConfig)
+ // resource
+ switch x := m.Resource.(type) {
+ case *UptimeCheckConfig_MonitoredResource:
+ s := proto.Size(x.MonitoredResource)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *UptimeCheckConfig_ResourceGroup_:
+ s := proto.Size(x.ResourceGroup)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ // check_request_type
+ switch x := m.CheckRequestType.(type) {
+ case *UptimeCheckConfig_HttpCheck_:
+ s := proto.Size(x.HttpCheck)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *UptimeCheckConfig_TcpCheck_:
+ s := proto.Size(x.TcpCheck)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// The resource submessage for group checks. It can be used instead of a
+// monitored resource, when multiple resources are being monitored.
+type UptimeCheckConfig_ResourceGroup struct {
+ // The group of resources being monitored. Should be only the
+ // group_id, not projects//groups/.
+ GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+ // The resource type of the group members.
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} }
+func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
+func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1, 0}
+}
+
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Merge(m, src)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_ResourceGroup.Size(m)
+}
+func (m *UptimeCheckConfig_ResourceGroup) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_ResourceGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_ResourceGroup proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
+ if m != nil {
+ return m.GroupId
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
+ if m != nil {
+ return m.ResourceType
+ }
+ return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
+}
+
+// Information involved in an HTTP/HTTPS uptime check request.
+type UptimeCheckConfig_HttpCheck struct {
+ // If true, use HTTPS instead of HTTP to run the check.
+ UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"`
+ // The path to the page to run the check against. Will be combined with the
+ // host (specified within the MonitoredResource) and port to construct the
+ // full URL. Optional (defaults to "/").
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // The port to the page to run the check against. Will be combined with host
+ // (specified within the MonitoredResource) and path to construct the full
+ // URL. Optional (defaults to 80 without SSL, or 443 with SSL).
+ Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"`
+ // The authentication information. Optional when creating an HTTP check;
+ // defaults to empty.
+ AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"`
+ // Boolean specifiying whether to encrypt the header information.
+ // Encryption should be specified for any headers related to authentication
+ // that you do not wish to be seen when retrieving the configuration. The
+ // server will be responsible for encrypting the headers.
+ // On Get/List calls, if mask_headers is set to True then the headers
+ // will be obscured with ******.
+ MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
+ // The list of headers to send as part of the uptime check request.
+ // If two headers have the same key and different values, they should
+ // be entered as a single header, with the value being a comma-separated
+ // list of all the desired values as described at
+ // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31).
+ // Entering two separate headers with the same key in a Create call will
+ // cause the first to be overwritten by the second.
+ // The maximum number of headers allowed is 100.
+ Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} }
+func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {}
+func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1}
+}
+
+func (m *UptimeCheckConfig_HttpCheck) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_HttpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig_HttpCheck) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_HttpCheck.Merge(m, src)
+}
+func (m *UptimeCheckConfig_HttpCheck) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck.Size(m)
+}
+func (m *UptimeCheckConfig_HttpCheck) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_HttpCheck.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_HttpCheck proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool {
+ if m != nil {
+ return m.UseSsl
+ }
+ return false
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetPath() string {
+ if m != nil {
+ return m.Path
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 {
+ if m != nil {
+ return m.Port
+ }
+ return 0
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication {
+ if m != nil {
+ return m.AuthInfo
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool {
+ if m != nil {
+ return m.MaskHeaders
+ }
+ return false
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string {
+ if m != nil {
+ return m.Headers
+ }
+ return nil
+}
+
+// A type of authentication to perform against the specified resource or URL
+// that uses username and password.
+// Currently, only Basic authentication is supported in Uptime Monitoring.
+type UptimeCheckConfig_HttpCheck_BasicAuthentication struct {
+ // The username to authenticate.
+ Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
+ // The password to authenticate.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() {
+ *m = UptimeCheckConfig_HttpCheck_BasicAuthentication{}
+}
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string {
+ return proto.CompactTextString(m)
+}
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {}
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1, 1, 0}
+}
+
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Merge(m, src)
+}
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.Size(m)
+}
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_HttpCheck_BasicAuthentication proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string {
+ if m != nil {
+ return m.Username
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+// Information required for a TCP uptime check request.
+type UptimeCheckConfig_TcpCheck struct {
+ // The port to the page to run the check against. Will be combined with host
+ // (specified within the MonitoredResource) to construct the full URL.
+ // Required.
+ Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} }
+func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {}
+func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1, 2}
+}
+
+func (m *UptimeCheckConfig_TcpCheck) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_TcpCheck) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig_TcpCheck) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_TcpCheck.Merge(m, src)
+}
+func (m *UptimeCheckConfig_TcpCheck) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_TcpCheck.Size(m)
+}
+func (m *UptimeCheckConfig_TcpCheck) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_TcpCheck.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_TcpCheck proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 {
+ if m != nil {
+ return m.Port
+ }
+ return 0
+}
+
+// Used to perform string matching. Currently, this matches on the exact
+// content. In the future, it can be expanded to allow for regular expressions
+// and more complex matching.
+type UptimeCheckConfig_ContentMatcher struct {
+ // String content to match (max 1024 bytes)
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} }
+func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {}
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{1, 3}
+}
+
+func (m *UptimeCheckConfig_ContentMatcher) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Unmarshal(m, b)
+}
+func (m *UptimeCheckConfig_ContentMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckConfig_ContentMatcher) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Merge(m, src)
+}
+func (m *UptimeCheckConfig_ContentMatcher) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckConfig_ContentMatcher.Size(m)
+}
+func (m *UptimeCheckConfig_ContentMatcher) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckConfig_ContentMatcher.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckConfig_ContentMatcher proto.InternalMessageInfo
+
+func (m *UptimeCheckConfig_ContentMatcher) GetContent() string {
+ if m != nil {
+ return m.Content
+ }
+ return ""
+}
+
+// Contains the region, location, and list of IP
+// addresses where checkers in the location run from.
+type UptimeCheckIp struct {
+ // A broad region category in which the IP address is located.
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"`
+ // A more specific location within the region that typically encodes
+ // a particular city/town/metro (and its containing state/province or country)
+ // within the broader umbrella region category.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The IP address from which the uptime check originates. This is a full
+ // IP address (not an IP address range). Most IP addresses, as of this
+ // publication, are in IPv4 format; however, one should not rely on the
+ // IP addresses being in IPv4 format indefinitely and should support
+ // interpreting this field in either IPv4 or IPv6 format.
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} }
+func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) }
+func (*UptimeCheckIp) ProtoMessage() {}
+func (*UptimeCheckIp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ca0e36dfc8221d8, []int{2}
+}
+
+func (m *UptimeCheckIp) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UptimeCheckIp.Unmarshal(m, b)
+}
+func (m *UptimeCheckIp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UptimeCheckIp.Marshal(b, m, deterministic)
+}
+func (m *UptimeCheckIp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UptimeCheckIp.Merge(m, src)
+}
+func (m *UptimeCheckIp) XXX_Size() int {
+ return xxx_messageInfo_UptimeCheckIp.Size(m)
+}
+func (m *UptimeCheckIp) XXX_DiscardUnknown() {
+ xxx_messageInfo_UptimeCheckIp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UptimeCheckIp proto.InternalMessageInfo
+
+func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion {
+ if m != nil {
+ return m.Region
+ }
+ return UptimeCheckRegion_REGION_UNSPECIFIED
+}
+
+func (m *UptimeCheckIp) GetLocation() string {
+ if m != nil {
+ return m.Location
+ }
+ return ""
+}
+
+func (m *UptimeCheckIp) GetIpAddress() string {
+ if m != nil {
+ return m.IpAddress
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value)
+ proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value)
+ proto.RegisterType((*InternalChecker)(nil), "google.monitoring.v3.InternalChecker")
+ proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig")
+ proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup")
+ proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck")
+ proto.RegisterMapType((map[string]string)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry")
+ proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication")
+ proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck")
+ proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher")
+ proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp")
+}
+
+func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor_7ca0e36dfc8221d8) }
+
+var fileDescriptor_7ca0e36dfc8221d8 = []byte{
+ // 1043 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xed, 0x6e, 0xe3, 0x44,
+ 0x17, 0x5e, 0x27, 0x6d, 0x3e, 0x4e, 0xfa, 0xe1, 0xce, 0xdb, 0x17, 0xdc, 0x48, 0x5d, 0xba, 0x45,
+ 0x88, 0xaa, 0x3f, 0x1c, 0xb6, 0x11, 0x08, 0x2d, 0xd2, 0x22, 0x27, 0x35, 0x8d, 0xa5, 0x36, 0x89,
+ 0x26, 0xcd, 0x02, 0x4b, 0x85, 0xe5, 0xda, 0x53, 0xc7, 0x34, 0xf1, 0x18, 0xcf, 0xb8, 0x4b, 0xb9,
+ 0x05, 0x2e, 0x83, 0x1f, 0x48, 0x5c, 0x01, 0xd7, 0xc0, 0x05, 0x70, 0x3d, 0xc8, 0xe3, 0x99, 0xb4,
+ 0x69, 0x8b, 0xb6, 0xfd, 0x37, 0xcf, 0xf9, 0x78, 0xe6, 0xf8, 0xcc, 0x79, 0x66, 0x0c, 0x2f, 0x42,
+ 0x4a, 0xc3, 0x29, 0x69, 0xcd, 0x68, 0x1c, 0x71, 0x9a, 0x46, 0x71, 0xd8, 0xba, 0x6a, 0xb7, 0xb2,
+ 0x84, 0x47, 0x33, 0x62, 0x26, 0x29, 0xe5, 0x14, 0x6d, 0x16, 0x21, 0xe6, 0x4d, 0x88, 0x79, 0xd5,
+ 0x6e, 0x7e, 0x2c, 0x13, 0xbd, 0x24, 0x52, 0xc9, 0x24, 0x70, 0x53, 0xc2, 0x68, 0x96, 0xfa, 0x32,
+ 0xb5, 0xf9, 0x5c, 0x06, 0x09, 0x74, 0x9e, 0x5d, 0xb4, 0x82, 0x2c, 0xf5, 0x78, 0x44, 0xe3, 0xc2,
+ 0xbf, 0xfb, 0x87, 0x06, 0xeb, 0x4e, 0xcc, 0x49, 0x1a, 0x7b, 0xd3, 0xee, 0x84, 0xf8, 0x97, 0x24,
+ 0x45, 0xdb, 0x00, 0x49, 0x4a, 0x7f, 0x22, 0x3e, 0x77, 0xa3, 0xc0, 0xd0, 0x76, 0xb4, 0xbd, 0x3a,
+ 0xae, 0x4b, 0x8b, 0x13, 0x20, 0x03, 0xaa, 0x31, 0xe1, 0xef, 0x68, 0x7a, 0x69, 0x94, 0x84, 0x4f,
+ 0x41, 0xb4, 0x05, 0xb5, 0xd0, 0x4f, 0xdc, 0x5f, 0x69, 0x4c, 0x8c, 0x72, 0xe1, 0x0a, 0xfd, 0xe4,
+ 0x2d, 0x8d, 0x49, 0xce, 0xe9, 0x17, 0xf4, 0x39, 0xe7, 0x52, 0xc1, 0x29, 0x2d, 0x4e, 0x80, 0x5e,
+ 0xc0, 0x4a, 0x10, 0xb1, 0x64, 0xea, 0x5d, 0xbb, 0xb1, 0x37, 0x23, 0xc6, 0xb2, 0x08, 0x68, 0x48,
+ 0x5b, 0xdf, 0x9b, 0x91, 0xdd, 0x7f, 0x1a, 0xb0, 0x31, 0x16, 0x5d, 0x11, 0x75, 0x76, 0x69, 0x7c,
+ 0x11, 0x85, 0x08, 0xc1, 0x92, 0x48, 0x28, 0xaa, 0x14, 0xeb, 0x7b, 0x64, 0xa5, 0x7b, 0x64, 0xa8,
+ 0x0f, 0xe8, 0x7e, 0xcb, 0x44, 0xcd, 0x8d, 0x83, 0x6d, 0x53, 0xb6, 0xdb, 0x4b, 0x22, 0xf3, 0x44,
+ 0x45, 0x61, 0x19, 0xd4, 0x7b, 0x86, 0x37, 0x66, 0x77, 0x8d, 0xe8, 0x47, 0x58, 0x53, 0x2c, 0x6e,
+ 0x98, 0xd2, 0x2c, 0x11, 0x9f, 0xd8, 0x38, 0xf8, 0xdc, 0x7c, 0xe8, 0xe8, 0xcc, 0x7b, 0xdf, 0x61,
+ 0x2a, 0xa6, 0xa3, 0x3c, 0xb9, 0xf7, 0x0c, 0xaf, 0xa6, 0xb7, 0x0d, 0x08, 0x03, 0x4c, 0x38, 0x4f,
+ 0x5c, 0xd1, 0x31, 0xd1, 0x9d, 0xc6, 0xc1, 0xcb, 0xc7, 0x72, 0xf7, 0x38, 0x4f, 0x04, 0xee, 0x69,
+ 0xb8, 0x3e, 0x51, 0x00, 0x0d, 0xa0, 0xce, 0x7d, 0x45, 0x59, 0x11, 0x94, 0x9f, 0x3d, 0x96, 0xf2,
+ 0xd4, 0x9f, 0x33, 0xd6, 0xb8, 0x5c, 0xa3, 0x97, 0x50, 0x49, 0x48, 0x1a, 0xd1, 0xc0, 0xa8, 0x0a,
+ 0xb6, 0x2d, 0xc5, 0xa6, 0x86, 0xcf, 0x3c, 0x94, 0xc3, 0x87, 0x65, 0x20, 0x6a, 0x43, 0x35, 0xa7,
+ 0xa6, 0x19, 0x37, 0x6a, 0xef, 0xcb, 0x51, 0x91, 0xc8, 0x03, 0xdd, 0xa7, 0x31, 0x27, 0x31, 0x77,
+ 0x67, 0x1e, 0xf7, 0x27, 0x24, 0x65, 0x46, 0x7d, 0xa7, 0xbc, 0xd7, 0x38, 0xf8, 0xe2, 0xb1, 0xf5,
+ 0x77, 0x8b, 0xfc, 0x93, 0x22, 0x1d, 0xaf, 0xfb, 0x0b, 0x98, 0x21, 0x0c, 0x3a, 0x23, 0x53, 0xe2,
+ 0x73, 0x31, 0x1e, 0x61, 0x44, 0x63, 0x66, 0xc0, 0x4e, 0x79, 0x6f, 0xed, 0xe0, 0xd3, 0xf7, 0x6e,
+ 0x81, 0x45, 0x3c, 0x5e, 0x57, 0x04, 0x05, 0x66, 0xe8, 0x23, 0x68, 0x44, 0xcc, 0x8d, 0xa4, 0xd8,
+ 0x8c, 0xf5, 0x1d, 0x6d, 0xaf, 0x86, 0x21, 0x62, 0x4a, 0x7e, 0x08, 0xc3, 0x86, 0xf2, 0xba, 0x52,
+ 0x1a, 0xcc, 0x58, 0x13, 0x1f, 0xf6, 0xc9, 0xc3, 0xbb, 0xde, 0x51, 0x2e, 0xd6, 0xa3, 0x45, 0x03,
+ 0x6b, 0xfe, 0x02, 0xab, 0x0b, 0xa3, 0x25, 0x34, 0x9a, 0x2f, 0x6e, 0xa4, 0x5d, 0x15, 0xd8, 0x09,
+ 0xd0, 0x31, 0xcc, 0xa7, 0xce, 0xe5, 0xd7, 0x49, 0x21, 0x9c, 0xff, 0xfc, 0x62, 0x41, 0xa7, 0xb8,
+ 0x4f, 0xaf, 0x13, 0x82, 0x57, 0xd2, 0x5b, 0xa8, 0xf9, 0x57, 0x19, 0xea, 0xf3, 0xc9, 0x43, 0x1f,
+ 0x42, 0x35, 0x63, 0xc4, 0x65, 0x6c, 0x2a, 0x76, 0xad, 0xe1, 0x4a, 0xc6, 0xc8, 0x88, 0x4d, 0x73,
+ 0x01, 0x27, 0x1e, 0x9f, 0x48, 0x91, 0x8a, 0xb5, 0xb0, 0xd1, 0x94, 0x0b, 0x3d, 0x2e, 0x63, 0xb1,
+ 0x46, 0xe7, 0x50, 0xf7, 0x32, 0x3e, 0x71, 0xa3, 0xf8, 0x82, 0x4a, 0x71, 0xd9, 0x4f, 0x16, 0x80,
+ 0xd9, 0xf1, 0x58, 0xe4, 0x5b, 0x19, 0x9f, 0x90, 0x98, 0x47, 0x7e, 0x31, 0x57, 0xb5, 0x9c, 0xd7,
+ 0x89, 0x2f, 0x68, 0x7e, 0x71, 0xcc, 0x3c, 0x76, 0xe9, 0x4e, 0x88, 0x17, 0xe4, 0xbd, 0x5f, 0x16,
+ 0x95, 0x36, 0x72, 0x5b, 0xaf, 0x30, 0xa1, 0xef, 0xa0, 0xaa, 0xbc, 0x15, 0x71, 0x32, 0xaf, 0x9f,
+ 0x5e, 0x84, 0xe4, 0xb2, 0x63, 0x9e, 0x5e, 0x63, 0x45, 0xd7, 0x3c, 0x81, 0xff, 0x3d, 0x50, 0x1d,
+ 0x6a, 0x42, 0x2d, 0x63, 0xf9, 0x99, 0xce, 0x2f, 0xb9, 0x39, 0xce, 0x7d, 0x89, 0xc7, 0xd8, 0x3b,
+ 0x9a, 0x06, 0xb2, 0x7f, 0x73, 0xdc, 0x7c, 0x05, 0x2b, 0xb7, 0xf7, 0x41, 0x3a, 0x94, 0x2f, 0xc9,
+ 0xb5, 0xa4, 0xc8, 0x97, 0x68, 0x13, 0x96, 0xaf, 0xbc, 0x69, 0xa6, 0xee, 0xc7, 0x02, 0xbc, 0x2a,
+ 0x7d, 0xa9, 0x35, 0x9f, 0x43, 0x4d, 0x09, 0x7c, 0x7e, 0x16, 0xda, 0xcd, 0x59, 0x34, 0xf7, 0x61,
+ 0x6d, 0x51, 0x40, 0xf9, 0x9b, 0x20, 0x25, 0xa4, 0x86, 0x4a, 0xc2, 0x0e, 0x40, 0x4d, 0x8d, 0x45,
+ 0x67, 0x13, 0x90, 0x98, 0x6b, 0x37, 0x25, 0x3f, 0x67, 0x84, 0x71, 0x31, 0x65, 0xbb, 0xbf, 0x69,
+ 0xb0, 0x7a, 0xab, 0x5d, 0x4e, 0x82, 0xbe, 0x86, 0x4a, 0x21, 0x3a, 0x41, 0xf6, 0x04, 0xcd, 0xc9,
+ 0xb4, 0xbc, 0x31, 0x53, 0x5a, 0x34, 0x50, 0x35, 0x46, 0xe1, 0xfc, 0x25, 0x8a, 0x12, 0xd7, 0x0b,
+ 0x82, 0x94, 0x30, 0x26, 0x9f, 0xa9, 0x7a, 0x94, 0x58, 0x85, 0x61, 0x9f, 0x2c, 0xbc, 0x32, 0x05,
+ 0x2f, 0xfa, 0x00, 0x10, 0xb6, 0x8f, 0x9c, 0x41, 0xdf, 0x1d, 0xf7, 0x47, 0x43, 0xbb, 0xeb, 0x7c,
+ 0xe3, 0xd8, 0x87, 0xfa, 0x33, 0x54, 0x85, 0xf2, 0x78, 0x64, 0xe9, 0x1a, 0x02, 0xa8, 0xd8, 0x63,
+ 0x3c, 0x18, 0xda, 0x7a, 0x09, 0x6d, 0xc0, 0xea, 0x68, 0x30, 0x3e, 0xed, 0xb9, 0xd6, 0x89, 0x8d,
+ 0x9d, 0xae, 0xa5, 0x97, 0x91, 0x0e, 0x2b, 0xd6, 0xc8, 0xb1, 0xdc, 0xa1, 0x95, 0xa7, 0x76, 0xf5,
+ 0xa5, 0xfd, 0x1f, 0x60, 0xe3, 0x9e, 0x80, 0xd0, 0x36, 0x6c, 0x61, 0x7b, 0x34, 0x18, 0xe3, 0xae,
+ 0xed, 0x9e, 0x7e, 0x3f, 0xb4, 0xef, 0xec, 0xb6, 0x02, 0x35, 0xa7, 0x3f, 0x3a, 0xb5, 0xfa, 0x5d,
+ 0x5b, 0xd7, 0xd0, 0x16, 0xfc, 0xdf, 0xfa, 0x76, 0xe4, 0xda, 0xc7, 0x1d, 0xf7, 0x78, 0x60, 0x1d,
+ 0xba, 0x1d, 0xeb, 0x38, 0xf7, 0x60, 0xbd, 0xd4, 0xf9, 0x5d, 0x03, 0xc3, 0xa7, 0xb3, 0x07, 0xbb,
+ 0xd6, 0x69, 0x14, 0x9f, 0x37, 0xcc, 0xef, 0xd7, 0xa1, 0xf6, 0xf6, 0xb5, 0x0c, 0x0a, 0xe9, 0xd4,
+ 0x8b, 0x43, 0x93, 0xa6, 0x61, 0x2b, 0x24, 0xb1, 0xb8, 0x7d, 0x5b, 0x85, 0xcb, 0x4b, 0x22, 0xb6,
+ 0xf8, 0x77, 0xf2, 0xd5, 0x0d, 0xfa, 0xb3, 0xd4, 0x3c, 0x2a, 0x08, 0xba, 0x53, 0x9a, 0x05, 0xea,
+ 0xbd, 0xcc, 0xf7, 0x7a, 0xd3, 0xfe, 0x5b, 0x39, 0xcf, 0x84, 0xf3, 0xec, 0xc6, 0x79, 0xf6, 0xa6,
+ 0x7d, 0x5e, 0x11, 0x9b, 0xb4, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x01, 0x68, 0xd9, 0xde, 0x01,
+ 0x09, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
new file mode 100644
index 00000000000..3d2baf6d99b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go
@@ -0,0 +1,793 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/monitoring/v3/uptime_service.proto
+
+package monitoring
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ field_mask "google.golang.org/genproto/protobuf/field_mask"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The protocol for the `ListUptimeCheckConfigs` request.
+type ListUptimeCheckConfigsRequest struct {
+ // The project whose uptime check configurations are listed. The format
+ // is `projects/[PROJECT_ID]`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} }
+func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListUptimeCheckConfigsRequest) ProtoMessage() {}
+func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{0}
+}
+
+func (m *ListUptimeCheckConfigsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListUptimeCheckConfigsRequest.Unmarshal(m, b)
+}
+func (m *ListUptimeCheckConfigsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListUptimeCheckConfigsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListUptimeCheckConfigsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListUptimeCheckConfigsRequest.Merge(m, src)
+}
+func (m *ListUptimeCheckConfigsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListUptimeCheckConfigsRequest.Size(m)
+}
+func (m *ListUptimeCheckConfigsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListUptimeCheckConfigsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListUptimeCheckConfigsRequest proto.InternalMessageInfo
+
+func (m *ListUptimeCheckConfigsRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListUptimeCheckConfigsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckConfigs` response.
+type ListUptimeCheckConfigsResponse struct {
+ // The returned uptime check configurations.
+ UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of uptime check configurations for the project,
+ // irrespective of any pagination.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} }
+func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListUptimeCheckConfigsResponse) ProtoMessage() {}
+func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{1}
+}
+
+func (m *ListUptimeCheckConfigsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListUptimeCheckConfigsResponse.Unmarshal(m, b)
+}
+func (m *ListUptimeCheckConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListUptimeCheckConfigsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListUptimeCheckConfigsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListUptimeCheckConfigsResponse.Merge(m, src)
+}
+func (m *ListUptimeCheckConfigsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListUptimeCheckConfigsResponse.Size(m)
+}
+func (m *ListUptimeCheckConfigsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListUptimeCheckConfigsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListUptimeCheckConfigsResponse proto.InternalMessageInfo
+
+func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig {
+ if m != nil {
+ return m.UptimeCheckConfigs
+ }
+ return nil
+}
+
+func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+func (m *ListUptimeCheckConfigsResponse) GetTotalSize() int32 {
+ if m != nil {
+ return m.TotalSize
+ }
+ return 0
+}
+
+// The protocol for the `GetUptimeCheckConfig` request.
+type GetUptimeCheckConfigRequest struct {
+ // The uptime check configuration to retrieve. The format
+ // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} }
+func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*GetUptimeCheckConfigRequest) ProtoMessage() {}
+func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{2}
+}
+
+func (m *GetUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *GetUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *GetUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetUptimeCheckConfigRequest.Merge(m, src)
+}
+func (m *GetUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_GetUptimeCheckConfigRequest.Size(m)
+}
+func (m *GetUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *GetUptimeCheckConfigRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The protocol for the `CreateUptimeCheckConfig` request.
+type CreateUptimeCheckConfigRequest struct {
+ // The project in which to create the uptime check. The format
+ // is `projects/[PROJECT_ID]`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // The new uptime check configuration.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} }
+func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{3}
+}
+
+func (m *CreateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.Merge(m, src)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateUptimeCheckConfigRequest.Size(m)
+}
+func (m *CreateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *CreateUptimeCheckConfigRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if m != nil {
+ return m.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `UpdateUptimeCheckConfig` request.
+type UpdateUptimeCheckConfigRequest struct {
+ // Optional. If present, only the listed fields in the current uptime check
+ // configuration are updated with values from the new configuration. If this
+ // field is empty, then the current configuration is completely replaced with
+ // the new configuration.
+ UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. If an `"updateMask"` has been specified, this field gives
+ // the values for the set of fields mentioned in the `"updateMask"`. If an
+ // `"updateMask"` has not been given, this uptime check configuration replaces
+ // the current configuration. If a field is mentioned in `"updateMask"` but
+ // the corresonding field is omitted in this partial uptime check
+ // configuration, it has the effect of deleting/clearing the field from the
+ // configuration on the server.
+ //
+ // The following fields can be updated: `display_name`,
+ // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and
+ // `selected_regions`.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} }
+func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {}
+func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{4}
+}
+
+func (m *UpdateUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *UpdateUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateUptimeCheckConfigRequest.Merge(m, src)
+}
+func (m *UpdateUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateUptimeCheckConfigRequest.Size(m)
+}
+func (m *UpdateUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *field_mask.FieldMask {
+ if m != nil {
+ return m.UpdateMask
+ }
+ return nil
+}
+
+func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if m != nil {
+ return m.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `DeleteUptimeCheckConfig` request.
+type DeleteUptimeCheckConfigRequest struct {
+ // The uptime check configuration to delete. The format
+ // is `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} }
+func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {}
+func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{5}
+}
+
+func (m *DeleteUptimeCheckConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Unmarshal(m, b)
+}
+func (m *DeleteUptimeCheckConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteUptimeCheckConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteUptimeCheckConfigRequest.Merge(m, src)
+}
+func (m *DeleteUptimeCheckConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteUptimeCheckConfigRequest.Size(m)
+}
+func (m *DeleteUptimeCheckConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteUptimeCheckConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteUptimeCheckConfigRequest proto.InternalMessageInfo
+
+func (m *DeleteUptimeCheckConfigRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` request.
+type ListUptimeCheckIpsRequest struct {
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ // NOTE: this field is not yet implemented
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ // NOTE: this field is not yet implemented
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} }
+func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListUptimeCheckIpsRequest) ProtoMessage() {}
+func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{6}
+}
+
+func (m *ListUptimeCheckIpsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListUptimeCheckIpsRequest.Unmarshal(m, b)
+}
+func (m *ListUptimeCheckIpsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListUptimeCheckIpsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListUptimeCheckIpsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListUptimeCheckIpsRequest.Merge(m, src)
+}
+func (m *ListUptimeCheckIpsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListUptimeCheckIpsRequest.Size(m)
+}
+func (m *ListUptimeCheckIpsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListUptimeCheckIpsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListUptimeCheckIpsRequest proto.InternalMessageInfo
+
+func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *ListUptimeCheckIpsRequest) GetPageToken() string {
+ if m != nil {
+ return m.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` response.
+type ListUptimeCheckIpsResponse struct {
+ // The returned list of IP addresses (including region and location) that the
+ // checkers run from.
+ UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ // NOTE: this field is not yet implemented
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} }
+func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListUptimeCheckIpsResponse) ProtoMessage() {}
+func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_6222dd2aa0db8eee, []int{7}
+}
+
+func (m *ListUptimeCheckIpsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListUptimeCheckIpsResponse.Unmarshal(m, b)
+}
+func (m *ListUptimeCheckIpsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListUptimeCheckIpsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListUptimeCheckIpsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListUptimeCheckIpsResponse.Merge(m, src)
+}
+func (m *ListUptimeCheckIpsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListUptimeCheckIpsResponse.Size(m)
+}
+func (m *ListUptimeCheckIpsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListUptimeCheckIpsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListUptimeCheckIpsResponse proto.InternalMessageInfo
+
+func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp {
+ if m != nil {
+ return m.UptimeCheckIps
+ }
+ return nil
+}
+
+func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest")
+ proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse")
+ proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest")
+ proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest")
+ proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest")
+ proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest")
+ proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest")
+ proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse")
+}
+
+func init() {
+ proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor_6222dd2aa0db8eee)
+}
+
+var fileDescriptor_6222dd2aa0db8eee = []byte{
+ // 747 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0xd3, 0x4a,
+ 0x14, 0xd6, 0x24, 0xbd, 0x55, 0x7b, 0xaa, 0x7b, 0x2f, 0x0c, 0x51, 0x1b, 0x5c, 0x1a, 0x05, 0x23,
+ 0x41, 0x89, 0x90, 0x4d, 0x93, 0xae, 0xa8, 0xa8, 0x44, 0x03, 0x54, 0x95, 0xa8, 0x54, 0xa5, 0xb4,
+ 0x15, 0x50, 0x29, 0x72, 0xd3, 0xa9, 0x31, 0x49, 0x3c, 0xc6, 0x33, 0xae, 0xa0, 0xa8, 0x1b, 0xde,
+ 0x00, 0x75, 0xc9, 0x9e, 0x45, 0x1f, 0x00, 0xd6, 0xb0, 0x41, 0x62, 0x8b, 0x78, 0x03, 0x1e, 0x04,
+ 0x79, 0x3c, 0x26, 0x7f, 0x63, 0xe3, 0x88, 0x5d, 0x3c, 0xe7, 0xcc, 0x39, 0xdf, 0xf9, 0xfc, 0x9d,
+ 0x2f, 0x86, 0x9b, 0x36, 0xa5, 0x76, 0x87, 0x98, 0x5d, 0xea, 0x3a, 0x9c, 0xfa, 0x8e, 0x6b, 0x9b,
+ 0xc7, 0x35, 0x33, 0xf0, 0xb8, 0xd3, 0x25, 0x4d, 0x46, 0xfc, 0x63, 0xa7, 0x45, 0x0c, 0xcf, 0xa7,
+ 0x9c, 0xe2, 0x42, 0x94, 0x6a, 0xf4, 0x52, 0x8d, 0xe3, 0x9a, 0x76, 0x45, 0x16, 0xb0, 0x3c, 0xc7,
+ 0xb4, 0x5c, 0x97, 0x72, 0x8b, 0x3b, 0xd4, 0x65, 0xd1, 0x1d, 0xed, 0x6a, 0x4a, 0x79, 0x99, 0x32,
+ 0x2f, 0x53, 0xc4, 0xd3, 0x41, 0x70, 0x64, 0x92, 0xae, 0xc7, 0x5f, 0xcb, 0x60, 0x79, 0x38, 0x78,
+ 0xe4, 0x90, 0xce, 0x61, 0xb3, 0x6b, 0xb1, 0x76, 0x94, 0xa1, 0x33, 0x58, 0x78, 0xe4, 0x30, 0xbe,
+ 0x23, 0x4a, 0xd6, 0x9f, 0x93, 0x56, 0xbb, 0x4e, 0xdd, 0x23, 0xc7, 0x66, 0x0d, 0xf2, 0x32, 0x20,
+ 0x8c, 0xe3, 0x59, 0x98, 0xf4, 0x2c, 0x9f, 0xb8, 0xbc, 0x88, 0xca, 0x68, 0x71, 0xba, 0x21, 0x9f,
+ 0xf0, 0x3c, 0x4c, 0x7b, 0x96, 0x4d, 0x9a, 0xcc, 0x39, 0x21, 0xc5, 0x7c, 0x19, 0x2d, 0xfe, 0xd3,
+ 0x98, 0x0a, 0x0f, 0xb6, 0x9d, 0x13, 0x82, 0x17, 0x00, 0x44, 0x90, 0xd3, 0x36, 0x71, 0x8b, 0x13,
+ 0xe2, 0xa2, 0x48, 0x7f, 0x1c, 0x1e, 0xe8, 0x5f, 0x10, 0x94, 0x92, 0xba, 0x32, 0x8f, 0xba, 0x8c,
+ 0xe0, 0x27, 0x50, 0x90, 0x2c, 0xb6, 0xc2, 0x70, 0xb3, 0x15, 0xc5, 0x8b, 0xa8, 0x9c, 0x5f, 0x9c,
+ 0xa9, 0xde, 0x30, 0x54, 0x64, 0x1a, 0x23, 0xf5, 0x1a, 0x38, 0x18, 0x69, 0x81, 0xaf, 0xc3, 0xff,
+ 0x2e, 0x79, 0xc5, 0x9b, 0x7d, 0x08, 0x73, 0x02, 0xe1, 0xbf, 0xe1, 0xf1, 0x56, 0x8c, 0x32, 0x1c,
+ 0x82, 0x53, 0x6e, 0x75, 0xfa, 0x47, 0x9c, 0x16, 0x27, 0xe1, 0x8c, 0xfa, 0x12, 0xcc, 0xaf, 0x93,
+ 0xd1, 0x11, 0x62, 0xde, 0x30, 0x4c, 0xb8, 0x56, 0x97, 0x48, 0xd6, 0xc4, 0x6f, 0xfd, 0x1d, 0x82,
+ 0x52, 0xdd, 0x27, 0x16, 0x27, 0x89, 0xd7, 0x92, 0xe8, 0xde, 0x83, 0x4b, 0x0a, 0x3e, 0x04, 0xf0,
+ 0x31, 0xe8, 0xb8, 0x38, 0x42, 0x87, 0xfe, 0x11, 0x41, 0x69, 0xc7, 0x3b, 0x4c, 0xc3, 0xb4, 0x02,
+ 0x33, 0x81, 0xc8, 0x10, 0xc2, 0x91, 0x3d, 0xb5, 0xb8, 0x67, 0xac, 0x2d, 0xe3, 0x61, 0xa8, 0xad,
+ 0x4d, 0x8b, 0xb5, 0x1b, 0x10, 0xa5, 0x87, 0xbf, 0x93, 0x80, 0xe7, 0xff, 0x1a, 0xf8, 0x32, 0x94,
+ 0xee, 0x93, 0x0e, 0x49, 0xc1, 0xad, 0x7a, 0x05, 0x7b, 0x70, 0x79, 0x48, 0x79, 0x1b, 0xde, 0x6f,
+ 0xad, 0x0f, 0x68, 0x3a, 0x97, 0xaa, 0xe9, 0xfc, 0xb0, 0xa6, 0xcf, 0x10, 0x68, 0xaa, 0xca, 0x52,
+ 0xcf, 0x9b, 0x70, 0x61, 0x80, 0x06, 0xc7, 0x8b, 0xb5, 0x7c, 0xed, 0x8f, 0x1c, 0x6c, 0x78, 0x8d,
+ 0xff, 0x82, 0x81, 0xb2, 0x59, 0x35, 0x5c, 0xfd, 0x3a, 0x05, 0xb8, 0xaf, 0xd2, 0x76, 0xe4, 0x48,
+ 0xf8, 0x13, 0x82, 0x59, 0xf5, 0x02, 0xe2, 0x9a, 0x1a, 0x4e, 0xaa, 0x49, 0x68, 0xcb, 0xe3, 0x5d,
+ 0x8a, 0x38, 0xd1, 0xab, 0x6f, 0xbf, 0xff, 0x3c, 0xcb, 0xdd, 0xc2, 0x95, 0xd0, 0xd4, 0xde, 0x44,
+ 0x42, 0xbf, 0xeb, 0xf9, 0xf4, 0x05, 0x69, 0x71, 0x66, 0x56, 0x4e, 0x4d, 0xc5, 0xf2, 0x7e, 0x40,
+ 0x50, 0x50, 0xad, 0x1d, 0x5e, 0x52, 0x43, 0x48, 0x59, 0x51, 0x2d, 0xab, 0xfa, 0x86, 0x80, 0x86,
+ 0x3a, 0xea, 0x83, 0xa9, 0x40, 0x69, 0x56, 0x4e, 0xf1, 0x67, 0x04, 0x73, 0x09, 0xbb, 0x8e, 0x13,
+ 0xe8, 0x4a, 0xb7, 0x86, 0xec, 0x70, 0xd7, 0x05, 0xdc, 0x7b, 0xfa, 0x18, 0xbc, 0xde, 0x51, 0x2d,
+ 0x29, 0xfe, 0x81, 0x60, 0x2e, 0xc1, 0x1b, 0x92, 0x66, 0x48, 0xb7, 0x92, 0xec, 0x33, 0x3c, 0x13,
+ 0x33, 0xec, 0x54, 0x57, 0xc5, 0x0c, 0x0a, 0x70, 0x46, 0xa6, 0xd7, 0xa0, 0x9e, 0xeb, 0x3d, 0x82,
+ 0xb9, 0x04, 0xef, 0x48, 0x9a, 0x2b, 0xdd, 0x6a, 0xb4, 0xd9, 0x11, 0x37, 0x7c, 0x10, 0xfe, 0x0d,
+ 0xc7, 0xca, 0xa9, 0x8c, 0xa3, 0x9c, 0x33, 0x04, 0x78, 0xd4, 0x49, 0xb0, 0x99, 0x69, 0xc7, 0x7a,
+ 0x6e, 0xa6, 0xdd, 0xce, 0x7e, 0x41, 0x2e, 0xa4, 0x26, 0xd0, 0x16, 0x30, 0xee, 0x7d, 0x65, 0xc4,
+ 0x39, 0x6b, 0xe7, 0x08, 0x8a, 0x2d, 0xda, 0x55, 0xd6, 0x5c, 0x93, 0x1e, 0x23, 0xed, 0x65, 0x2b,
+ 0xe4, 0x60, 0x0b, 0x3d, 0x5d, 0x95, 0xb9, 0x36, 0xed, 0x58, 0xae, 0x6d, 0x50, 0xdf, 0x36, 0x6d,
+ 0xe2, 0x0a, 0x86, 0xcc, 0x28, 0x64, 0x79, 0x0e, 0x1b, 0xfc, 0xb8, 0x59, 0xe9, 0x3d, 0x9d, 0xe7,
+ 0xb4, 0xf5, 0xa8, 0x40, 0xbd, 0x43, 0x83, 0x43, 0x63, 0xb3, 0xd7, 0x72, 0xb7, 0xf6, 0x2d, 0x0e,
+ 0xee, 0x8b, 0xe0, 0x7e, 0x2f, 0xb8, 0xbf, 0x5b, 0x3b, 0x98, 0x14, 0x4d, 0x6a, 0xbf, 0x02, 0x00,
+ 0x00, 0xff, 0xff, 0x27, 0xb8, 0x65, 0x92, 0x9f, 0x09, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// UptimeCheckServiceClient is the client API for UptimeCheckService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type UptimeCheckServiceClient interface {
+ // Lists the existing valid uptime check configurations for the project,
+ // leaving out any invalid configurations.
+ ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single uptime check configuration.
+ GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Creates a new uptime check configuration.
+ CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Updates an uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `"updateMask"`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Deletes an uptime check configuration. Note that this method will fail
+ // if the uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Returns the list of IPs that checkers run from
+ ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error)
+}
+
+type uptimeCheckServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewUptimeCheckServiceClient(cc *grpc.ClientConn) UptimeCheckServiceClient {
+ return &uptimeCheckServiceClient{cc}
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) {
+ out := new(ListUptimeCheckConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) {
+ out := new(ListUptimeCheckIpsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// UptimeCheckServiceServer is the server API for UptimeCheckService service.
+type UptimeCheckServiceServer interface {
+ // Lists the existing valid uptime check configurations for the project,
+ // leaving out any invalid configurations.
+ ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single uptime check configuration.
+ GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Creates a new uptime check configuration.
+ CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Updates an uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `"updateMask"`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Deletes an uptime check configuration. Note that this method will fail
+ // if the uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*empty.Empty, error)
+ // Returns the list of IPs that checkers run from
+ ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error)
+}
+
+func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) {
+ s.RegisterService(&_UptimeCheckService_serviceDesc, srv)
+}
+
+func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckIpsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.UptimeCheckService",
+ HandlerType: (*UptimeCheckServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListUptimeCheckConfigs",
+ Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler,
+ },
+ {
+ MethodName: "GetUptimeCheckConfig",
+ Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "CreateUptimeCheckConfig",
+ Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "UpdateUptimeCheckConfig",
+ Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "DeleteUptimeCheckConfig",
+ Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "ListUptimeCheckIps",
+ Handler: _UptimeCheckService_ListUptimeCheckIps_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/uptime_service.proto",
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index bcc032df9dd..77c91bbc74d 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -327,6 +327,17 @@ func NewTooManyRequestsError(message string) *StatusError {
}}
}
+// NewRequestEntityTooLargeError returns an error indicating that the request
+// entity was too large.
+func NewRequestEntityTooLargeError(message string) *StatusError {
+ return &StatusError{metav1.Status{
+ Status: metav1.StatusFailure,
+ Code: http.StatusRequestEntityTooLarge,
+ Reason: metav1.StatusReasonRequestEntityTooLarge,
+ Message: fmt.Sprintf("Request entity too large: %s", message),
+ }}
+}
+
// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
reason := metav1.StatusReasonUnknown
@@ -513,6 +524,19 @@ func IsTooManyRequests(err error) bool {
return false
}
+// IsRequestEntityTooLargeError determines if err is an error which indicates
+// the request entity is too large.
+func IsRequestEntityTooLargeError(err error) bool {
+ if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
+ return true
+ }
+ switch t := err.(type) {
+ case APIStatus:
+ return t.Status().Code == http.StatusRequestEntityTooLarge
+ }
+ return false
+}
+
// IsUnexpectedServerError returns true if the server response was not in the expected API format,
// and may be the result of another HTTP actor.
func IsUnexpectedServerError(err error) bool {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 4d3a55d7169..c1743382a8a 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -713,6 +713,10 @@ const (
// Status code 406
StatusReasonNotAcceptable StatusReason = "NotAcceptable"
+ // StatusReasonRequestEntityTooLarge means that the request entity is too large.
+ // Status code 413
+ StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge"
+
// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
// API calls that return UnsupportedMediaType can never succeed.
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
index 6b859b28897..284e32bc3cb 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -283,6 +283,7 @@ var _ GroupVersioner = multiGroupVersioner{}
type multiGroupVersioner struct {
target schema.GroupVersion
acceptedGroupKinds []schema.GroupKind
+ coerce bool
}
// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.
@@ -294,6 +295,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi
return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}
}
+// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind.
+// Incoming kinds that match the provided groupKinds are preferred.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+// Examples:
+// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
+// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
+//
+// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
+//
+// gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
+func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+ return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true}
+}
+
// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will
// use the originating kind where possible.
func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
@@ -308,5 +325,8 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio
return v.target.WithKind(src.Kind), true
}
}
+ if v.coerce && len(kinds) > 0 {
+ return v.target.WithKind(kinds[0].Kind), true
+ }
return schema.GroupVersionKind{}, false
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
index 91fd4ed4f0b..a60a7c04156 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
reader: r,
decoder: d,
buf: make([]byte, 1024),
- maxBytes: 1024 * 1024,
+ maxBytes: 16 * 1024 * 1024,
}
}
diff --git a/vendor/k8s.io/client-go/dynamic/fake/simple.go b/vendor/k8s.io/client-go/dynamic/fake/simple.go
index 13e2d8055e8..dde45892f8e 100644
--- a/vendor/k8s.io/client-go/dynamic/fake/simple.go
+++ b/vendor/k8s.io/client-go/dynamic/fake/simple.go
@@ -45,7 +45,7 @@ func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *
}
}
- cs := &FakeDynamicClient{}
+ cs := &FakeDynamicClient{scheme: scheme}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
diff --git a/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
new file mode 100644
index 00000000000..a23b3165a04
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
@@ -0,0 +1,372 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ clientset "k8s.io/client-go/kubernetes"
+ admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1"
+ fakeadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake"
+ admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"
+ fakeadmissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake"
+ appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+ fakeappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1/fake"
+ appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
+ fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake"
+ appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
+ fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake"
+ authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1"
+ fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake"
+ authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1"
+ fakeauthenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake"
+ authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
+ fakeauthorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1/fake"
+ authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
+ fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake"
+ autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
+ fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake"
+ autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1"
+ fakeautoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake"
+ autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2"
+ fakeautoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake"
+ batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
+ fakebatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1/fake"
+ batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
+ fakebatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake"
+ batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1"
+ fakebatchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake"
+ certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
+ fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake"
+ coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
+ fakecoordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake"
+ corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
+ eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
+ fakeeventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1/fake"
+ extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
+ fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake"
+ networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1"
+ fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake"
+ policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
+ fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake"
+ rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+ fakerbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1/fake"
+ rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
+ fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake"
+ rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
+ fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake"
+ schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
+ fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake"
+ schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
+ fakeschedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake"
+ settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1"
+ fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake"
+ storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+ fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake"
+ storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1"
+ fakestoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake"
+ storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
+ fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+var _ clientset.Interface = &Clientset{}
+
+// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client
+func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface {
+ return &fakeadmissionregistrationv1alpha1.FakeAdmissionregistrationV1alpha1{Fake: &c.Fake}
+}
+
+// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client
+func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface {
+ return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake}
+}
+
+// Admissionregistration retrieves the AdmissionregistrationV1beta1Client
+func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface {
+ return &fakeadmissionregistrationv1beta1.FakeAdmissionregistrationV1beta1{Fake: &c.Fake}
+}
+
+// AppsV1beta1 retrieves the AppsV1beta1Client
+func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface {
+ return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake}
+}
+
+// AppsV1beta2 retrieves the AppsV1beta2Client
+func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface {
+ return &fakeappsv1beta2.FakeAppsV1beta2{Fake: &c.Fake}
+}
+
+// AppsV1 retrieves the AppsV1Client
+func (c *Clientset) AppsV1() appsv1.AppsV1Interface {
+ return &fakeappsv1.FakeAppsV1{Fake: &c.Fake}
+}
+
+// Apps retrieves the AppsV1Client
+func (c *Clientset) Apps() appsv1.AppsV1Interface {
+ return &fakeappsv1.FakeAppsV1{Fake: &c.Fake}
+}
+
+// AuthenticationV1 retrieves the AuthenticationV1Client
+func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface {
+ return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake}
+}
+
+// Authentication retrieves the AuthenticationV1Client
+func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface {
+ return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake}
+}
+
+// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client
+func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface {
+ return &fakeauthenticationv1beta1.FakeAuthenticationV1beta1{Fake: &c.Fake}
+}
+
+// AuthorizationV1 retrieves the AuthorizationV1Client
+func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface {
+ return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake}
+}
+
+// Authorization retrieves the AuthorizationV1Client
+func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface {
+ return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake}
+}
+
+// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client
+func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface {
+ return &fakeauthorizationv1beta1.FakeAuthorizationV1beta1{Fake: &c.Fake}
+}
+
+// AutoscalingV1 retrieves the AutoscalingV1Client
+func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface {
+ return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake}
+}
+
+// Autoscaling retrieves the AutoscalingV1Client
+func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface {
+ return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake}
+}
+
+// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client
+func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface {
+ return &fakeautoscalingv2beta1.FakeAutoscalingV2beta1{Fake: &c.Fake}
+}
+
+// AutoscalingV2beta2 retrieves the AutoscalingV2beta2Client
+func (c *Clientset) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface {
+ return &fakeautoscalingv2beta2.FakeAutoscalingV2beta2{Fake: &c.Fake}
+}
+
+// BatchV1 retrieves the BatchV1Client
+func (c *Clientset) BatchV1() batchv1.BatchV1Interface {
+ return &fakebatchv1.FakeBatchV1{Fake: &c.Fake}
+}
+
+// Batch retrieves the BatchV1Client
+func (c *Clientset) Batch() batchv1.BatchV1Interface {
+ return &fakebatchv1.FakeBatchV1{Fake: &c.Fake}
+}
+
+// BatchV1beta1 retrieves the BatchV1beta1Client
+func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface {
+ return &fakebatchv1beta1.FakeBatchV1beta1{Fake: &c.Fake}
+}
+
+// BatchV2alpha1 retrieves the BatchV2alpha1Client
+func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface {
+ return &fakebatchv2alpha1.FakeBatchV2alpha1{Fake: &c.Fake}
+}
+
+// CertificatesV1beta1 retrieves the CertificatesV1beta1Client
+func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface {
+ return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake}
+}
+
+// Certificates retrieves the CertificatesV1beta1Client
+func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface {
+ return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake}
+}
+
+// CoordinationV1beta1 retrieves the CoordinationV1beta1Client
+func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface {
+ return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake}
+}
+
+// Coordination retrieves the CoordinationV1beta1Client
+func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface {
+ return &fakecoordinationv1beta1.FakeCoordinationV1beta1{Fake: &c.Fake}
+}
+
+// CoreV1 retrieves the CoreV1Client
+func (c *Clientset) CoreV1() corev1.CoreV1Interface {
+ return &fakecorev1.FakeCoreV1{Fake: &c.Fake}
+}
+
+// Core retrieves the CoreV1Client
+func (c *Clientset) Core() corev1.CoreV1Interface {
+ return &fakecorev1.FakeCoreV1{Fake: &c.Fake}
+}
+
+// EventsV1beta1 retrieves the EventsV1beta1Client
+func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface {
+ return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake}
+}
+
+// Events retrieves the EventsV1beta1Client
+func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface {
+ return &fakeeventsv1beta1.FakeEventsV1beta1{Fake: &c.Fake}
+}
+
+// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client
+func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface {
+ return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake}
+}
+
+// Extensions retrieves the ExtensionsV1beta1Client
+func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface {
+ return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake}
+}
+
+// NetworkingV1 retrieves the NetworkingV1Client
+func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
+ return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
+}
+
+// Networking retrieves the NetworkingV1Client
+func (c *Clientset) Networking() networkingv1.NetworkingV1Interface {
+ return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
+}
+
+// PolicyV1beta1 retrieves the PolicyV1beta1Client
+func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface {
+ return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake}
+}
+
+// Policy retrieves the PolicyV1beta1Client
+func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface {
+ return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake}
+}
+
+// RbacV1 retrieves the RbacV1Client
+func (c *Clientset) RbacV1() rbacv1.RbacV1Interface {
+ return &fakerbacv1.FakeRbacV1{Fake: &c.Fake}
+}
+
+// Rbac retrieves the RbacV1Client
+func (c *Clientset) Rbac() rbacv1.RbacV1Interface {
+ return &fakerbacv1.FakeRbacV1{Fake: &c.Fake}
+}
+
+// RbacV1beta1 retrieves the RbacV1beta1Client
+func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface {
+ return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake}
+}
+
+// RbacV1alpha1 retrieves the RbacV1alpha1Client
+func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface {
+ return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake}
+}
+
+// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
+func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
+ return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake}
+}
+
+// SchedulingV1beta1 retrieves the SchedulingV1beta1Client
+func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface {
+ return &fakeschedulingv1beta1.FakeSchedulingV1beta1{Fake: &c.Fake}
+}
+
+// Scheduling retrieves the SchedulingV1beta1Client
+func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface {
+ return &fakeschedulingv1beta1.FakeSchedulingV1beta1{Fake: &c.Fake}
+}
+
+// SettingsV1alpha1 retrieves the SettingsV1alpha1Client
+func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface {
+ return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake}
+}
+
+// Settings retrieves the SettingsV1alpha1Client
+func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface {
+ return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake}
+}
+
+// StorageV1beta1 retrieves the StorageV1beta1Client
+func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface {
+ return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake}
+}
+
+// StorageV1 retrieves the StorageV1Client
+func (c *Clientset) StorageV1() storagev1.StorageV1Interface {
+ return &fakestoragev1.FakeStorageV1{Fake: &c.Fake}
+}
+
+// Storage retrieves the StorageV1Client
+func (c *Clientset) Storage() storagev1.StorageV1Interface {
+ return &fakestoragev1.FakeStorageV1{Fake: &c.Fake}
+}
+
+// StorageV1alpha1 retrieves the StorageV1alpha1Client
+func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface {
+ return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake}
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/fake/doc.go
new file mode 100644
index 00000000000..9b99e716709
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/fake/register.go b/vendor/k8s.io/client-go/kubernetes/fake/register.go
new file mode 100644
index 00000000000..c429979688b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/fake/register.go
@@ -0,0 +1,116 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ appsv1 "k8s.io/api/apps/v1"
+ appsv1beta1 "k8s.io/api/apps/v1beta1"
+ appsv1beta2 "k8s.io/api/apps/v1beta2"
+ authenticationv1 "k8s.io/api/authentication/v1"
+ authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+ authorizationv1 "k8s.io/api/authorization/v1"
+ authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+ autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ batchv1beta1 "k8s.io/api/batch/v1beta1"
+ batchv2alpha1 "k8s.io/api/batch/v2alpha1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+ coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
+ corev1 "k8s.io/api/core/v1"
+ eventsv1beta1 "k8s.io/api/events/v1beta1"
+ extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+ networkingv1 "k8s.io/api/networking/v1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+ rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
+ settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
+ storagev1 "k8s.io/api/storage/v1"
+ storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+ storagev1beta1 "k8s.io/api/storage/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+var parameterCodec = runtime.NewParameterCodec(scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ admissionregistrationv1alpha1.AddToScheme,
+ admissionregistrationv1beta1.AddToScheme,
+ appsv1beta1.AddToScheme,
+ appsv1beta2.AddToScheme,
+ appsv1.AddToScheme,
+ authenticationv1.AddToScheme,
+ authenticationv1beta1.AddToScheme,
+ authorizationv1.AddToScheme,
+ authorizationv1beta1.AddToScheme,
+ autoscalingv1.AddToScheme,
+ autoscalingv2beta1.AddToScheme,
+ autoscalingv2beta2.AddToScheme,
+ batchv1.AddToScheme,
+ batchv1beta1.AddToScheme,
+ batchv2alpha1.AddToScheme,
+ certificatesv1beta1.AddToScheme,
+ coordinationv1beta1.AddToScheme,
+ corev1.AddToScheme,
+ eventsv1beta1.AddToScheme,
+ extensionsv1beta1.AddToScheme,
+ networkingv1.AddToScheme,
+ policyv1beta1.AddToScheme,
+ rbacv1.AddToScheme,
+ rbacv1beta1.AddToScheme,
+ rbacv1alpha1.AddToScheme,
+ schedulingv1alpha1.AddToScheme,
+ schedulingv1beta1.AddToScheme,
+ settingsv1alpha1.AddToScheme,
+ storagev1beta1.AddToScheme,
+ storagev1.AddToScheme,
+ storagev1alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go
new file mode 100644
index 00000000000..8457aec2763
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAdmissionregistrationV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAdmissionregistrationV1alpha1) InitializerConfigurations() v1alpha1.InitializerConfigurationInterface {
+ return &FakeInitializerConfigurations{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAdmissionregistrationV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go
new file mode 100644
index 00000000000..b927dae2cd3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeInitializerConfigurations implements InitializerConfigurationInterface
+type FakeInitializerConfigurations struct {
+ Fake *FakeAdmissionregistrationV1alpha1
+}
+
+var initializerconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Resource: "initializerconfigurations"}
+
+var initializerconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "InitializerConfiguration"}
+
+// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any.
+func (c *FakeInitializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(initializerconfigurationsResource, name), &v1alpha1.InitializerConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.InitializerConfiguration), err
+}
+
+// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors.
+func (c *FakeInitializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(initializerconfigurationsResource, initializerconfigurationsKind, opts), &v1alpha1.InitializerConfigurationList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.InitializerConfigurationList{ListMeta: obj.(*v1alpha1.InitializerConfigurationList).ListMeta}
+ for _, item := range obj.(*v1alpha1.InitializerConfigurationList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested initializerConfigurations.
+func (c *FakeInitializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(initializerconfigurationsResource, opts))
+}
+
+// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any.
+func (c *FakeInitializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(initializerconfigurationsResource, initializerConfiguration), &v1alpha1.InitializerConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.InitializerConfiguration), err
+}
+
+// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any.
+func (c *FakeInitializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(initializerconfigurationsResource, initializerConfiguration), &v1alpha1.InitializerConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.InitializerConfiguration), err
+}
+
+// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs.
+func (c *FakeInitializerConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(initializerconfigurationsResource, name), &v1alpha1.InitializerConfiguration{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeInitializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(initializerconfigurationsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.InitializerConfigurationList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched initializerConfiguration.
+func (c *FakeInitializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(initializerconfigurationsResource, name, data, subresources...), &v1alpha1.InitializerConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.InitializerConfiguration), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go
new file mode 100644
index 00000000000..1a988ddba1a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAdmissionregistrationV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAdmissionregistrationV1beta1) MutatingWebhookConfigurations() v1beta1.MutatingWebhookConfigurationInterface {
+ return &FakeMutatingWebhookConfigurations{c}
+}
+
+func (c *FakeAdmissionregistrationV1beta1) ValidatingWebhookConfigurations() v1beta1.ValidatingWebhookConfigurationInterface {
+ return &FakeValidatingWebhookConfigurations{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAdmissionregistrationV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
new file mode 100644
index 00000000000..e06888cc13c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeMutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface
+type FakeMutatingWebhookConfigurations struct {
+ Fake *FakeAdmissionregistrationV1beta1
+}
+
+var mutatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "mutatingwebhookconfigurations"}
+
+var mutatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "MutatingWebhookConfiguration"}
+
+// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
+func (c *FakeMutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.MutatingWebhookConfiguration), err
+}
+
+// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
+func (c *FakeMutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(mutatingwebhookconfigurationsResource, mutatingwebhookconfigurationsKind, opts), &v1beta1.MutatingWebhookConfigurationList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.MutatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.MutatingWebhookConfigurationList).ListMeta}
+ for _, item := range obj.(*v1beta1.MutatingWebhookConfigurationList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
+func (c *FakeMutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(mutatingwebhookconfigurationsResource, opts))
+}
+
+// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
+func (c *FakeMutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.MutatingWebhookConfiguration), err
+}
+
+// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
+func (c *FakeMutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(mutatingwebhookconfigurationsResource, mutatingWebhookConfiguration), &v1beta1.MutatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.MutatingWebhookConfiguration), err
+}
+
+// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
+func (c *FakeMutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(mutatingwebhookconfigurationsResource, name), &v1beta1.MutatingWebhookConfiguration{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeMutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(mutatingwebhookconfigurationsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.MutatingWebhookConfigurationList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched mutatingWebhookConfiguration.
+func (c *FakeMutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(mutatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.MutatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.MutatingWebhookConfiguration), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
new file mode 100644
index 00000000000..1069634e236
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeValidatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface
+type FakeValidatingWebhookConfigurations struct {
+ Fake *FakeAdmissionregistrationV1beta1
+}
+
+var validatingwebhookconfigurationsResource = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1beta1", Resource: "validatingwebhookconfigurations"}
+
+var validatingwebhookconfigurationsKind = schema.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1beta1", Kind: "ValidatingWebhookConfiguration"}
+
+// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
+func (c *FakeValidatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ValidatingWebhookConfiguration), err
+}
+
+// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
+func (c *FakeValidatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(validatingwebhookconfigurationsResource, validatingwebhookconfigurationsKind, opts), &v1beta1.ValidatingWebhookConfigurationList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.ValidatingWebhookConfigurationList{ListMeta: obj.(*v1beta1.ValidatingWebhookConfigurationList).ListMeta}
+ for _, item := range obj.(*v1beta1.ValidatingWebhookConfigurationList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
+func (c *FakeValidatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(validatingwebhookconfigurationsResource, opts))
+}
+
+// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
+func (c *FakeValidatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ValidatingWebhookConfiguration), err
+}
+
+// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
+func (c *FakeValidatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(validatingwebhookconfigurationsResource, validatingWebhookConfiguration), &v1beta1.ValidatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ValidatingWebhookConfiguration), err
+}
+
+// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
+func (c *FakeValidatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(validatingwebhookconfigurationsResource, name), &v1beta1.ValidatingWebhookConfiguration{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeValidatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(validatingwebhookconfigurationsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.ValidatingWebhookConfigurationList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched validatingWebhookConfiguration.
+func (c *FakeValidatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(validatingwebhookconfigurationsResource, name, data, subresources...), &v1beta1.ValidatingWebhookConfiguration{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ValidatingWebhookConfiguration), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go
new file mode 100644
index 00000000000..458df0fa335
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAppsV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAppsV1) ControllerRevisions(namespace string) v1.ControllerRevisionInterface {
+ return &FakeControllerRevisions{c, namespace}
+}
+
+func (c *FakeAppsV1) DaemonSets(namespace string) v1.DaemonSetInterface {
+ return &FakeDaemonSets{c, namespace}
+}
+
+func (c *FakeAppsV1) Deployments(namespace string) v1.DeploymentInterface {
+ return &FakeDeployments{c, namespace}
+}
+
+func (c *FakeAppsV1) ReplicaSets(namespace string) v1.ReplicaSetInterface {
+ return &FakeReplicaSets{c, namespace}
+}
+
+func (c *FakeAppsV1) StatefulSets(namespace string) v1.StatefulSetInterface {
+ return &FakeStatefulSets{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAppsV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
new file mode 100644
index 00000000000..fc2808daf88
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeControllerRevisions implements ControllerRevisionInterface
+type FakeControllerRevisions struct {
+ Fake *FakeAppsV1
+ ns string
+}
+
+var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "controllerrevisions"}
+
+var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ControllerRevision"}
+
+// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *appsv1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ControllerRevision), err
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *appsv1.ControllerRevisionList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &appsv1.ControllerRevisionList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &appsv1.ControllerRevisionList{ListMeta: obj.(*appsv1.ControllerRevisionList).ListMeta}
+ for _, item := range obj.(*appsv1.ControllerRevisionList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Create(controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ControllerRevision), err
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Update(controllerRevision *appsv1.ControllerRevision) (result *appsv1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &appsv1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ControllerRevision), err
+}
+
+// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &appsv1.ControllerRevision{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &appsv1.ControllerRevisionList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &appsv1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ControllerRevision), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
new file mode 100644
index 00000000000..89e72ebd399
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDaemonSets implements DaemonSetInterface
+type FakeDaemonSets struct {
+ Fake *FakeAppsV1
+ ns string
+}
+
+var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
+
+var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}
+
+// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *appsv1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.DaemonSet), err
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *appsv1.DaemonSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &appsv1.DaemonSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &appsv1.DaemonSetList{ListMeta: obj.(*appsv1.DaemonSetList).ListMeta}
+ for _, item := range obj.(*appsv1.DaemonSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Create(daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.DaemonSet), err
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Update(daemonSet *appsv1.DaemonSet) (result *appsv1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &appsv1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.DaemonSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDaemonSets) UpdateStatus(daemonSet *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &appsv1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.DaemonSet), err
+}
+
+// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &appsv1.DaemonSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &appsv1.DaemonSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &appsv1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.DaemonSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
new file mode 100644
index 00000000000..2fbd82d6b8e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDeployments implements DeploymentInterface
+type FakeDeployments struct {
+ Fake *FakeAppsV1
+ ns string
+}
+
+var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
+
+var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *appsv1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &appsv1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.Deployment), err
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *FakeDeployments) List(opts v1.ListOptions) (result *appsv1.DeploymentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &appsv1.DeploymentList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &appsv1.DeploymentList{ListMeta: obj.(*appsv1.DeploymentList).ListMeta}
+ for _, item := range obj.(*appsv1.DeploymentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Create(deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.Deployment), err
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Update(deployment *appsv1.Deployment) (result *appsv1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &appsv1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.Deployment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDeployments) UpdateStatus(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &appsv1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.Deployment), err
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &appsv1.Deployment{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &appsv1.DeploymentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &appsv1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.Deployment), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
new file mode 100644
index 00000000000..7b882c8630c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeReplicaSets implements ReplicaSetInterface
+type FakeReplicaSets struct {
+ Fake *FakeAppsV1
+ ns string
+}
+
+var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
+
+var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "ReplicaSet"}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *appsv1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ReplicaSet), err
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *appsv1.ReplicaSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &appsv1.ReplicaSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &appsv1.ReplicaSetList{ListMeta: obj.(*appsv1.ReplicaSetList).ListMeta}
+ for _, item := range obj.(*appsv1.ReplicaSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Create(replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ReplicaSet), err
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Update(replicaSet *appsv1.ReplicaSet) (result *appsv1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &appsv1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ReplicaSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeReplicaSets) UpdateStatus(replicaSet *appsv1.ReplicaSet) (*appsv1.ReplicaSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &appsv1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ReplicaSet), err
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &appsv1.ReplicaSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &appsv1.ReplicaSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &appsv1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.ReplicaSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
new file mode 100644
index 00000000000..3cd643a5979
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeStatefulSets implements StatefulSetInterface
+type FakeStatefulSets struct {
+ Fake *FakeAppsV1
+ ns string
+}
+
+var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}
+
+var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}
+
+// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *appsv1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.StatefulSet), err
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *appsv1.StatefulSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &appsv1.StatefulSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &appsv1.StatefulSetList{ListMeta: obj.(*appsv1.StatefulSetList).ListMeta}
+ for _, item := range obj.(*appsv1.StatefulSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Create(statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.StatefulSet), err
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Update(statefulSet *appsv1.StatefulSet) (result *appsv1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &appsv1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.StatefulSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeStatefulSets) UpdateStatus(statefulSet *appsv1.StatefulSet) (*appsv1.StatefulSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &appsv1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.StatefulSet), err
+}
+
+// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &appsv1.StatefulSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &appsv1.StatefulSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *appsv1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &appsv1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*appsv1.StatefulSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go
new file mode 100644
index 00000000000..2ff602be9b6
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAppsV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAppsV1beta1) ControllerRevisions(namespace string) v1beta1.ControllerRevisionInterface {
+ return &FakeControllerRevisions{c, namespace}
+}
+
+func (c *FakeAppsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface {
+ return &FakeDeployments{c, namespace}
+}
+
+func (c *FakeAppsV1beta1) Scales(namespace string) v1beta1.ScaleInterface {
+ return &FakeScales{c, namespace}
+}
+
+func (c *FakeAppsV1beta1) StatefulSets(namespace string) v1beta1.StatefulSetInterface {
+ return &FakeStatefulSets{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAppsV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
new file mode 100644
index 00000000000..92419489113
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/apps/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeControllerRevisions implements ControllerRevisionInterface
+type FakeControllerRevisions struct {
+ Fake *FakeAppsV1beta1
+ ns string
+}
+
+var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "controllerrevisions"}
+
+var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "ControllerRevision"}
+
+// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ControllerRevision), err
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta1.ControllerRevisionList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.ControllerRevisionList{ListMeta: obj.(*v1beta1.ControllerRevisionList).ListMeta}
+ for _, item := range obj.(*v1beta1.ControllerRevisionList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ControllerRevision), err
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ControllerRevision), err
+}
+
+// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta1.ControllerRevision{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.ControllerRevisionList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta1.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ControllerRevision), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
new file mode 100644
index 00000000000..c4749c52b3b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/apps/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDeployments implements DeploymentInterface
+type FakeDeployments struct {
+ Fake *FakeAppsV1beta1
+ ns string
+}
+
+var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "deployments"}
+
+var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "Deployment"}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta}
+ for _, item := range obj.(*v1beta1.DeploymentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go
new file mode 100644
index 00000000000..de71947e523
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_scale.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeScales implements ScaleInterface
+type FakeScales struct {
+ Fake *FakeAppsV1beta1
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
new file mode 100644
index 00000000000..b0f194a7ddd
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/apps/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeStatefulSets implements StatefulSetInterface
+type FakeStatefulSets struct {
+ Fake *FakeAppsV1beta1
+ ns string
+}
+
+var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}
+
+var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta1", Kind: "StatefulSet"}
+
+// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StatefulSet), err
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta1.StatefulSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.StatefulSetList{ListMeta: obj.(*v1beta1.StatefulSetList).ListMeta}
+ for _, item := range obj.(*v1beta1.StatefulSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StatefulSet), err
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StatefulSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StatefulSet), err
+}
+
+// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta1.StatefulSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.StatefulSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta1.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StatefulSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go
new file mode 100644
index 00000000000..f7d79d35225
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAppsV1beta2 struct {
+ *testing.Fake
+}
+
+func (c *FakeAppsV1beta2) ControllerRevisions(namespace string) v1beta2.ControllerRevisionInterface {
+ return &FakeControllerRevisions{c, namespace}
+}
+
+func (c *FakeAppsV1beta2) DaemonSets(namespace string) v1beta2.DaemonSetInterface {
+ return &FakeDaemonSets{c, namespace}
+}
+
+func (c *FakeAppsV1beta2) Deployments(namespace string) v1beta2.DeploymentInterface {
+ return &FakeDeployments{c, namespace}
+}
+
+func (c *FakeAppsV1beta2) ReplicaSets(namespace string) v1beta2.ReplicaSetInterface {
+ return &FakeReplicaSets{c, namespace}
+}
+
+func (c *FakeAppsV1beta2) Scales(namespace string) v1beta2.ScaleInterface {
+ return &FakeScales{c, namespace}
+}
+
+func (c *FakeAppsV1beta2) StatefulSets(namespace string) v1beta2.StatefulSetInterface {
+ return &FakeStatefulSets{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAppsV1beta2) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
new file mode 100644
index 00000000000..954ac35df8c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeControllerRevisions implements ControllerRevisionInterface
+type FakeControllerRevisions struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
+
+var controllerrevisionsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "controllerrevisions"}
+
+var controllerrevisionsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ControllerRevision"}
+
+// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
+func (c *FakeControllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ControllerRevision), err
+}
+
+// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
+func (c *FakeControllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(controllerrevisionsResource, controllerrevisionsKind, c.ns, opts), &v1beta2.ControllerRevisionList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.ControllerRevisionList{ListMeta: obj.(*v1beta2.ControllerRevisionList).ListMeta}
+ for _, item := range obj.(*v1beta2.ControllerRevisionList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested controllerRevisions.
+func (c *FakeControllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(controllerrevisionsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ControllerRevision), err
+}
+
+// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
+func (c *FakeControllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(controllerrevisionsResource, c.ns, controllerRevision), &v1beta2.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ControllerRevision), err
+}
+
+// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
+func (c *FakeControllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(controllerrevisionsResource, c.ns, name), &v1beta2.ControllerRevision{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeControllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(controllerrevisionsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.ControllerRevisionList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched controllerRevision.
+func (c *FakeControllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(controllerrevisionsResource, c.ns, name, data, subresources...), &v1beta2.ControllerRevision{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ControllerRevision), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
new file mode 100644
index 00000000000..38a1475503e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDaemonSets implements DaemonSetInterface
+type FakeDaemonSets struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
+
+var daemonsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "daemonsets"}
+
+var daemonsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "DaemonSet"}
+
+// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.DaemonSet), err
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta2.DaemonSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.DaemonSetList{ListMeta: obj.(*v1beta2.DaemonSetList).ListMeta}
+ for _, item := range obj.(*v1beta2.DaemonSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.DaemonSet), err
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta2.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.DaemonSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta2.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.DaemonSet), err
+}
+
+// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta2.DaemonSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.DaemonSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta2.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.DaemonSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
new file mode 100644
index 00000000000..cae2322424b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDeployments implements DeploymentInterface
+type FakeDeployments struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
+
+var deploymentsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "deployments"}
+
+var deploymentsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Deployment), err
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta2.DeploymentList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.DeploymentList{ListMeta: obj.(*v1beta2.DeploymentList).ListMeta}
+ for _, item := range obj.(*v1beta2.DeploymentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Deployment), err
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta2.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Deployment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDeployments) UpdateStatus(deployment *v1beta2.Deployment) (*v1beta2.Deployment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta2.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Deployment), err
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta2.Deployment{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.DeploymentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta2.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Deployment), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
new file mode 100644
index 00000000000..05fa7893180
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeReplicaSets implements ReplicaSetInterface
+type FakeReplicaSets struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
+
+var replicasetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "replicasets"}
+
+var replicasetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ReplicaSet), err
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta2.ReplicaSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.ReplicaSetList{ListMeta: obj.(*v1beta2.ReplicaSetList).ListMeta}
+ for _, item := range obj.(*v1beta2.ReplicaSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ReplicaSet), err
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta2.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ReplicaSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta2.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ReplicaSet), err
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta2.ReplicaSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.ReplicaSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta2.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.ReplicaSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go
new file mode 100644
index 00000000000..b06b7e8e303
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_scale.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeScales implements ScaleInterface
+type FakeScales struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
new file mode 100644
index 00000000000..fe78512862a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
@@ -0,0 +1,162 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeStatefulSets implements StatefulSetInterface
+type FakeStatefulSets struct {
+ Fake *FakeAppsV1beta2
+ ns string
+}
+
+var statefulsetsResource = schema.GroupVersionResource{Group: "apps", Version: "v1beta2", Resource: "statefulsets"}
+
+var statefulsetsKind = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "StatefulSet"}
+
+// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
+func (c *FakeStatefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.StatefulSet), err
+}
+
+// List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
+func (c *FakeStatefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(statefulsetsResource, statefulsetsKind, c.ns, opts), &v1beta2.StatefulSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta2.StatefulSetList{ListMeta: obj.(*v1beta2.StatefulSetList).ListMeta}
+ for _, item := range obj.(*v1beta2.StatefulSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested statefulSets.
+func (c *FakeStatefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(statefulsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.StatefulSet), err
+}
+
+// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
+func (c *FakeStatefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(statefulsetsResource, c.ns, statefulSet), &v1beta2.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.StatefulSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeStatefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "status", c.ns, statefulSet), &v1beta2.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.StatefulSet), err
+}
+
+// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
+func (c *FakeStatefulSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(statefulsetsResource, c.ns, name), &v1beta2.StatefulSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeStatefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(statefulsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta2.StatefulSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched statefulSet.
+func (c *FakeStatefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(statefulsetsResource, c.ns, name, data, subresources...), &v1beta2.StatefulSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.StatefulSet), err
+}
+
+// GetScale takes name of the statefulSet, and returns the corresponding scale object, and an error if there is any.
+func (c *FakeStatefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetSubresourceAction(statefulsetsResource, c.ns, "scale", statefulSetName), &v1beta2.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Scale), err
+}
+
+// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *FakeStatefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(statefulsetsResource, "scale", c.ns, scale), &v1beta2.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta2.Scale), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go
new file mode 100644
index 00000000000..ee06a6cdd6c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/authentication/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAuthenticationV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAuthenticationV1) TokenReviews() v1.TokenReviewInterface {
+ return &FakeTokenReviews{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAuthenticationV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
new file mode 100644
index 00000000000..e2a7f72b666
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeTokenReviews implements TokenReviewInterface
+type FakeTokenReviews struct {
+ Fake *FakeAuthenticationV1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go
new file mode 100644
index 00000000000..7008c927cd4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authenticationapi "k8s.io/api/authentication/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{})
+ return obj.(*authenticationapi.TokenReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go
new file mode 100644
index 00000000000..7299653ca26
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAuthenticationV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAuthenticationV1beta1) TokenReviews() v1beta1.TokenReviewInterface {
+ return &FakeTokenReviews{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAuthenticationV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
new file mode 100644
index 00000000000..63b6b6a8535
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeTokenReviews implements TokenReviewInterface
+type FakeTokenReviews struct {
+ Fake *FakeAuthenticationV1beta1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go
new file mode 100644
index 00000000000..92ef5d1a158
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authenticationapi "k8s.io/api/authentication/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeTokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authenticationapi.SchemeGroupVersion.WithResource("tokenreviews"), tokenReview), &authenticationapi.TokenReview{})
+ return obj.(*authenticationapi.TokenReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go
new file mode 100644
index 00000000000..f7e8234509d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAuthorizationV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAuthorizationV1) LocalSubjectAccessReviews(namespace string) v1.LocalSubjectAccessReviewInterface {
+ return &FakeLocalSubjectAccessReviews{c, namespace}
+}
+
+func (c *FakeAuthorizationV1) SelfSubjectAccessReviews() v1.SelfSubjectAccessReviewInterface {
+ return &FakeSelfSubjectAccessReviews{c}
+}
+
+func (c *FakeAuthorizationV1) SelfSubjectRulesReviews() v1.SelfSubjectRulesReviewInterface {
+ return &FakeSelfSubjectRulesReviews{c}
+}
+
+func (c *FakeAuthorizationV1) SubjectAccessReviews() v1.SubjectAccessReviewInterface {
+ return &FakeSubjectAccessReviews{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAuthorizationV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
new file mode 100644
index 00000000000..778ba9cea02
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface
+type FakeLocalSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go
new file mode 100644
index 00000000000..a01e415c8fa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{})
+ return obj.(*authorizationapi.LocalSubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
new file mode 100644
index 00000000000..a43a980baff
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
+type FakeSelfSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go
new file mode 100644
index 00000000000..91acbe029e7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{})
+ return obj.(*authorizationapi.SelfSubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
new file mode 100644
index 00000000000..243f2e89eea
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
+type FakeSelfSubjectRulesReviews struct {
+ Fake *FakeAuthorizationV1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go
new file mode 100644
index 00000000000..a6dc9513498
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{})
+ return obj.(*authorizationapi.SelfSubjectRulesReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
new file mode 100644
index 00000000000..d07e5625464
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSubjectAccessReviews implements SubjectAccessReviewInterface
+type FakeSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go
new file mode 100644
index 00000000000..a2a2f0697ec
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*authorizationapi.SubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go
new file mode 100644
index 00000000000..8e328a57bc8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAuthorizationV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAuthorizationV1beta1) LocalSubjectAccessReviews(namespace string) v1beta1.LocalSubjectAccessReviewInterface {
+ return &FakeLocalSubjectAccessReviews{c, namespace}
+}
+
+func (c *FakeAuthorizationV1beta1) SelfSubjectAccessReviews() v1beta1.SelfSubjectAccessReviewInterface {
+ return &FakeSelfSubjectAccessReviews{c}
+}
+
+func (c *FakeAuthorizationV1beta1) SelfSubjectRulesReviews() v1beta1.SelfSubjectRulesReviewInterface {
+ return &FakeSelfSubjectRulesReviews{c}
+}
+
+func (c *FakeAuthorizationV1beta1) SubjectAccessReviews() v1beta1.SubjectAccessReviewInterface {
+ return &FakeSubjectAccessReviews{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAuthorizationV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go
new file mode 100644
index 00000000000..8754e39d87c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
new file mode 100644
index 00000000000..d02d05e5d16
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeLocalSubjectAccessReviews implements LocalSubjectAccessReviewInterface
+type FakeLocalSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1beta1
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go
new file mode 100644
index 00000000000..5211628f26e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeLocalSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewCreateAction(authorizationapi.SchemeGroupVersion.WithResource("localsubjectaccessreviews"), c.ns, sar), &authorizationapi.SubjectAccessReview{})
+ return obj.(*authorizationapi.LocalSubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
new file mode 100644
index 00000000000..8f98ce7a3c1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSelfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
+type FakeSelfSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1beta1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go
new file mode 100644
index 00000000000..6e3af12a789
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSelfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectaccessreviews"), sar), &authorizationapi.SelfSubjectAccessReview{})
+ return obj.(*authorizationapi.SelfSubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
new file mode 100644
index 00000000000..d8466b4c8da
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSelfSubjectRulesReviews implements SelfSubjectRulesReviewInterface
+type FakeSelfSubjectRulesReviews struct {
+ Fake *FakeAuthorizationV1beta1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go
new file mode 100644
index 00000000000..f92ffd717de
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSelfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("selfsubjectrulesreviews"), srr), &authorizationapi.SelfSubjectRulesReview{})
+ return obj.(*authorizationapi.SelfSubjectRulesReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
new file mode 100644
index 00000000000..0d0abdb72a5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
@@ -0,0 +1,24 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeSubjectAccessReviews implements SubjectAccessReviewInterface
+type FakeSubjectAccessReviews struct {
+ Fake *FakeAuthorizationV1beta1
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go
new file mode 100644
index 00000000000..b0b18b099c0
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authorizationapi "k8s.io/api/authorization/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeSubjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
+ obj, err := c.Fake.Invokes(core.NewRootCreateAction(authorizationapi.SchemeGroupVersion.WithResource("subjectaccessreviews"), sar), &authorizationapi.SubjectAccessReview{})
+ return obj.(*authorizationapi.SubjectAccessReview), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go
new file mode 100644
index 00000000000..99e26fcf39b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAutoscalingV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAutoscalingV1) HorizontalPodAutoscalers(namespace string) v1.HorizontalPodAutoscalerInterface {
+ return &FakeHorizontalPodAutoscalers{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAutoscalingV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
new file mode 100644
index 00000000000..7df8343753c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type FakeHorizontalPodAutoscalers struct {
+ Fake *FakeAutoscalingV1
+ ns string
+}
+
+var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}
+
+var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "HorizontalPodAutoscaler"}
+
+// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *autoscalingv1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*autoscalingv1.HorizontalPodAutoscaler), err
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *autoscalingv1.HorizontalPodAutoscalerList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &autoscalingv1.HorizontalPodAutoscalerList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &autoscalingv1.HorizontalPodAutoscalerList{ListMeta: obj.(*autoscalingv1.HorizontalPodAutoscalerList).ListMeta}
+ for _, item := range obj.(*autoscalingv1.HorizontalPodAutoscalerList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*autoscalingv1.HorizontalPodAutoscaler), err
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (result *autoscalingv1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*autoscalingv1.HorizontalPodAutoscaler), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler) (*autoscalingv1.HorizontalPodAutoscaler, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*autoscalingv1.HorizontalPodAutoscaler), err
+}
+
+// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &autoscalingv1.HorizontalPodAutoscalerList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *autoscalingv1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &autoscalingv1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*autoscalingv1.HorizontalPodAutoscaler), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go
new file mode 100644
index 00000000000..be8e0f48e50
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAutoscalingV2beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAutoscalingV2beta1) HorizontalPodAutoscalers(namespace string) v2beta1.HorizontalPodAutoscalerInterface {
+ return &FakeHorizontalPodAutoscalers{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAutoscalingV2beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
new file mode 100644
index 00000000000..2d860341af8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type FakeHorizontalPodAutoscalers struct {
+ Fake *FakeAutoscalingV2beta1
+ ns string
+}
+
+var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta1", Resource: "horizontalpodautoscalers"}
+
+var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"}
+
+// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta1.HorizontalPodAutoscaler), err
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta1.HorizontalPodAutoscalerList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2beta1.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta1.HorizontalPodAutoscalerList).ListMeta}
+ for _, item := range obj.(*v2beta1.HorizontalPodAutoscalerList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta1.HorizontalPodAutoscaler), err
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta1.HorizontalPodAutoscaler), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta1.HorizontalPodAutoscaler), err
+}
+
+// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta1.HorizontalPodAutoscaler{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v2beta1.HorizontalPodAutoscalerList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta1.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta1.HorizontalPodAutoscaler), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go
new file mode 100644
index 00000000000..8c36e0e815a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAutoscalingV2beta2 struct {
+ *testing.Fake
+}
+
+func (c *FakeAutoscalingV2beta2) HorizontalPodAutoscalers(namespace string) v2beta2.HorizontalPodAutoscalerInterface {
+ return &FakeHorizontalPodAutoscalers{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAutoscalingV2beta2) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
new file mode 100644
index 00000000000..a19b86e2d03
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2beta2 "k8s.io/api/autoscaling/v2beta2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface
+type FakeHorizontalPodAutoscalers struct {
+ Fake *FakeAutoscalingV2beta2
+ ns string
+}
+
+var horizontalpodautoscalersResource = schema.GroupVersionResource{Group: "autoscaling", Version: "v2beta2", Resource: "horizontalpodautoscalers"}
+
+var horizontalpodautoscalersKind = schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"}
+
+// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
+func (c *FakeHorizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta2.HorizontalPodAutoscaler), err
+}
+
+// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
+func (c *FakeHorizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(horizontalpodautoscalersResource, horizontalpodautoscalersKind, c.ns, opts), &v2beta2.HorizontalPodAutoscalerList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2beta2.HorizontalPodAutoscalerList{ListMeta: obj.(*v2beta2.HorizontalPodAutoscalerList).ListMeta}
+ for _, item := range obj.(*v2beta2.HorizontalPodAutoscalerList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
+func (c *FakeHorizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(horizontalpodautoscalersResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta2.HorizontalPodAutoscaler), err
+}
+
+// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
+func (c *FakeHorizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(horizontalpodautoscalersResource, c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta2.HorizontalPodAutoscaler), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeHorizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(horizontalpodautoscalersResource, "status", c.ns, horizontalPodAutoscaler), &v2beta2.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta2.HorizontalPodAutoscaler), err
+}
+
+// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
+func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(horizontalpodautoscalersResource, c.ns, name), &v2beta2.HorizontalPodAutoscaler{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeHorizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(horizontalpodautoscalersResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v2beta2.HorizontalPodAutoscalerList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched horizontalPodAutoscaler.
+func (c *FakeHorizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(horizontalpodautoscalersResource, c.ns, name, data, subresources...), &v2beta2.HorizontalPodAutoscaler{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2beta2.HorizontalPodAutoscaler), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go
new file mode 100644
index 00000000000..c90dd75616d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/batch/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeBatchV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeBatchV1) Jobs(namespace string) v1.JobInterface {
+ return &FakeJobs{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeBatchV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
new file mode 100644
index 00000000000..f12619bb423
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ batchv1 "k8s.io/api/batch/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeJobs implements JobInterface
+type FakeJobs struct {
+ Fake *FakeBatchV1
+ ns string
+}
+
+var jobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}
+
+var jobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1", Kind: "Job"}
+
+// Get takes name of the job, and returns the corresponding job object, and an error if there is any.
+func (c *FakeJobs) Get(name string, options v1.GetOptions) (result *batchv1.Job, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(jobsResource, c.ns, name), &batchv1.Job{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*batchv1.Job), err
+}
+
+// List takes label and field selectors, and returns the list of Jobs that match those selectors.
+func (c *FakeJobs) List(opts v1.ListOptions) (result *batchv1.JobList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(jobsResource, jobsKind, c.ns, opts), &batchv1.JobList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &batchv1.JobList{ListMeta: obj.(*batchv1.JobList).ListMeta}
+ for _, item := range obj.(*batchv1.JobList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested jobs.
+func (c *FakeJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(jobsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any.
+func (c *FakeJobs) Create(job *batchv1.Job) (result *batchv1.Job, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(jobsResource, c.ns, job), &batchv1.Job{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*batchv1.Job), err
+}
+
+// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
+func (c *FakeJobs) Update(job *batchv1.Job) (result *batchv1.Job, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(jobsResource, c.ns, job), &batchv1.Job{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*batchv1.Job), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeJobs) UpdateStatus(job *batchv1.Job) (*batchv1.Job, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(jobsResource, "status", c.ns, job), &batchv1.Job{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*batchv1.Job), err
+}
+
+// Delete takes name of the job and deletes it. Returns an error if one occurs.
+func (c *FakeJobs) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(jobsResource, c.ns, name), &batchv1.Job{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(jobsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &batchv1.JobList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched job.
+func (c *FakeJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *batchv1.Job, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(jobsResource, c.ns, name, data, subresources...), &batchv1.Job{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*batchv1.Job), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go
new file mode 100644
index 00000000000..6f350aed9a3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeBatchV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeBatchV1beta1) CronJobs(namespace string) v1beta1.CronJobInterface {
+ return &FakeCronJobs{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeBatchV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
new file mode 100644
index 00000000000..d80ef5e67e9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/batch/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCronJobs implements CronJobInterface
+type FakeCronJobs struct {
+ Fake *FakeBatchV1beta1
+ ns string
+}
+
+var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
+
+var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v1beta1", Kind: "CronJob"}
+
+// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
+func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CronJob), err
+}
+
+// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
+func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v1beta1.CronJobList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.CronJobList{ListMeta: obj.(*v1beta1.CronJobList).ListMeta}
+ for _, item := range obj.(*v1beta1.CronJobList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobs.
+func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *FakeCronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CronJob), err
+}
+
+// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *FakeCronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v1beta1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CronJob), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (*v1beta1.CronJob, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v1beta1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CronJob), err
+}
+
+// Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
+func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v1beta1.CronJob{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.CronJobList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched cronJob.
+func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v1beta1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CronJob), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go
new file mode 100644
index 00000000000..3e478cde9de
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeBatchV2alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeBatchV2alpha1) CronJobs(namespace string) v2alpha1.CronJobInterface {
+ return &FakeCronJobs{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeBatchV2alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go
new file mode 100644
index 00000000000..75c0b17338f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2alpha1 "k8s.io/api/batch/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCronJobs implements CronJobInterface
+type FakeCronJobs struct {
+ Fake *FakeBatchV2alpha1
+ ns string
+}
+
+var cronjobsResource = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
+
+var cronjobsKind = schema.GroupVersionKind{Group: "batch", Version: "v2alpha1", Kind: "CronJob"}
+
+// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
+func (c *FakeCronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CronJob), err
+}
+
+// List takes label and field selectors, and returns the list of CronJobs that match those selectors.
+func (c *FakeCronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(cronjobsResource, cronjobsKind, c.ns, opts), &v2alpha1.CronJobList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CronJobList{ListMeta: obj.(*v2alpha1.CronJobList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CronJobList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobs.
+func (c *FakeCronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(cronjobsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *FakeCronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CronJob), err
+}
+
+// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
+func (c *FakeCronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(cronjobsResource, c.ns, cronJob), &v2alpha1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CronJob), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (*v2alpha1.CronJob, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(cronjobsResource, "status", c.ns, cronJob), &v2alpha1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CronJob), err
+}
+
+// Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
+func (c *FakeCronJobs) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(cronjobsResource, c.ns, name), &v2alpha1.CronJob{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(cronjobsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CronJobList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched cronJob.
+func (c *FakeCronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(cronjobsResource, c.ns, name, data, subresources...), &v2alpha1.CronJob{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CronJob), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
new file mode 100644
index 00000000000..29d8b088ea2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCertificatesV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeCertificatesV1beta1) CertificateSigningRequests() v1beta1.CertificateSigningRequestInterface {
+ return &FakeCertificateSigningRequests{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCertificatesV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
new file mode 100644
index 00000000000..dfd5171951e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go
@@ -0,0 +1,131 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/certificates/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCertificateSigningRequests implements CertificateSigningRequestInterface
+type FakeCertificateSigningRequests struct {
+ Fake *FakeCertificatesV1beta1
+}
+
+var certificatesigningrequestsResource = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}
+
+var certificatesigningrequestsKind = schema.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"}
+
+// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
+func (c *FakeCertificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CertificateSigningRequest), err
+}
+
+// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
+func (c *FakeCertificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(certificatesigningrequestsResource, certificatesigningrequestsKind, opts), &v1beta1.CertificateSigningRequestList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.CertificateSigningRequestList{ListMeta: obj.(*v1beta1.CertificateSigningRequestList).ListMeta}
+ for _, item := range obj.(*v1beta1.CertificateSigningRequestList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested certificateSigningRequests.
+func (c *FakeCertificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts))
+}
+
+// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
+func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CertificateSigningRequest), err
+}
+
+// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
+func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CertificateSigningRequest), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1beta1.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CertificateSigningRequest), err
+}
+
+// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
+func (c *FakeCertificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1beta1.CertificateSigningRequest{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCertificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched certificateSigningRequest.
+func (c *FakeCertificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &v1beta1.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.CertificateSigningRequest), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go
new file mode 100644
index 00000000000..8af33e62ad5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ certificates "k8s.io/api/certificates/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeCertificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+ obj, err := c.Fake.
+ Invokes(core.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "approval", certificateSigningRequest), &certificates.CertificateSigningRequest{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*certificates.CertificateSigningRequest), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go
new file mode 100644
index 00000000000..f583b466e2f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCoordinationV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeCoordinationV1beta1) Leases(namespace string) v1beta1.LeaseInterface {
+ return &FakeLeases{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCoordinationV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
new file mode 100644
index 00000000000..3204e02913e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/coordination/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeLeases implements LeaseInterface
+type FakeLeases struct {
+ Fake *FakeCoordinationV1beta1
+ ns string
+}
+
+var leasesResource = schema.GroupVersionResource{Group: "coordination.k8s.io", Version: "v1beta1", Resource: "leases"}
+
+var leasesKind = schema.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1beta1", Kind: "Lease"}
+
+// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
+func (c *FakeLeases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(leasesResource, c.ns, name), &v1beta1.Lease{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Lease), err
+}
+
+// List takes label and field selectors, and returns the list of Leases that match those selectors.
+func (c *FakeLeases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(leasesResource, leasesKind, c.ns, opts), &v1beta1.LeaseList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.LeaseList{ListMeta: obj.(*v1beta1.LeaseList).ListMeta}
+ for _, item := range obj.(*v1beta1.LeaseList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested leases.
+func (c *FakeLeases) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(leasesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any.
+func (c *FakeLeases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(leasesResource, c.ns, lease), &v1beta1.Lease{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Lease), err
+}
+
+// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
+func (c *FakeLeases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(leasesResource, c.ns, lease), &v1beta1.Lease{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Lease), err
+}
+
+// Delete takes name of the lease and deletes it. Returns an error if one occurs.
+func (c *FakeLeases) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(leasesResource, c.ns, name), &v1beta1.Lease{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeLeases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(leasesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.LeaseList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched lease.
+func (c *FakeLeases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(leasesResource, c.ns, name, data, subresources...), &v1beta1.Lease{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Lease), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
new file mode 100644
index 00000000000..d06023d4826
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeComponentStatuses implements ComponentStatusInterface
+type FakeComponentStatuses struct {
+ Fake *FakeCoreV1
+}
+
+var componentstatusesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "componentstatuses"}
+
+var componentstatusesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ComponentStatus"}
+
+// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any.
+func (c *FakeComponentStatuses) Get(name string, options v1.GetOptions) (result *corev1.ComponentStatus, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(componentstatusesResource, name), &corev1.ComponentStatus{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ComponentStatus), err
+}
+
+// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors.
+func (c *FakeComponentStatuses) List(opts v1.ListOptions) (result *corev1.ComponentStatusList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(componentstatusesResource, componentstatusesKind, opts), &corev1.ComponentStatusList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ComponentStatusList{ListMeta: obj.(*corev1.ComponentStatusList).ListMeta}
+ for _, item := range obj.(*corev1.ComponentStatusList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested componentStatuses.
+func (c *FakeComponentStatuses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(componentstatusesResource, opts))
+}
+
+// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any.
+func (c *FakeComponentStatuses) Create(componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ComponentStatus), err
+}
+
+// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any.
+func (c *FakeComponentStatuses) Update(componentStatus *corev1.ComponentStatus) (result *corev1.ComponentStatus, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(componentstatusesResource, componentStatus), &corev1.ComponentStatus{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ComponentStatus), err
+}
+
+// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs.
+func (c *FakeComponentStatuses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(componentstatusesResource, name), &corev1.ComponentStatus{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeComponentStatuses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(componentstatusesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.ComponentStatusList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched componentStatus.
+func (c *FakeComponentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ComponentStatus, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(componentstatusesResource, name, data, subresources...), &corev1.ComponentStatus{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ComponentStatus), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
new file mode 100644
index 00000000000..b491661f208
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeConfigMaps implements ConfigMapInterface
+type FakeConfigMaps struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var configmapsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
+
+var configmapsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}
+
+// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any.
+func (c *FakeConfigMaps) Get(name string, options v1.GetOptions) (result *corev1.ConfigMap, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(configmapsResource, c.ns, name), &corev1.ConfigMap{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ConfigMap), err
+}
+
+// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
+func (c *FakeConfigMaps) List(opts v1.ListOptions) (result *corev1.ConfigMapList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(configmapsResource, configmapsKind, c.ns, opts), &corev1.ConfigMapList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ConfigMapList{ListMeta: obj.(*corev1.ConfigMapList).ListMeta}
+ for _, item := range obj.(*corev1.ConfigMapList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested configMaps.
+func (c *FakeConfigMaps) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(configmapsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any.
+func (c *FakeConfigMaps) Create(configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ConfigMap), err
+}
+
+// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
+func (c *FakeConfigMaps) Update(configMap *corev1.ConfigMap) (result *corev1.ConfigMap, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(configmapsResource, c.ns, configMap), &corev1.ConfigMap{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ConfigMap), err
+}
+
+// Delete takes name of the configMap and deletes it. Returns an error if one occurs.
+func (c *FakeConfigMaps) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(configmapsResource, c.ns, name), &corev1.ConfigMap{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeConfigMaps) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(configmapsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.ConfigMapList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched configMap.
+func (c *FakeConfigMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ConfigMap, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(configmapsResource, c.ns, name, data, subresources...), &corev1.ConfigMap{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ConfigMap), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go
new file mode 100644
index 00000000000..5ad90943c94
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go
@@ -0,0 +1,100 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/core/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCoreV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeCoreV1) ComponentStatuses() v1.ComponentStatusInterface {
+ return &FakeComponentStatuses{c}
+}
+
+func (c *FakeCoreV1) ConfigMaps(namespace string) v1.ConfigMapInterface {
+ return &FakeConfigMaps{c, namespace}
+}
+
+func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface {
+ return &FakeEndpoints{c, namespace}
+}
+
+func (c *FakeCoreV1) Events(namespace string) v1.EventInterface {
+ return &FakeEvents{c, namespace}
+}
+
+func (c *FakeCoreV1) LimitRanges(namespace string) v1.LimitRangeInterface {
+ return &FakeLimitRanges{c, namespace}
+}
+
+func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface {
+ return &FakeNamespaces{c}
+}
+
+func (c *FakeCoreV1) Nodes() v1.NodeInterface {
+ return &FakeNodes{c}
+}
+
+func (c *FakeCoreV1) PersistentVolumes() v1.PersistentVolumeInterface {
+ return &FakePersistentVolumes{c}
+}
+
+func (c *FakeCoreV1) PersistentVolumeClaims(namespace string) v1.PersistentVolumeClaimInterface {
+ return &FakePersistentVolumeClaims{c, namespace}
+}
+
+func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface {
+ return &FakePods{c, namespace}
+}
+
+func (c *FakeCoreV1) PodTemplates(namespace string) v1.PodTemplateInterface {
+ return &FakePodTemplates{c, namespace}
+}
+
+func (c *FakeCoreV1) ReplicationControllers(namespace string) v1.ReplicationControllerInterface {
+ return &FakeReplicationControllers{c, namespace}
+}
+
+func (c *FakeCoreV1) ResourceQuotas(namespace string) v1.ResourceQuotaInterface {
+ return &FakeResourceQuotas{c, namespace}
+}
+
+func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface {
+ return &FakeSecrets{c, namespace}
+}
+
+func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface {
+ return &FakeServices{c, namespace}
+}
+
+func (c *FakeCoreV1) ServiceAccounts(namespace string) v1.ServiceAccountInterface {
+ return &FakeServiceAccounts{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCoreV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
new file mode 100644
index 00000000000..2c9f0de0960
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEndpoints implements EndpointsInterface
+type FakeEndpoints struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var endpointsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}
+
+var endpointsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"}
+
+// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
+func (c *FakeEndpoints) Get(name string, options v1.GetOptions) (result *corev1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &corev1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Endpoints), err
+}
+
+// List takes label and field selectors, and returns the list of Endpoints that match those selectors.
+func (c *FakeEndpoints) List(opts v1.ListOptions) (result *corev1.EndpointsList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &corev1.EndpointsList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.EndpointsList{ListMeta: obj.(*corev1.EndpointsList).ListMeta}
+ for _, item := range obj.(*corev1.EndpointsList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested endpoints.
+func (c *FakeEndpoints) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *FakeEndpoints) Create(endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Endpoints), err
+}
+
+// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *FakeEndpoints) Update(endpoints *corev1.Endpoints) (result *corev1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &corev1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Endpoints), err
+}
+
+// Delete takes name of the endpoints and deletes it. Returns an error if one occurs.
+func (c *FakeEndpoints) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(endpointsResource, c.ns, name), &corev1.Endpoints{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEndpoints) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.EndpointsList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched endpoints.
+func (c *FakeEndpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, data, subresources...), &corev1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Endpoints), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
new file mode 100644
index 00000000000..68405a54f13
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEvents implements EventInterface
+type FakeEvents struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var eventsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "events"}
+
+var eventsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Event"}
+
+// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
+func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *corev1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(eventsResource, c.ns, name), &corev1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Event), err
+}
+
+// List takes label and field selectors, and returns the list of Events that match those selectors.
+func (c *FakeEvents) List(opts v1.ListOptions) (result *corev1.EventList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &corev1.EventList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.EventList{ListMeta: obj.(*corev1.EventList).ListMeta}
+ for _, item := range obj.(*corev1.EventList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested events.
+func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *FakeEvents) Create(event *corev1.Event) (result *corev1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &corev1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Event), err
+}
+
+// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *FakeEvents) Update(event *corev1.Event) (result *corev1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &corev1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Event), err
+}
+
+// Delete takes name of the event and deletes it. Returns an error if one occurs.
+func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &corev1.Event{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.EventList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched event.
+func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &corev1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Event), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go
new file mode 100644
index 00000000000..dd3fb839289
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/apimachinery/pkg/runtime"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeEvents) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
+ action := core.NewRootCreateAction(eventsResource, event)
+ if c.ns != "" {
+ action = core.NewCreateAction(eventsResource, c.ns, event)
+ }
+ obj, err := c.Fake.Invokes(action, event)
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.Event), err
+}
+
+// Update replaces an existing event. Returns the copy of the event the server returns, or an error.
+func (c *FakeEvents) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
+ action := core.NewRootUpdateAction(eventsResource, event)
+ if c.ns != "" {
+ action = core.NewUpdateAction(eventsResource, c.ns, event)
+ }
+ obj, err := c.Fake.Invokes(action, event)
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.Event), err
+}
+
+// PatchWithEventNamespace patches an existing event. Returns the copy of the event the server returns, or an error.
+func (c *FakeEvents) PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) {
+ action := core.NewRootPatchAction(eventsResource, event.Name, data)
+ if c.ns != "" {
+ action = core.NewPatchAction(eventsResource, c.ns, event.Name, data)
+ }
+ obj, err := c.Fake.Invokes(action, event)
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.Event), err
+}
+
+// Search returns a list of events matching the specified object.
+func (c *FakeEvents) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) {
+ action := core.NewRootListAction(eventsResource, eventsKind, metav1.ListOptions{})
+ if c.ns != "" {
+ action = core.NewListAction(eventsResource, eventsKind, c.ns, metav1.ListOptions{})
+ }
+ obj, err := c.Fake.Invokes(action, &v1.EventList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.EventList), err
+}
+
+func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
+ action := core.GenericActionImpl{}
+ action.Verb = "get-field-selector"
+ action.Resource = eventsResource
+
+ c.Fake.Invokes(action, nil)
+ return fields.Everything()
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
new file mode 100644
index 00000000000..03c03c5d0e2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeLimitRanges implements LimitRangeInterface
+type FakeLimitRanges struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var limitrangesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "limitranges"}
+
+var limitrangesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "LimitRange"}
+
+// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any.
+func (c *FakeLimitRanges) Get(name string, options v1.GetOptions) (result *corev1.LimitRange, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(limitrangesResource, c.ns, name), &corev1.LimitRange{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.LimitRange), err
+}
+
+// List takes label and field selectors, and returns the list of LimitRanges that match those selectors.
+func (c *FakeLimitRanges) List(opts v1.ListOptions) (result *corev1.LimitRangeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(limitrangesResource, limitrangesKind, c.ns, opts), &corev1.LimitRangeList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.LimitRangeList{ListMeta: obj.(*corev1.LimitRangeList).ListMeta}
+ for _, item := range obj.(*corev1.LimitRangeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested limitRanges.
+func (c *FakeLimitRanges) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(limitrangesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any.
+func (c *FakeLimitRanges) Create(limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.LimitRange), err
+}
+
+// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any.
+func (c *FakeLimitRanges) Update(limitRange *corev1.LimitRange) (result *corev1.LimitRange, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(limitrangesResource, c.ns, limitRange), &corev1.LimitRange{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.LimitRange), err
+}
+
+// Delete takes name of the limitRange and deletes it. Returns an error if one occurs.
+func (c *FakeLimitRanges) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(limitrangesResource, c.ns, name), &corev1.LimitRange{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeLimitRanges) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(limitrangesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.LimitRangeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched limitRange.
+func (c *FakeLimitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.LimitRange, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(limitrangesResource, c.ns, name, data, subresources...), &corev1.LimitRange{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.LimitRange), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
new file mode 100644
index 00000000000..12b918af01b
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go
@@ -0,0 +1,123 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNamespaces implements NamespaceInterface
+type FakeNamespaces struct {
+ Fake *FakeCoreV1
+}
+
+var namespacesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
+
+var namespacesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}
+
+// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
+func (c *FakeNamespaces) Get(name string, options v1.GetOptions) (result *corev1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(namespacesResource, name), &corev1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Namespace), err
+}
+
+// List takes label and field selectors, and returns the list of Namespaces that match those selectors.
+func (c *FakeNamespaces) List(opts v1.ListOptions) (result *corev1.NamespaceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &corev1.NamespaceList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.NamespaceList{ListMeta: obj.(*corev1.NamespaceList).ListMeta}
+ for _, item := range obj.(*corev1.NamespaceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested namespaces.
+func (c *FakeNamespaces) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts))
+}
+
+// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *FakeNamespaces) Create(namespace *corev1.Namespace) (result *corev1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &corev1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Namespace), err
+}
+
+// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *FakeNamespaces) Update(namespace *corev1.Namespace) (result *corev1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &corev1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Namespace), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNamespaces) UpdateStatus(namespace *corev1.Namespace) (*corev1.Namespace, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(namespacesResource, "status", namespace), &corev1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Namespace), err
+}
+
+// Delete takes name of the namespace and deletes it. Returns an error if one occurs.
+func (c *FakeNamespaces) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(namespacesResource, name), &corev1.Namespace{})
+ return err
+}
+
+// Patch applies the patch and returns the patched namespace.
+func (c *FakeNamespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, data, subresources...), &corev1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Namespace), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go
new file mode 100644
index 00000000000..a0eae349044
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/core/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeNamespaces) Finalize(namespace *v1.Namespace) (*v1.Namespace, error) {
+ action := core.CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = namespacesResource
+ action.Subresource = "finalize"
+ action.Object = namespace
+
+ obj, err := c.Fake.Invokes(action, namespace)
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.Namespace), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
new file mode 100644
index 00000000000..a2bc97b2c6c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go
@@ -0,0 +1,131 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNodes implements NodeInterface
+type FakeNodes struct {
+ Fake *FakeCoreV1
+}
+
+var nodesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}
+
+var nodesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
+
+// Get takes name of the node, and returns the corresponding node object, and an error if there is any.
+func (c *FakeNodes) Get(name string, options v1.GetOptions) (result *corev1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(nodesResource, name), &corev1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Node), err
+}
+
+// List takes label and field selectors, and returns the list of Nodes that match those selectors.
+func (c *FakeNodes) List(opts v1.ListOptions) (result *corev1.NodeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &corev1.NodeList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.NodeList{ListMeta: obj.(*corev1.NodeList).ListMeta}
+ for _, item := range obj.(*corev1.NodeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *FakeNodes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(nodesResource, opts))
+}
+
+// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Create(node *corev1.Node) (result *corev1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(nodesResource, node), &corev1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Node), err
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Update(node *corev1.Node) (result *corev1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(nodesResource, node), &corev1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Node), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNodes) UpdateStatus(node *corev1.Node) (*corev1.Node, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &corev1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Node), err
+}
+
+// Delete takes name of the node and deletes it. Returns an error if one occurs.
+func (c *FakeNodes) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(nodesResource, name), &corev1.Node{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(nodesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.NodeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *FakeNodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, data, subresources...), &corev1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Node), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go
new file mode 100644
index 00000000000..eb684fd2951
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/core/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeNodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
+ obj, err := c.Fake.Invokes(
+ core.NewRootPatchSubresourceAction(nodesResource, nodeName, data, "status"), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+
+ return obj.(*v1.Node), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
new file mode 100644
index 00000000000..71e2f2dc576
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go
@@ -0,0 +1,131 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePersistentVolumes implements PersistentVolumeInterface
+type FakePersistentVolumes struct {
+ Fake *FakeCoreV1
+}
+
+var persistentvolumesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"}
+
+var persistentvolumesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolume"}
+
+// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any.
+func (c *FakePersistentVolumes) Get(name string, options v1.GetOptions) (result *corev1.PersistentVolume, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(persistentvolumesResource, name), &corev1.PersistentVolume{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolume), err
+}
+
+// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors.
+func (c *FakePersistentVolumes) List(opts v1.ListOptions) (result *corev1.PersistentVolumeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(persistentvolumesResource, persistentvolumesKind, opts), &corev1.PersistentVolumeList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.PersistentVolumeList{ListMeta: obj.(*corev1.PersistentVolumeList).ListMeta}
+ for _, item := range obj.(*corev1.PersistentVolumeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested persistentVolumes.
+func (c *FakePersistentVolumes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(persistentvolumesResource, opts))
+}
+
+// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
+func (c *FakePersistentVolumes) Create(persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolume), err
+}
+
+// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
+func (c *FakePersistentVolumes) Update(persistentVolume *corev1.PersistentVolume) (result *corev1.PersistentVolume, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(persistentvolumesResource, persistentVolume), &corev1.PersistentVolume{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolume), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakePersistentVolumes) UpdateStatus(persistentVolume *corev1.PersistentVolume) (*corev1.PersistentVolume, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(persistentvolumesResource, "status", persistentVolume), &corev1.PersistentVolume{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolume), err
+}
+
+// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs.
+func (c *FakePersistentVolumes) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(persistentvolumesResource, name), &corev1.PersistentVolume{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePersistentVolumes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(persistentvolumesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched persistentVolume.
+func (c *FakePersistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolume, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(persistentvolumesResource, name, data, subresources...), &corev1.PersistentVolume{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolume), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
new file mode 100644
index 00000000000..a06eca81cb8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePersistentVolumeClaims implements PersistentVolumeClaimInterface
+type FakePersistentVolumeClaims struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var persistentvolumeclaimsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
+
+var persistentvolumeclaimsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PersistentVolumeClaim"}
+
+// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any.
+func (c *FakePersistentVolumeClaims) Get(name string, options v1.GetOptions) (result *corev1.PersistentVolumeClaim, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolumeClaim), err
+}
+
+// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors.
+func (c *FakePersistentVolumeClaims) List(opts v1.ListOptions) (result *corev1.PersistentVolumeClaimList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(persistentvolumeclaimsResource, persistentvolumeclaimsKind, c.ns, opts), &corev1.PersistentVolumeClaimList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.PersistentVolumeClaimList{ListMeta: obj.(*corev1.PersistentVolumeClaimList).ListMeta}
+ for _, item := range obj.(*corev1.PersistentVolumeClaimList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested persistentVolumeClaims.
+func (c *FakePersistentVolumeClaims) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(persistentvolumeclaimsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
+func (c *FakePersistentVolumeClaims) Create(persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolumeClaim), err
+}
+
+// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
+func (c *FakePersistentVolumeClaims) Update(persistentVolumeClaim *corev1.PersistentVolumeClaim) (result *corev1.PersistentVolumeClaim, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(persistentvolumeclaimsResource, c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolumeClaim), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakePersistentVolumeClaims) UpdateStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(persistentvolumeclaimsResource, "status", c.ns, persistentVolumeClaim), &corev1.PersistentVolumeClaim{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolumeClaim), err
+}
+
+// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs.
+func (c *FakePersistentVolumeClaims) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(persistentvolumeclaimsResource, c.ns, name), &corev1.PersistentVolumeClaim{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePersistentVolumeClaims) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(persistentvolumeclaimsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.PersistentVolumeClaimList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched persistentVolumeClaim.
+func (c *FakePersistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PersistentVolumeClaim, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(persistentvolumeclaimsResource, c.ns, name, data, subresources...), &corev1.PersistentVolumeClaim{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PersistentVolumeClaim), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
new file mode 100644
index 00000000000..bbf39eafc28
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePods implements PodInterface
+type FakePods struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var podsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
+
+var podsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
+
+// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
+func (c *FakePods) Get(name string, options v1.GetOptions) (result *corev1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(podsResource, c.ns, name), &corev1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Pod), err
+}
+
+// List takes label and field selectors, and returns the list of Pods that match those selectors.
+func (c *FakePods) List(opts v1.ListOptions) (result *corev1.PodList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &corev1.PodList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.PodList{ListMeta: obj.(*corev1.PodList).ListMeta}
+ for _, item := range obj.(*corev1.PodList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested pods.
+func (c *FakePods) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *FakePods) Create(pod *corev1.Pod) (result *corev1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &corev1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Pod), err
+}
+
+// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *FakePods) Update(pod *corev1.Pod) (result *corev1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &corev1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Pod), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakePods) UpdateStatus(pod *corev1.Pod) (*corev1.Pod, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &corev1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Pod), err
+}
+
+// Delete takes name of the pod and deletes it. Returns an error if one occurs.
+func (c *FakePods) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(podsResource, c.ns, name), &corev1.Pod{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePods) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.PodList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched pod.
+func (c *FakePods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, data, subresources...), &corev1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Pod), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go
new file mode 100644
index 00000000000..497cc785705
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/core/v1"
+ policy "k8s.io/api/policy/v1beta1"
+ restclient "k8s.io/client-go/rest"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakePods) Bind(binding *v1.Binding) error {
+ action := core.CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = podsResource
+ action.Subresource = "bindings"
+ action.Object = binding
+
+ _, err := c.Fake.Invokes(action, binding)
+ return err
+}
+
+func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request {
+ action := core.GenericActionImpl{}
+ action.Verb = "get"
+ action.Namespace = c.ns
+ action.Resource = podsResource
+ action.Subresource = "logs"
+ action.Value = opts
+
+ _, _ = c.Fake.Invokes(action, &v1.Pod{})
+ return &restclient.Request{}
+}
+
+func (c *FakePods) Evict(eviction *policy.Eviction) error {
+ action := core.CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = podsResource
+ action.Subresource = "eviction"
+ action.Object = eviction
+
+ _, err := c.Fake.Invokes(action, eviction)
+ return err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
new file mode 100644
index 00000000000..ff242f1660f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePodTemplates implements PodTemplateInterface
+type FakePodTemplates struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var podtemplatesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "podtemplates"}
+
+var podtemplatesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "PodTemplate"}
+
+// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any.
+func (c *FakePodTemplates) Get(name string, options v1.GetOptions) (result *corev1.PodTemplate, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PodTemplate), err
+}
+
+// List takes label and field selectors, and returns the list of PodTemplates that match those selectors.
+func (c *FakePodTemplates) List(opts v1.ListOptions) (result *corev1.PodTemplateList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(podtemplatesResource, podtemplatesKind, c.ns, opts), &corev1.PodTemplateList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.PodTemplateList{ListMeta: obj.(*corev1.PodTemplateList).ListMeta}
+ for _, item := range obj.(*corev1.PodTemplateList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested podTemplates.
+func (c *FakePodTemplates) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(podtemplatesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any.
+func (c *FakePodTemplates) Create(podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PodTemplate), err
+}
+
+// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any.
+func (c *FakePodTemplates) Update(podTemplate *corev1.PodTemplate) (result *corev1.PodTemplate, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(podtemplatesResource, c.ns, podTemplate), &corev1.PodTemplate{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PodTemplate), err
+}
+
+// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs.
+func (c *FakePodTemplates) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(podtemplatesResource, c.ns, name), &corev1.PodTemplate{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePodTemplates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(podtemplatesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.PodTemplateList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched podTemplate.
+func (c *FakePodTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.PodTemplate, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(podtemplatesResource, c.ns, name, data, subresources...), &corev1.PodTemplate{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.PodTemplate), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
new file mode 100644
index 00000000000..64fde0b6cef
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go
@@ -0,0 +1,163 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeReplicationControllers implements ReplicationControllerInterface
+type FakeReplicationControllers struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var replicationcontrollersResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}
+
+var replicationcontrollersKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ReplicationController"}
+
+// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any.
+func (c *FakeReplicationControllers) Get(name string, options v1.GetOptions) (result *corev1.ReplicationController, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ReplicationController), err
+}
+
+// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors.
+func (c *FakeReplicationControllers) List(opts v1.ListOptions) (result *corev1.ReplicationControllerList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(replicationcontrollersResource, replicationcontrollersKind, c.ns, opts), &corev1.ReplicationControllerList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ReplicationControllerList{ListMeta: obj.(*corev1.ReplicationControllerList).ListMeta}
+ for _, item := range obj.(*corev1.ReplicationControllerList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested replicationControllers.
+func (c *FakeReplicationControllers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(replicationcontrollersResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any.
+func (c *FakeReplicationControllers) Create(replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ReplicationController), err
+}
+
+// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any.
+func (c *FakeReplicationControllers) Update(replicationController *corev1.ReplicationController) (result *corev1.ReplicationController, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(replicationcontrollersResource, c.ns, replicationController), &corev1.ReplicationController{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ReplicationController), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeReplicationControllers) UpdateStatus(replicationController *corev1.ReplicationController) (*corev1.ReplicationController, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "status", c.ns, replicationController), &corev1.ReplicationController{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ReplicationController), err
+}
+
+// Delete takes name of the replicationController and deletes it. Returns an error if one occurs.
+func (c *FakeReplicationControllers) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(replicationcontrollersResource, c.ns, name), &corev1.ReplicationController{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeReplicationControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(replicationcontrollersResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.ReplicationControllerList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched replicationController.
+func (c *FakeReplicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ReplicationController, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(replicationcontrollersResource, c.ns, name, data, subresources...), &corev1.ReplicationController{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ReplicationController), err
+}
+
+// GetScale takes name of the replicationController, and returns the corresponding scale object, and an error if there is any.
+func (c *FakeReplicationControllers) GetScale(replicationControllerName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetSubresourceAction(replicationcontrollersResource, c.ns, "scale", replicationControllerName), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
+
+// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *FakeReplicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicationcontrollersResource, "scale", c.ns, scale), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
new file mode 100644
index 00000000000..069749ccff0
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeResourceQuotas implements ResourceQuotaInterface
+type FakeResourceQuotas struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var resourcequotasResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"}
+
+var resourcequotasKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ResourceQuota"}
+
+// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
+func (c *FakeResourceQuotas) Get(name string, options v1.GetOptions) (result *corev1.ResourceQuota, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ResourceQuota), err
+}
+
+// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
+func (c *FakeResourceQuotas) List(opts v1.ListOptions) (result *corev1.ResourceQuotaList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(resourcequotasResource, resourcequotasKind, c.ns, opts), &corev1.ResourceQuotaList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ResourceQuotaList{ListMeta: obj.(*corev1.ResourceQuotaList).ListMeta}
+ for _, item := range obj.(*corev1.ResourceQuotaList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested resourceQuotas.
+func (c *FakeResourceQuotas) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(resourcequotasResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
+func (c *FakeResourceQuotas) Create(resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ResourceQuota), err
+}
+
+// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
+func (c *FakeResourceQuotas) Update(resourceQuota *corev1.ResourceQuota) (result *corev1.ResourceQuota, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(resourcequotasResource, c.ns, resourceQuota), &corev1.ResourceQuota{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ResourceQuota), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *corev1.ResourceQuota) (*corev1.ResourceQuota, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(resourcequotasResource, "status", c.ns, resourceQuota), &corev1.ResourceQuota{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ResourceQuota), err
+}
+
+// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
+func (c *FakeResourceQuotas) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(resourcequotasResource, c.ns, name), &corev1.ResourceQuota{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeResourceQuotas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(resourcequotasResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.ResourceQuotaList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched resourceQuota.
+func (c *FakeResourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ResourceQuota, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(resourcequotasResource, c.ns, name, data, subresources...), &corev1.ResourceQuota{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ResourceQuota), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
new file mode 100644
index 00000000000..f59ba40bc3d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeSecrets implements SecretInterface
+type FakeSecrets struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
+
+var secretsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}
+
+// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
+func (c *FakeSecrets) Get(name string, options v1.GetOptions) (result *corev1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(secretsResource, c.ns, name), &corev1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Secret), err
+}
+
+// List takes label and field selectors, and returns the list of Secrets that match those selectors.
+func (c *FakeSecrets) List(opts v1.ListOptions) (result *corev1.SecretList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &corev1.SecretList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.SecretList{ListMeta: obj.(*corev1.SecretList).ListMeta}
+ for _, item := range obj.(*corev1.SecretList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested secrets.
+func (c *FakeSecrets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *FakeSecrets) Create(secret *corev1.Secret) (result *corev1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &corev1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Secret), err
+}
+
+// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *FakeSecrets) Update(secret *corev1.Secret) (result *corev1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &corev1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Secret), err
+}
+
+// Delete takes name of the secret and deletes it. Returns an error if one occurs.
+func (c *FakeSecrets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(secretsResource, c.ns, name), &corev1.Secret{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeSecrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.SecretList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched secret.
+func (c *FakeSecrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, data, subresources...), &corev1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Secret), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
new file mode 100644
index 00000000000..2ffcdff76a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go
@@ -0,0 +1,132 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeServices implements ServiceInterface
+type FakeServices struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var servicesResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
+
+var servicesKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
+
+// Get takes name of the service, and returns the corresponding service object, and an error if there is any.
+func (c *FakeServices) Get(name string, options v1.GetOptions) (result *corev1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(servicesResource, c.ns, name), &corev1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Service), err
+}
+
+// List takes label and field selectors, and returns the list of Services that match those selectors.
+func (c *FakeServices) List(opts v1.ListOptions) (result *corev1.ServiceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &corev1.ServiceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ServiceList{ListMeta: obj.(*corev1.ServiceList).ListMeta}
+ for _, item := range obj.(*corev1.ServiceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *FakeServices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *FakeServices) Create(service *corev1.Service) (result *corev1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &corev1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Service), err
+}
+
+// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *FakeServices) Update(service *corev1.Service) (result *corev1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &corev1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Service), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeServices) UpdateStatus(service *corev1.Service) (*corev1.Service, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &corev1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Service), err
+}
+
+// Delete takes name of the service and deletes it. Returns an error if one occurs.
+func (c *FakeServices) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(servicesResource, c.ns, name), &corev1.Service{})
+
+ return err
+}
+
+// Patch applies the patch and returns the patched service.
+func (c *FakeServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, data, subresources...), &corev1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.Service), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go
new file mode 100644
index 00000000000..92e4930d711
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ restclient "k8s.io/client-go/rest"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeServices) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
+ return c.Fake.InvokesProxy(core.NewProxyGetAction(servicesResource, c.ns, scheme, name, port, path, params))
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
new file mode 100644
index 00000000000..2b2c5a7b22c
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeServiceAccounts implements ServiceAccountInterface
+type FakeServiceAccounts struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var serviceaccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}
+
+var serviceaccountsKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"}
+
+// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any.
+func (c *FakeServiceAccounts) Get(name string, options v1.GetOptions) (result *corev1.ServiceAccount, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ServiceAccount), err
+}
+
+// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors.
+func (c *FakeServiceAccounts) List(opts v1.ListOptions) (result *corev1.ServiceAccountList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(serviceaccountsResource, serviceaccountsKind, c.ns, opts), &corev1.ServiceAccountList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &corev1.ServiceAccountList{ListMeta: obj.(*corev1.ServiceAccountList).ListMeta}
+ for _, item := range obj.(*corev1.ServiceAccountList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested serviceAccounts.
+func (c *FakeServiceAccounts) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(serviceaccountsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
+func (c *FakeServiceAccounts) Create(serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ServiceAccount), err
+}
+
+// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
+func (c *FakeServiceAccounts) Update(serviceAccount *corev1.ServiceAccount) (result *corev1.ServiceAccount, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(serviceaccountsResource, c.ns, serviceAccount), &corev1.ServiceAccount{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ServiceAccount), err
+}
+
+// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs.
+func (c *FakeServiceAccounts) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(serviceaccountsResource, c.ns, name), &corev1.ServiceAccount{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeServiceAccounts) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(serviceaccountsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &corev1.ServiceAccountList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched serviceAccount.
+func (c *FakeServiceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.ServiceAccount, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(serviceaccountsResource, c.ns, name, data, subresources...), &corev1.ServiceAccount{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*corev1.ServiceAccount), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go
new file mode 100644
index 00000000000..a0efbcc2fe9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ authenticationv1 "k8s.io/api/authentication/v1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeServiceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
+ obj, err := c.Fake.Invokes(core.NewCreateSubresourceAction(serviceaccountsResource, name, "token", c.ns, tr), &authenticationv1.TokenRequest{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*authenticationv1.TokenRequest), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
new file mode 100644
index 00000000000..b210e40a041
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/events/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEvents implements EventInterface
+type FakeEvents struct {
+ Fake *FakeEventsV1beta1
+ ns string
+}
+
+var eventsResource = schema.GroupVersionResource{Group: "events.k8s.io", Version: "v1beta1", Resource: "events"}
+
+var eventsKind = schema.GroupVersionKind{Group: "events.k8s.io", Version: "v1beta1", Kind: "Event"}
+
+// Get takes name of the event, and returns the corresponding event object, and an error if there is any.
+func (c *FakeEvents) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(eventsResource, c.ns, name), &v1beta1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Event), err
+}
+
+// List takes label and field selectors, and returns the list of Events that match those selectors.
+func (c *FakeEvents) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(eventsResource, eventsKind, c.ns, opts), &v1beta1.EventList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.EventList{ListMeta: obj.(*v1beta1.EventList).ListMeta}
+ for _, item := range obj.(*v1beta1.EventList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested events.
+func (c *FakeEvents) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(eventsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *FakeEvents) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(eventsResource, c.ns, event), &v1beta1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Event), err
+}
+
+// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
+func (c *FakeEvents) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(eventsResource, c.ns, event), &v1beta1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Event), err
+}
+
+// Delete takes name of the event and deletes it. Returns an error if one occurs.
+func (c *FakeEvents) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(eventsResource, c.ns, name), &v1beta1.Event{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEvents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(eventsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.EventList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched event.
+func (c *FakeEvents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(eventsResource, c.ns, name, data, subresources...), &v1beta1.Event{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Event), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go
new file mode 100644
index 00000000000..875c774e388
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeEventsV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeEventsV1beta1) Events(namespace string) v1beta1.EventInterface {
+ return &FakeEvents{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeEventsV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
new file mode 100644
index 00000000000..3a760b3175e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDaemonSets implements DaemonSetInterface
+type FakeDaemonSets struct {
+ Fake *FakeExtensionsV1beta1
+ ns string
+}
+
+var daemonsetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}
+
+var daemonsetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "DaemonSet"}
+
+// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
+func (c *FakeDaemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.DaemonSet), err
+}
+
+// List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
+func (c *FakeDaemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(daemonsetsResource, daemonsetsKind, c.ns, opts), &v1beta1.DaemonSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.DaemonSetList{ListMeta: obj.(*v1beta1.DaemonSetList).ListMeta}
+ for _, item := range obj.(*v1beta1.DaemonSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested daemonSets.
+func (c *FakeDaemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(daemonsetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.DaemonSet), err
+}
+
+// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
+func (c *FakeDaemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(daemonsetsResource, c.ns, daemonSet), &v1beta1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.DaemonSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDaemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(daemonsetsResource, "status", c.ns, daemonSet), &v1beta1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.DaemonSet), err
+}
+
+// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
+func (c *FakeDaemonSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(daemonsetsResource, c.ns, name), &v1beta1.DaemonSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDaemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(daemonsetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.DaemonSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched daemonSet.
+func (c *FakeDaemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(daemonsetsResource, c.ns, name, data, subresources...), &v1beta1.DaemonSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.DaemonSet), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
new file mode 100644
index 00000000000..f032a556389
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go
@@ -0,0 +1,162 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDeployments implements DeploymentInterface
+type FakeDeployments struct {
+ Fake *FakeExtensionsV1beta1
+ ns string
+}
+
+var deploymentsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}
+
+var deploymentsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"}
+
+// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
+func (c *FakeDeployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// List takes label and field selectors, and returns the list of Deployments that match those selectors.
+func (c *FakeDeployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(deploymentsResource, deploymentsKind, c.ns, opts), &v1beta1.DeploymentList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.DeploymentList{ListMeta: obj.(*v1beta1.DeploymentList).ListMeta}
+ for _, item := range obj.(*v1beta1.DeploymentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested deployments.
+func (c *FakeDeployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(deploymentsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
+func (c *FakeDeployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(deploymentsResource, c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDeployments) UpdateStatus(deployment *v1beta1.Deployment) (*v1beta1.Deployment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "status", c.ns, deployment), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// Delete takes name of the deployment and deletes it. Returns an error if one occurs.
+func (c *FakeDeployments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(deploymentsResource, c.ns, name), &v1beta1.Deployment{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDeployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(deploymentsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.DeploymentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched deployment.
+func (c *FakeDeployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(deploymentsResource, c.ns, name, data, subresources...), &v1beta1.Deployment{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Deployment), err
+}
+
+// GetScale takes name of the deployment, and returns the corresponding scale object, and an error if there is any.
+func (c *FakeDeployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetSubresourceAction(deploymentsResource, c.ns, "scale", deploymentName), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
+
+// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *FakeDeployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(deploymentsResource, "scale", c.ns, scale), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go
new file mode 100644
index 00000000000..af2bc0f713a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/extensions/v1beta1"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeDeployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error {
+ action := core.CreateActionImpl{}
+ action.Verb = "create"
+ action.Resource = deploymentsResource
+ action.Subresource = "rollback"
+ action.Object = deploymentRollback
+
+ _, err := c.Fake.Invokes(action, deploymentRollback)
+ return err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go
new file mode 100644
index 00000000000..1aba34f9dcb
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go
@@ -0,0 +1,60 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeExtensionsV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeExtensionsV1beta1) DaemonSets(namespace string) v1beta1.DaemonSetInterface {
+ return &FakeDaemonSets{c, namespace}
+}
+
+func (c *FakeExtensionsV1beta1) Deployments(namespace string) v1beta1.DeploymentInterface {
+ return &FakeDeployments{c, namespace}
+}
+
+func (c *FakeExtensionsV1beta1) Ingresses(namespace string) v1beta1.IngressInterface {
+ return &FakeIngresses{c, namespace}
+}
+
+func (c *FakeExtensionsV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface {
+ return &FakePodSecurityPolicies{c}
+}
+
+func (c *FakeExtensionsV1beta1) ReplicaSets(namespace string) v1beta1.ReplicaSetInterface {
+ return &FakeReplicaSets{c, namespace}
+}
+
+func (c *FakeExtensionsV1beta1) Scales(namespace string) v1beta1.ScaleInterface {
+ return &FakeScales{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeExtensionsV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
new file mode 100644
index 00000000000..55257a88a28
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeIngresses implements IngressInterface
+type FakeIngresses struct {
+ Fake *FakeExtensionsV1beta1
+ ns string
+}
+
+var ingressesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "ingresses"}
+
+var ingressesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Ingress"}
+
+// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
+func (c *FakeIngresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1beta1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Ingress), err
+}
+
+// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
+func (c *FakeIngresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1beta1.IngressList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.IngressList{ListMeta: obj.(*v1beta1.IngressList).ListMeta}
+ for _, item := range obj.(*v1beta1.IngressList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ingresses.
+func (c *FakeIngresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *FakeIngresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Ingress), err
+}
+
+// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *FakeIngresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1beta1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Ingress), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeIngresses) UpdateStatus(ingress *v1beta1.Ingress) (*v1beta1.Ingress, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1beta1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Ingress), err
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *FakeIngresses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(ingressesResource, c.ns, name), &v1beta1.Ingress{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeIngresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.IngressList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ingress.
+func (c *FakeIngresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, data, subresources...), &v1beta1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Ingress), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go
new file mode 100644
index 00000000000..70b5dac2819
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePodSecurityPolicies implements PodSecurityPolicyInterface
+type FakePodSecurityPolicies struct {
+ Fake *FakeExtensionsV1beta1
+}
+
+var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "podsecuritypolicies"}
+
+var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "PodSecurityPolicy"}
+
+// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
+func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
+func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta}
+ for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested podSecurityPolicies.
+func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts))
+}
+
+// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
+func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched podSecurityPolicy.
+func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
new file mode 100644
index 00000000000..2ab8f244f57
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go
@@ -0,0 +1,162 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeReplicaSets implements ReplicaSetInterface
+type FakeReplicaSets struct {
+ Fake *FakeExtensionsV1beta1
+ ns string
+}
+
+var replicasetsResource = schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"}
+
+var replicasetsKind = schema.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "ReplicaSet"}
+
+// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
+func (c *FakeReplicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ReplicaSet), err
+}
+
+// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
+func (c *FakeReplicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(replicasetsResource, replicasetsKind, c.ns, opts), &v1beta1.ReplicaSetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.ReplicaSetList{ListMeta: obj.(*v1beta1.ReplicaSetList).ListMeta}
+ for _, item := range obj.(*v1beta1.ReplicaSetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested replicaSets.
+func (c *FakeReplicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(replicasetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ReplicaSet), err
+}
+
+// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
+func (c *FakeReplicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(replicasetsResource, c.ns, replicaSet), &v1beta1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ReplicaSet), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeReplicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "status", c.ns, replicaSet), &v1beta1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ReplicaSet), err
+}
+
+// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
+func (c *FakeReplicaSets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(replicasetsResource, c.ns, name), &v1beta1.ReplicaSet{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeReplicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(replicasetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.ReplicaSetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched replicaSet.
+func (c *FakeReplicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(replicasetsResource, c.ns, name, data, subresources...), &v1beta1.ReplicaSet{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ReplicaSet), err
+}
+
+// GetScale takes name of the replicaSet, and returns the corresponding scale object, and an error if there is any.
+func (c *FakeReplicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetSubresourceAction(replicasetsResource, c.ns, "scale", replicaSetName), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
+
+// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
+func (c *FakeReplicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(replicasetsResource, "scale", c.ns, scale), &v1beta1.Scale{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Scale), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go
new file mode 100644
index 00000000000..02c4d0bab73
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeScales implements ScaleInterface
+type FakeScales struct {
+ Fake *FakeExtensionsV1beta1
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go
new file mode 100644
index 00000000000..1f1d49ba1a9
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_scale_expansion.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "k8s.io/api/extensions/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeScales) Get(kind string, name string) (result *v1beta1.Scale, err error) {
+ action := core.GetActionImpl{}
+ action.Verb = "get"
+ action.Namespace = c.ns
+ action.Resource = schema.GroupVersionResource{Resource: kind}
+ action.Subresource = "scale"
+ action.Name = name
+ obj, err := c.Fake.Invokes(action, &v1beta1.Scale{})
+ result = obj.(*v1beta1.Scale)
+ return
+}
+
+func (c *FakeScales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+ action := core.UpdateActionImpl{}
+ action.Verb = "update"
+ action.Namespace = c.ns
+ action.Resource = schema.GroupVersionResource{Resource: kind}
+ action.Subresource = "scale"
+ action.Object = scale
+ obj, err := c.Fake.Invokes(action, scale)
+ result = obj.(*v1beta1.Scale)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go
new file mode 100644
index 00000000000..6b135c63690
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/networking/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeNetworkingV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface {
+ return &FakeNetworkPolicies{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeNetworkingV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
new file mode 100644
index 00000000000..7be202298de
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ networkingv1 "k8s.io/api/networking/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNetworkPolicies implements NetworkPolicyInterface
+type FakeNetworkPolicies struct {
+ Fake *FakeNetworkingV1
+ ns string
+}
+
+var networkpoliciesResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"}
+
+var networkpoliciesKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"}
+
+// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
+func (c *FakeNetworkPolicies) Get(name string, options v1.GetOptions) (result *networkingv1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*networkingv1.NetworkPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
+func (c *FakeNetworkPolicies) List(opts v1.ListOptions) (result *networkingv1.NetworkPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &networkingv1.NetworkPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &networkingv1.NetworkPolicyList{ListMeta: obj.(*networkingv1.NetworkPolicyList).ListMeta}
+ for _, item := range obj.(*networkingv1.NetworkPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested networkPolicies.
+func (c *FakeNetworkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *FakeNetworkPolicies) Create(networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*networkingv1.NetworkPolicy), err
+}
+
+// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *FakeNetworkPolicies) Update(networkPolicy *networkingv1.NetworkPolicy) (result *networkingv1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &networkingv1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*networkingv1.NetworkPolicy), err
+}
+
+// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeNetworkPolicies) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(networkpoliciesResource, c.ns, name), &networkingv1.NetworkPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNetworkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &networkingv1.NetworkPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched networkPolicy.
+func (c *FakeNetworkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *networkingv1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, data, subresources...), &networkingv1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*networkingv1.NetworkPolicy), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go
new file mode 100644
index 00000000000..b8f6f3eae27
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go
@@ -0,0 +1,25 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+// FakeEvictions implements EvictionInterface
+type FakeEvictions struct {
+ Fake *FakePolicyV1beta1
+ ns string
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go
new file mode 100644
index 00000000000..2f0d8e95370
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ policy "k8s.io/api/policy/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ core "k8s.io/client-go/testing"
+)
+
+func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
+ action := core.GetActionImpl{}
+ action.Verb = "post"
+ action.Namespace = c.ns
+ action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"}
+ action.Subresource = "eviction"
+ _, err := c.Fake.Invokes(action, eviction)
+ return err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
new file mode 100644
index 00000000000..3f2e78b3109
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go
@@ -0,0 +1,140 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/policy/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePodDisruptionBudgets implements PodDisruptionBudgetInterface
+type FakePodDisruptionBudgets struct {
+ Fake *FakePolicyV1beta1
+ ns string
+}
+
+var poddisruptionbudgetsResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "poddisruptionbudgets"}
+
+var poddisruptionbudgetsKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodDisruptionBudget"}
+
+// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
+func (c *FakePodDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodDisruptionBudget), err
+}
+
+// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
+func (c *FakePodDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(poddisruptionbudgetsResource, poddisruptionbudgetsKind, c.ns, opts), &v1beta1.PodDisruptionBudgetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.PodDisruptionBudgetList{ListMeta: obj.(*v1beta1.PodDisruptionBudgetList).ListMeta}
+ for _, item := range obj.(*v1beta1.PodDisruptionBudgetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
+func (c *FakePodDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(poddisruptionbudgetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
+func (c *FakePodDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodDisruptionBudget), err
+}
+
+// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
+func (c *FakePodDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(poddisruptionbudgetsResource, c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodDisruptionBudget), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakePodDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(poddisruptionbudgetsResource, "status", c.ns, podDisruptionBudget), &v1beta1.PodDisruptionBudget{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodDisruptionBudget), err
+}
+
+// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs.
+func (c *FakePodDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(poddisruptionbudgetsResource, c.ns, name), &v1beta1.PodDisruptionBudget{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePodDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(poddisruptionbudgetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.PodDisruptionBudgetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched podDisruptionBudget.
+func (c *FakePodDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(poddisruptionbudgetsResource, c.ns, name, data, subresources...), &v1beta1.PodDisruptionBudget{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodDisruptionBudget), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go
new file mode 100644
index 00000000000..0df9aa15f98
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/policy/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePodSecurityPolicies implements PodSecurityPolicyInterface
+type FakePodSecurityPolicies struct {
+ Fake *FakePolicyV1beta1
+}
+
+var podsecuritypoliciesResource = schema.GroupVersionResource{Group: "policy", Version: "v1beta1", Resource: "podsecuritypolicies"}
+
+var podsecuritypoliciesKind = schema.GroupVersionKind{Group: "policy", Version: "v1beta1", Kind: "PodSecurityPolicy"}
+
+// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
+func (c *FakePodSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
+func (c *FakePodSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(podsecuritypoliciesResource, podsecuritypoliciesKind, opts), &v1beta1.PodSecurityPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.PodSecurityPolicyList{ListMeta: obj.(*v1beta1.PodSecurityPolicyList).ListMeta}
+ for _, item := range obj.(*v1beta1.PodSecurityPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested podSecurityPolicies.
+func (c *FakePodSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(podsecuritypoliciesResource, opts))
+}
+
+// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *FakePodSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
+func (c *FakePodSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(podsecuritypoliciesResource, podSecurityPolicy), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
+
+// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
+func (c *FakePodSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(podsecuritypoliciesResource, name), &v1beta1.PodSecurityPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePodSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(podsecuritypoliciesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.PodSecurityPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched podSecurityPolicy.
+func (c *FakePodSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(podsecuritypoliciesResource, name, data, subresources...), &v1beta1.PodSecurityPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PodSecurityPolicy), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go
new file mode 100644
index 00000000000..9c780bf1f06
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakePolicyV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakePolicyV1beta1) Evictions(namespace string) v1beta1.EvictionInterface {
+ return &FakeEvictions{c, namespace}
+}
+
+func (c *FakePolicyV1beta1) PodDisruptionBudgets(namespace string) v1beta1.PodDisruptionBudgetInterface {
+ return &FakePodDisruptionBudgets{c, namespace}
+}
+
+func (c *FakePolicyV1beta1) PodSecurityPolicies() v1beta1.PodSecurityPolicyInterface {
+ return &FakePodSecurityPolicies{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakePolicyV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
new file mode 100644
index 00000000000..d93ac829442
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoles implements ClusterRoleInterface
+type FakeClusterRoles struct {
+ Fake *FakeRbacV1
+}
+
+var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}
+
+var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}
+
+// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
+func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *rbacv1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolesResource, name), &rbacv1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRole), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *rbacv1.ClusterRoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &rbacv1.ClusterRoleList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &rbacv1.ClusterRoleList{ListMeta: obj.(*rbacv1.ClusterRoleList).ListMeta}
+ for _, item := range obj.(*rbacv1.ClusterRoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Create(clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRole), err
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Update(clusterRole *rbacv1.ClusterRole) (result *rbacv1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &rbacv1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRole), err
+}
+
+// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &rbacv1.ClusterRole{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &rbacv1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRole), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
new file mode 100644
index 00000000000..a8b2b57ffd7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoleBindings implements ClusterRoleBindingInterface
+type FakeClusterRoleBindings struct {
+ Fake *FakeRbacV1
+}
+
+var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterrolebindings"}
+
+var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBinding"}
+
+// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
+func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *rbacv1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *rbacv1.ClusterRoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &rbacv1.ClusterRoleBindingList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &rbacv1.ClusterRoleBindingList{ListMeta: obj.(*rbacv1.ClusterRoleBindingList).ListMeta}
+ for _, item := range obj.(*rbacv1.ClusterRoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRoleBinding), err
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *rbacv1.ClusterRoleBinding) (result *rbacv1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &rbacv1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRoleBinding), err
+}
+
+// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &rbacv1.ClusterRoleBinding{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &rbacv1.ClusterRoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &rbacv1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.ClusterRoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go
new file mode 100644
index 00000000000..426fd70d6cc
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeRbacV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeRbacV1) ClusterRoles() v1.ClusterRoleInterface {
+ return &FakeClusterRoles{c}
+}
+
+func (c *FakeRbacV1) ClusterRoleBindings() v1.ClusterRoleBindingInterface {
+ return &FakeClusterRoleBindings{c}
+}
+
+func (c *FakeRbacV1) Roles(namespace string) v1.RoleInterface {
+ return &FakeRoles{c, namespace}
+}
+
+func (c *FakeRbacV1) RoleBindings(namespace string) v1.RoleBindingInterface {
+ return &FakeRoleBindings{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeRbacV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
new file mode 100644
index 00000000000..f048bbdfb2d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoles implements RoleInterface
+type FakeRoles struct {
+ Fake *FakeRbacV1
+ ns string
+}
+
+var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}
+
+var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}
+
+// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
+func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *rbacv1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolesResource, c.ns, name), &rbacv1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.Role), err
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *FakeRoles) List(opts v1.ListOptions) (result *rbacv1.RoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &rbacv1.RoleList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &rbacv1.RoleList{ListMeta: obj.(*rbacv1.RoleList).ListMeta}
+ for _, item := range obj.(*rbacv1.RoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Create(role *rbacv1.Role) (result *rbacv1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &rbacv1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.Role), err
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Update(role *rbacv1.Role) (result *rbacv1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &rbacv1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.Role), err
+}
+
+// Delete takes name of the role and deletes it. Returns an error if one occurs.
+func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &rbacv1.Role{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &rbacv1.RoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &rbacv1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.Role), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
new file mode 100644
index 00000000000..c71635fce4e
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ rbacv1 "k8s.io/api/rbac/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoleBindings implements RoleBindingInterface
+type FakeRoleBindings struct {
+ Fake *FakeRbacV1
+ ns string
+}
+
+var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}
+
+var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}
+
+// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
+func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *rbacv1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.RoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *rbacv1.RoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &rbacv1.RoleBindingList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &rbacv1.RoleBindingList{ListMeta: obj.(*rbacv1.RoleBindingList).ListMeta}
+ for _, item := range obj.(*rbacv1.RoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Create(roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.RoleBinding), err
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Update(roleBinding *rbacv1.RoleBinding) (result *rbacv1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &rbacv1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.RoleBinding), err
+}
+
+// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &rbacv1.RoleBinding{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &rbacv1.RoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *rbacv1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &rbacv1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*rbacv1.RoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
new file mode 100644
index 00000000000..13fbce4e72a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoles implements ClusterRoleInterface
+type FakeClusterRoles struct {
+ Fake *FakeRbacV1alpha1
+}
+
+var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterroles"}
+
+var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRole"}
+
+// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
+func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRole), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1alpha1.ClusterRoleList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterRoleList{ListMeta: obj.(*v1alpha1.ClusterRoleList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterRoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRole), err
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRole), err
+}
+
+// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1alpha1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRole), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
new file mode 100644
index 00000000000..5076543d9f4
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoleBindings implements ClusterRoleBindingInterface
+type FakeClusterRoleBindings struct {
+ Fake *FakeRbacV1alpha1
+}
+
+var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterrolebindings"}
+
+var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "ClusterRoleBinding"}
+
+// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
+func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1alpha1.ClusterRoleBindingList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClusterRoleBindingList{ListMeta: obj.(*v1alpha1.ClusterRoleBindingList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRoleBinding), err
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRoleBinding), err
+}
+
+// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1alpha1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClusterRoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go
new file mode 100644
index 00000000000..3447e9be833
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeRbacV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeRbacV1alpha1) ClusterRoles() v1alpha1.ClusterRoleInterface {
+ return &FakeClusterRoles{c}
+}
+
+func (c *FakeRbacV1alpha1) ClusterRoleBindings() v1alpha1.ClusterRoleBindingInterface {
+ return &FakeClusterRoleBindings{c}
+}
+
+func (c *FakeRbacV1alpha1) Roles(namespace string) v1alpha1.RoleInterface {
+ return &FakeRoles{c, namespace}
+}
+
+func (c *FakeRbacV1alpha1) RoleBindings(namespace string) v1alpha1.RoleBindingInterface {
+ return &FakeRoleBindings{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeRbacV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
new file mode 100644
index 00000000000..24d8efee3c1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoles implements RoleInterface
+type FakeRoles struct {
+ Fake *FakeRbacV1alpha1
+ ns string
+}
+
+var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "roles"}
+
+var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "Role"}
+
+// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
+func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Role), err
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *FakeRoles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1alpha1.RoleList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.RoleList{ListMeta: obj.(*v1alpha1.RoleList).ListMeta}
+ for _, item := range obj.(*v1alpha1.RoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Role), err
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Role), err
+}
+
+// Delete takes name of the role and deletes it. Returns an error if one occurs.
+func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1alpha1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Role), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
new file mode 100644
index 00000000000..cb01ef99db7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/rbac/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoleBindings implements RoleBindingInterface
+type FakeRoleBindings struct {
+ Fake *FakeRbacV1alpha1
+ ns string
+}
+
+var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "rolebindings"}
+
+var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Kind: "RoleBinding"}
+
+// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
+func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1alpha1.RoleBindingList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.RoleBindingList{ListMeta: obj.(*v1alpha1.RoleBindingList).ListMeta}
+ for _, item := range obj.(*v1alpha1.RoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoleBinding), err
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoleBinding), err
+}
+
+// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1alpha1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
new file mode 100644
index 00000000000..62a832197e7
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/rbac/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoles implements ClusterRoleInterface
+type FakeClusterRoles struct {
+ Fake *FakeRbacV1beta1
+}
+
+var clusterrolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterroles"}
+
+var clusterrolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRole"}
+
+// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
+func (c *FakeClusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1beta1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRole), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
+func (c *FakeClusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolesResource, clusterrolesKind, opts), &v1beta1.ClusterRoleList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.ClusterRoleList{ListMeta: obj.(*v1beta1.ClusterRoleList).ListMeta}
+ for _, item := range obj.(*v1beta1.ClusterRoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoles.
+func (c *FakeClusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts))
+}
+
+// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRole), err
+}
+
+// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
+func (c *FakeClusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1beta1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRole), err
+}
+
+// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1beta1.ClusterRole{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRole.
+func (c *FakeClusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1beta1.ClusterRole{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRole), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
new file mode 100644
index 00000000000..c9ab4726964
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/rbac/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClusterRoleBindings implements ClusterRoleBindingInterface
+type FakeClusterRoleBindings struct {
+ Fake *FakeRbacV1beta1
+}
+
+var clusterrolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "clusterrolebindings"}
+
+var clusterrolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "ClusterRoleBinding"}
+
+// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
+func (c *FakeClusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
+func (c *FakeClusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(clusterrolebindingsResource, clusterrolebindingsKind, opts), &v1beta1.ClusterRoleBindingList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.ClusterRoleBindingList{ListMeta: obj.(*v1beta1.ClusterRoleBindingList).ListMeta}
+ for _, item := range obj.(*v1beta1.ClusterRoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
+func (c *FakeClusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts))
+}
+
+// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRoleBinding), err
+}
+
+// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
+func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1beta1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRoleBinding), err
+}
+
+// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeClusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1beta1.ClusterRoleBinding{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.ClusterRoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clusterRoleBinding.
+func (c *FakeClusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1beta1.ClusterRoleBinding{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.ClusterRoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go
new file mode 100644
index 00000000000..bdbc246b7aa
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go
@@ -0,0 +1,52 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeRbacV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeRbacV1beta1) ClusterRoles() v1beta1.ClusterRoleInterface {
+ return &FakeClusterRoles{c}
+}
+
+func (c *FakeRbacV1beta1) ClusterRoleBindings() v1beta1.ClusterRoleBindingInterface {
+ return &FakeClusterRoleBindings{c}
+}
+
+func (c *FakeRbacV1beta1) Roles(namespace string) v1beta1.RoleInterface {
+ return &FakeRoles{c, namespace}
+}
+
+func (c *FakeRbacV1beta1) RoleBindings(namespace string) v1beta1.RoleBindingInterface {
+ return &FakeRoleBindings{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeRbacV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
new file mode 100644
index 00000000000..45b07a001ed
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/rbac/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoles implements RoleInterface
+type FakeRoles struct {
+ Fake *FakeRbacV1beta1
+ ns string
+}
+
+var rolesResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}
+
+var rolesKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "Role"}
+
+// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
+func (c *FakeRoles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1beta1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Role), err
+}
+
+// List takes label and field selectors, and returns the list of Roles that match those selectors.
+func (c *FakeRoles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolesResource, rolesKind, c.ns, opts), &v1beta1.RoleList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.RoleList{ListMeta: obj.(*v1beta1.RoleList).ListMeta}
+ for _, item := range obj.(*v1beta1.RoleList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roles.
+func (c *FakeRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1beta1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Role), err
+}
+
+// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
+func (c *FakeRoles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1beta1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Role), err
+}
+
+// Delete takes name of the role and deletes it. Returns an error if one occurs.
+func (c *FakeRoles) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1beta1.Role{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.RoleList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *FakeRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1beta1.Role{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.Role), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
new file mode 100644
index 00000000000..1efd4000560
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/rbac/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoleBindings implements RoleBindingInterface
+type FakeRoleBindings struct {
+ Fake *FakeRbacV1beta1
+ ns string
+}
+
+var rolebindingsResource = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "rolebindings"}
+
+var rolebindingsKind = schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Kind: "RoleBinding"}
+
+// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
+func (c *FakeRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.RoleBinding), err
+}
+
+// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
+func (c *FakeRoleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(rolebindingsResource, rolebindingsKind, c.ns, opts), &v1beta1.RoleBindingList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.RoleBindingList{ListMeta: obj.(*v1beta1.RoleBindingList).ListMeta}
+ for _, item := range obj.(*v1beta1.RoleBindingList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested roleBindings.
+func (c *FakeRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.RoleBinding), err
+}
+
+// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
+func (c *FakeRoleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1beta1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.RoleBinding), err
+}
+
+// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
+func (c *FakeRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1beta1.RoleBinding{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.RoleBindingList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched roleBinding.
+func (c *FakeRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1beta1.RoleBinding{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.RoleBinding), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
new file mode 100644
index 00000000000..8ab4421a97d
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePriorityClasses implements PriorityClassInterface
+type FakePriorityClasses struct {
+ Fake *FakeSchedulingV1alpha1
+}
+
+var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1alpha1", Resource: "priorityclasses"}
+
+var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1alpha1", Kind: "PriorityClass"}
+
+// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
+func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1alpha1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PriorityClass), err
+}
+
+// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
+func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1alpha1.PriorityClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.PriorityClassList{ListMeta: obj.(*v1alpha1.PriorityClassList).ListMeta}
+ for _, item := range obj.(*v1alpha1.PriorityClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested priorityClasses.
+func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts))
+}
+
+// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *FakePriorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PriorityClass), err
+}
+
+// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *FakePriorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1alpha1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PriorityClass), err
+}
+
+// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
+func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1alpha1.PriorityClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.PriorityClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched priorityClass.
+func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1alpha1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PriorityClass), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
new file mode 100644
index 00000000000..974ba193f7a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeSchedulingV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeSchedulingV1alpha1) PriorityClasses() v1alpha1.PriorityClassInterface {
+ return &FakePriorityClasses{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
new file mode 100644
index 00000000000..e234fec66c1
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/scheduling/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePriorityClasses implements PriorityClassInterface
+type FakePriorityClasses struct {
+ Fake *FakeSchedulingV1beta1
+}
+
+var priorityclassesResource = schema.GroupVersionResource{Group: "scheduling.k8s.io", Version: "v1beta1", Resource: "priorityclasses"}
+
+var priorityclassesKind = schema.GroupVersionKind{Group: "scheduling.k8s.io", Version: "v1beta1", Kind: "PriorityClass"}
+
+// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
+func (c *FakePriorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(priorityclassesResource, name), &v1beta1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PriorityClass), err
+}
+
+// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
+func (c *FakePriorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(priorityclassesResource, priorityclassesKind, opts), &v1beta1.PriorityClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.PriorityClassList{ListMeta: obj.(*v1beta1.PriorityClassList).ListMeta}
+ for _, item := range obj.(*v1beta1.PriorityClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested priorityClasses.
+func (c *FakePriorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(priorityclassesResource, opts))
+}
+
+// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *FakePriorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PriorityClass), err
+}
+
+// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
+func (c *FakePriorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(priorityclassesResource, priorityClass), &v1beta1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PriorityClass), err
+}
+
+// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
+func (c *FakePriorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(priorityclassesResource, name), &v1beta1.PriorityClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePriorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(priorityclassesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.PriorityClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched priorityClass.
+func (c *FakePriorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(priorityclassesResource, name, data, subresources...), &v1beta1.PriorityClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.PriorityClass), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go
new file mode 100644
index 00000000000..4a6878a45e3
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeSchedulingV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeSchedulingV1beta1) PriorityClasses() v1beta1.PriorityClassInterface {
+ return &FakePriorityClasses{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeSchedulingV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
new file mode 100644
index 00000000000..90eaccec5a5
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go
@@ -0,0 +1,128 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/settings/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePodPresets implements PodPresetInterface
+type FakePodPresets struct {
+ Fake *FakeSettingsV1alpha1
+ ns string
+}
+
+var podpresetsResource = schema.GroupVersionResource{Group: "settings.k8s.io", Version: "v1alpha1", Resource: "podpresets"}
+
+var podpresetsKind = schema.GroupVersionKind{Group: "settings.k8s.io", Version: "v1alpha1", Kind: "PodPreset"}
+
+// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any.
+func (c *FakePodPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PodPreset), err
+}
+
+// List takes label and field selectors, and returns the list of PodPresets that match those selectors.
+func (c *FakePodPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(podpresetsResource, podpresetsKind, c.ns, opts), &v1alpha1.PodPresetList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.PodPresetList{ListMeta: obj.(*v1alpha1.PodPresetList).ListMeta}
+ for _, item := range obj.(*v1alpha1.PodPresetList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested podPresets.
+func (c *FakePodPresets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(podpresetsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any.
+func (c *FakePodPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PodPreset), err
+}
+
+// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any.
+func (c *FakePodPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(podpresetsResource, c.ns, podPreset), &v1alpha1.PodPreset{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PodPreset), err
+}
+
+// Delete takes name of the podPreset and deletes it. Returns an error if one occurs.
+func (c *FakePodPresets) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(podpresetsResource, c.ns, name), &v1alpha1.PodPreset{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePodPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(podpresetsResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.PodPresetList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched podPreset.
+func (c *FakePodPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(podpresetsResource, c.ns, name, data, subresources...), &v1alpha1.PodPreset{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.PodPreset), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go
new file mode 100644
index 00000000000..a142edfed0f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeSettingsV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeSettingsV1alpha1) PodPresets(namespace string) v1alpha1.PodPresetInterface {
+ return &FakePodPresets{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeSettingsV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go
new file mode 100644
index 00000000000..fc6f98cf6ac
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeStorageV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeStorageV1) StorageClasses() v1.StorageClassInterface {
+ return &FakeStorageClasses{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeStorageV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
new file mode 100644
index 00000000000..37488a2d7ad
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ storagev1 "k8s.io/api/storage/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeStorageClasses implements StorageClassInterface
+type FakeStorageClasses struct {
+ Fake *FakeStorageV1
+}
+
+var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1", Resource: "storageclasses"}
+
+var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1", Kind: "StorageClass"}
+
+// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
+func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *storagev1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(storageclassesResource, name), &storagev1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*storagev1.StorageClass), err
+}
+
+// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
+func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *storagev1.StorageClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &storagev1.StorageClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &storagev1.StorageClassList{ListMeta: obj.(*storagev1.StorageClassList).ListMeta}
+ for _, item := range obj.(*storagev1.StorageClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested storageClasses.
+func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts))
+}
+
+// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *FakeStorageClasses) Create(storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &storagev1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*storagev1.StorageClass), err
+}
+
+// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *FakeStorageClasses) Update(storageClass *storagev1.StorageClass) (result *storagev1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &storagev1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*storagev1.StorageClass), err
+}
+
+// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
+func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &storagev1.StorageClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &storagev1.StorageClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched storageClass.
+func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *storagev1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &storagev1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*storagev1.StorageClass), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go
new file mode 100644
index 00000000000..1a4d9f56fc8
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeStorageV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeStorageV1alpha1) VolumeAttachments() v1alpha1.VolumeAttachmentInterface {
+ return &FakeVolumeAttachments{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeStorageV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
new file mode 100644
index 00000000000..af04b681c4f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go
@@ -0,0 +1,131 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/storage/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVolumeAttachments implements VolumeAttachmentInterface
+type FakeVolumeAttachments struct {
+ Fake *FakeStorageV1alpha1
+}
+
+var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1alpha1", Resource: "volumeattachments"}
+
+var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttachment"}
+
+// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
+func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.VolumeAttachment), err
+}
+
+// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
+func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1alpha1.VolumeAttachmentList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.VolumeAttachmentList{ListMeta: obj.(*v1alpha1.VolumeAttachmentList).ListMeta}
+ for _, item := range obj.(*v1alpha1.VolumeAttachmentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested volumeAttachments.
+func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts))
+}
+
+// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *FakeVolumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.VolumeAttachment), err
+}
+
+// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *FakeVolumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1alpha1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.VolumeAttachment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1alpha1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.VolumeAttachment), err
+}
+
+// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
+func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1alpha1.VolumeAttachment{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.VolumeAttachmentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched volumeAttachment.
+func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1alpha1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.VolumeAttachment), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go
new file mode 100644
index 00000000000..16f44399065
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
new file mode 100644
index 00000000000..e0e3f1d78bb
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeStorageV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeStorageV1beta1) StorageClasses() v1beta1.StorageClassInterface {
+ return &FakeStorageClasses{c}
+}
+
+func (c *FakeStorageV1beta1) VolumeAttachments() v1beta1.VolumeAttachmentInterface {
+ return &FakeVolumeAttachments{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeStorageV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
new file mode 100644
index 00000000000..cbfbab1a35a
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go
@@ -0,0 +1,120 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/storage/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeStorageClasses implements StorageClassInterface
+type FakeStorageClasses struct {
+ Fake *FakeStorageV1beta1
+}
+
+var storageclassesResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"}
+
+var storageclassesKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "StorageClass"}
+
+// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
+func (c *FakeStorageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(storageclassesResource, name), &v1beta1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StorageClass), err
+}
+
+// List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
+func (c *FakeStorageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(storageclassesResource, storageclassesKind, opts), &v1beta1.StorageClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.StorageClassList{ListMeta: obj.(*v1beta1.StorageClassList).ListMeta}
+ for _, item := range obj.(*v1beta1.StorageClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested storageClasses.
+func (c *FakeStorageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(storageclassesResource, opts))
+}
+
+// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StorageClass), err
+}
+
+// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
+func (c *FakeStorageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(storageclassesResource, storageClass), &v1beta1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StorageClass), err
+}
+
+// Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
+func (c *FakeStorageClasses) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(storageclassesResource, name), &v1beta1.StorageClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeStorageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(storageclassesResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.StorageClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched storageClass.
+func (c *FakeStorageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(storageclassesResource, name, data, subresources...), &v1beta1.StorageClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.StorageClass), err
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
new file mode 100644
index 00000000000..04c0c463adc
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go
@@ -0,0 +1,131 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/storage/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeVolumeAttachments implements VolumeAttachmentInterface
+type FakeVolumeAttachments struct {
+ Fake *FakeStorageV1beta1
+}
+
+var volumeattachmentsResource = schema.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "volumeattachments"}
+
+var volumeattachmentsKind = schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1beta1", Kind: "VolumeAttachment"}
+
+// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
+func (c *FakeVolumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.VolumeAttachment), err
+}
+
+// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
+func (c *FakeVolumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(volumeattachmentsResource, volumeattachmentsKind, opts), &v1beta1.VolumeAttachmentList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.VolumeAttachmentList{ListMeta: obj.(*v1beta1.VolumeAttachmentList).ListMeta}
+ for _, item := range obj.(*v1beta1.VolumeAttachmentList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested volumeAttachments.
+func (c *FakeVolumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(volumeattachmentsResource, opts))
+}
+
+// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *FakeVolumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.VolumeAttachment), err
+}
+
+// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
+func (c *FakeVolumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(volumeattachmentsResource, volumeAttachment), &v1beta1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.VolumeAttachment), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeVolumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(volumeattachmentsResource, "status", volumeAttachment), &v1beta1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.VolumeAttachment), err
+}
+
+// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
+func (c *FakeVolumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteAction(volumeattachmentsResource, name), &v1beta1.VolumeAttachment{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeVolumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(volumeattachmentsResource, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.VolumeAttachmentList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched volumeAttachment.
+func (c *FakeVolumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(volumeattachmentsResource, name, data, subresources...), &v1beta1.VolumeAttachment{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.VolumeAttachment), err
+}
From 3114f07954949bfc8271e5bd5511d06797e8bd0f Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Thu, 18 Apr 2019 19:30:27 -0700
Subject: [PATCH 33/76] Broker is not ready until its constituent pieces are
ready. (#1064)
* Broker is not ready until its constituent pieces are ready.
* Fix broker_lifecycle_test.go.
* Fix broker_test.go.
* Increase coverage.
---
.../eventing/v1alpha1/broker_lifecycle.go | 93 +++++++++---
.../v1alpha1/broker_lifecycle_test.go | 85 +++++++++--
pkg/reconciler/v1alpha1/broker/broker.go | 82 +++++------
pkg/reconciler/v1alpha1/broker/broker_test.go | 138 +++++++++---------
4 files changed, 258 insertions(+), 140 deletions(-)
diff --git a/pkg/apis/eventing/v1alpha1/broker_lifecycle.go b/pkg/apis/eventing/v1alpha1/broker_lifecycle.go
index 31ae7140dca..d6fa93499a5 100644
--- a/pkg/apis/eventing/v1alpha1/broker_lifecycle.go
+++ b/pkg/apis/eventing/v1alpha1/broker_lifecycle.go
@@ -16,7 +16,11 @@ limitations under the License.
package v1alpha1
-import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+import (
+ v1 "k8s.io/api/apps/v1"
+
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
var brokerCondSet = duckv1alpha1.NewLivingConditionSet(
BrokerConditionIngress,
@@ -57,44 +61,91 @@ func (bs *BrokerStatus) InitializeConditions() {
brokerCondSet.Manage(bs).InitializeConditions()
}
-func (bs *BrokerStatus) MarkIngressReady() {
- brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngress)
+func (bs *BrokerStatus) MarkIngressFailed(reason, format string, args ...interface{}) {
+ brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngress, reason, format, args...)
}
-func (bs *BrokerStatus) MarkIngressFailed(err error) {
- brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngress, "failed", "%v", err)
+func (bs *BrokerStatus) PropagateIngressDeploymentAvailability(d *v1.Deployment) {
+ if deploymentIsAvailable(&d.Status) {
+ brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngress)
+ } else {
+ // I don't know how to propagate the status well, so just give the name of the Deployment
+ // for now.
+ bs.MarkIngressFailed("DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
}
-func (bs *BrokerStatus) MarkTriggerChannelReady() {
- brokerCondSet.Manage(bs).MarkTrue(BrokerConditionTriggerChannel)
+func (bs *BrokerStatus) MarkTriggerChannelFailed(reason, format string, args ...interface{}) {
+ brokerCondSet.Manage(bs).MarkFalse(BrokerConditionTriggerChannel, reason, format, args...)
}
-func (bs *BrokerStatus) MarkTriggerChannelFailed(err error) {
- brokerCondSet.Manage(bs).MarkFalse(BrokerConditionTriggerChannel, "failed", "%v", err)
+func (bs *BrokerStatus) PropagateTriggerChannelReadiness(cs *ChannelStatus) {
+ if cs.IsReady() {
+ brokerCondSet.Manage(bs).MarkTrue(BrokerConditionTriggerChannel)
+ } else {
+ msg := "nil"
+ if cc := chanCondSet.Manage(cs).GetCondition(ChannelConditionReady); cc != nil {
+ msg = cc.Message
+ }
+ bs.MarkTriggerChannelFailed("ChannelNotReady", "trigger Channel is not ready: %s", msg)
+ }
}
-func (bs *BrokerStatus) MarkIngressChannelReady() {
- brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngressChannel)
+func (bs *BrokerStatus) MarkIngressChannelFailed(reason, format string, args ...interface{}) {
+ brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngressChannel, reason, format, args...)
}
-func (bs *BrokerStatus) MarkIngressChannelFailed(err error) {
- brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngressChannel, "failed", "%v", err)
+func (bs *BrokerStatus) PropagateIngressChannelReadiness(cs *ChannelStatus) {
+ if cs.IsReady() {
+ brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngressChannel)
+ } else {
+ msg := "nil"
+ if cc := chanCondSet.Manage(cs).GetCondition(ChannelConditionReady); cc != nil {
+ msg = cc.Message
+ }
+ bs.MarkIngressChannelFailed("ChannelNotReady", "ingress Channel is not ready: %s", msg)
+ }
}
-func (bs *BrokerStatus) MarkIngressSubscriptionReady() {
- brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngressSubscription)
+func (bs *BrokerStatus) MarkIngressSubscriptionFailed(reason, format string, args ...interface{}) {
+ brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngressSubscription, reason, format, args...)
}
-func (bs *BrokerStatus) MarkIngressSubscriptionFailed(err error) {
- brokerCondSet.Manage(bs).MarkFalse(BrokerConditionIngressSubscription, "failed", "%v", err)
+func (bs *BrokerStatus) PropagateIngressSubscriptionReadiness(ss *SubscriptionStatus) {
+ if ss.IsReady() {
+ brokerCondSet.Manage(bs).MarkTrue(BrokerConditionIngressSubscription)
+ } else {
+ msg := "nil"
+ if sc := subCondSet.Manage(ss).GetCondition(SubscriptionConditionReady); sc != nil {
+ msg = sc.Message
+ }
+ bs.MarkIngressSubscriptionFailed("SubscriptionNotReady", "ingress Subscription is not ready: %s", msg)
+ }
}
-func (bs *BrokerStatus) MarkFilterReady() {
- brokerCondSet.Manage(bs).MarkTrue(BrokerConditionFilter)
+func (bs *BrokerStatus) MarkFilterFailed(reason, format string, args ...interface{}) {
+ brokerCondSet.Manage(bs).MarkFalse(BrokerConditionFilter, reason, format, args...)
}
-func (bs *BrokerStatus) MarkFilterFailed(err error) {
- brokerCondSet.Manage(bs).MarkFalse(BrokerConditionFilter, "failed", "%v", err)
+func (bs *BrokerStatus) PropagateFilterDeploymentAvailability(d *v1.Deployment) {
+ if deploymentIsAvailable(&d.Status) {
+ brokerCondSet.Manage(bs).MarkTrue(BrokerConditionFilter)
+ } else {
+ // I don't know how to propagate the status well, so just give the name of the Deployment
+ // for now.
+ bs.MarkFilterFailed("DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
+}
+
+func deploymentIsAvailable(d *v1.DeploymentStatus) bool {
+ // Check if the Deployment is available.
+ for _, cond := range d.Conditions {
+ if cond.Type == v1.DeploymentAvailable {
+ return cond.Status == "True"
+ }
+ }
+ // Unable to find the Available condition, fail open.
+ return true
}
// SetAddress makes this Broker addressable by setting the hostname. It also
diff --git a/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
index 73fa5663e24..c5826b94659 100644
--- a/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
+++ b/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
@@ -17,19 +17,17 @@ limitations under the License.
package v1alpha1
import (
- "errors"
"testing"
"github.com/google/go-cmp/cmp"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
var (
trueVal = true
falseVal = false
-
- err = errors.New("foobar")
)
var (
@@ -331,39 +329,49 @@ func TestBrokerIsReady(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
bs := &BrokerStatus{}
if test.markIngressReady != nil {
+ var d *v1.Deployment
if *test.markIngressReady {
- bs.MarkIngressReady()
+ d = availableDeployment()
} else {
- bs.MarkIngressFailed(err)
+ d = unavailableDeployment()
}
+ bs.PropagateIngressDeploymentAvailability(d)
}
if test.markTriggerChannelReady != nil {
+ var c *ChannelStatus
if *test.markTriggerChannelReady {
- bs.MarkTriggerChannelReady()
+ c = readyChannelStatus()
} else {
- bs.MarkTriggerChannelFailed(err)
+ c = notReadyChannelStatus()
}
+ bs.PropagateTriggerChannelReadiness(c)
}
if test.markIngressChannelReady != nil {
+ var c *ChannelStatus
if *test.markIngressChannelReady {
- bs.MarkIngressChannelReady()
+ c = readyChannelStatus()
} else {
- bs.MarkIngressChannelFailed(err)
+ c = notReadyChannelStatus()
}
+ bs.PropagateIngressChannelReadiness(c)
}
if test.markIngressSubscriptionReady != nil {
+ var sub *SubscriptionStatus
if *test.markIngressSubscriptionReady {
- bs.MarkIngressSubscriptionReady()
+ sub = readySubscriptionStatus()
} else {
- bs.MarkIngressSubscriptionFailed(err)
+ sub = notReadySubscriptionStatus()
}
+ bs.PropagateIngressSubscriptionReadiness(sub)
}
if test.markFilterReady != nil {
+ var d *v1.Deployment
if *test.markFilterReady {
- bs.MarkFilterReady()
+ d = availableDeployment()
} else {
- bs.MarkFilterFailed(err)
+ d = unavailableDeployment()
}
+ bs.PropagateFilterDeploymentAvailability(d)
}
bs.SetAddress(test.address)
@@ -374,3 +382,54 @@ func TestBrokerIsReady(t *testing.T) {
})
}
}
+
+func unavailableDeployment() *v1.Deployment {
+ d := &v1.Deployment{}
+ d.Name = "unavailable"
+ d.Status.Conditions = []v1.DeploymentCondition{
+ {
+ Type: v1.DeploymentAvailable,
+ Status: "False",
+ },
+ }
+ return d
+}
+
+func availableDeployment() *v1.Deployment {
+ d := unavailableDeployment()
+ d.Name = "available"
+ d.Status.Conditions = []v1.DeploymentCondition{
+ {
+ Type: v1.DeploymentAvailable,
+ Status: "True",
+ },
+ }
+ return d
+}
+
+func readyChannelStatus() *ChannelStatus {
+ cs := &ChannelStatus{}
+ cs.MarkProvisionerInstalled()
+ cs.MarkProvisioned()
+ cs.SetAddress("foo")
+ return cs
+}
+
+func notReadyChannelStatus() *ChannelStatus {
+ cs := readyChannelStatus()
+ cs.MarkNotProvisioned("foo", "bar")
+ return cs
+}
+
+func readySubscriptionStatus() *SubscriptionStatus {
+ ss := &SubscriptionStatus{}
+ ss.MarkChannelReady()
+ ss.MarkReferencesResolved()
+ return ss
+}
+
+func notReadySubscriptionStatus() *SubscriptionStatus {
+ ss := &SubscriptionStatus{}
+ ss.MarkReferencesResolved()
+ return ss
+}
diff --git a/pkg/reconciler/v1alpha1/broker/broker.go b/pkg/reconciler/v1alpha1/broker/broker.go
index 6a67152d253..6a1032b5189 100644
--- a/pkg/reconciler/v1alpha1/broker/broker.go
+++ b/pkg/reconciler/v1alpha1/broker/broker.go
@@ -19,7 +19,6 @@ package broker
import (
"context"
"fmt"
- "time"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
@@ -29,7 +28,6 @@ import (
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
- "k8s.io/apimachinery/pkg/api/errors"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -52,7 +50,8 @@ const (
controllerAgentName = "broker-controller"
// Name of the corev1.Events emitted from the reconciliation process.
- brokerReconciled = "BrokerReconciled"
+ brokerReadinessChanged = "BrokerReadinessChanged"
+ brokerReconcileError = "BrokerReconcileError"
brokerUpdateStatusFailed = "BrokerUpdateStatusFailed"
ingressSubscriptionDeleteFailed = "IngressSubscriptionDeleteFailed"
ingressSubscriptionCreateFailed = "IngressSubscriptionCreateFailed"
@@ -133,7 +132,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
broker := &v1alpha1.Broker{}
err := r.client.Get(ctx, request.NamespacedName, broker)
- if errors.IsNotFound(err) {
+ if k8serrors.IsNotFound(err) {
logging.FromContext(ctx).Info("Could not find Broker")
return reconcile.Result{}, nil
}
@@ -143,16 +142,19 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
return reconcile.Result{}, err
}
+ originalReadiness := broker.Status.IsReady()
+
// Reconcile this copy of the Broker and then write back any status updates regardless of
// whether the reconcile error out.
- result, reconcileErr := r.reconcile(ctx, broker)
+ reconcileErr := r.reconcile(ctx, broker)
if reconcileErr != nil {
logging.FromContext(ctx).Error("Error reconciling Broker", zap.Error(reconcileErr))
- } else if result.Requeue || result.RequeueAfter > 0 {
- logging.FromContext(ctx).Debug("Broker reconcile requeuing")
+ r.recorder.Event(broker, corev1.EventTypeWarning, brokerReconcileError, fmt.Sprintf("Broker reconcile error: %v", reconcileErr))
} else {
logging.FromContext(ctx).Debug("Broker reconciled")
- r.recorder.Event(broker, corev1.EventTypeNormal, brokerReconciled, "Broker reconciled")
+ if originalReadiness != broker.Status.IsReady() {
+ r.recorder.Event(broker, corev1.EventTypeNormal, brokerReadinessChanged, fmt.Sprintf("Broker readiness changed to %v", broker.Status.IsReady()))
+ }
}
if _, err = r.updateStatus(broker); err != nil {
@@ -162,10 +164,10 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}
// Requeue if the resource is not ready:
- return result, reconcileErr
+ return reconcile.Result{}, reconcileErr
}
-func (r *reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) (reconcile.Result, error) {
+func (r *reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) error {
b.Status.InitializeConditions()
// 1. Trigger Channel is created for all events. Triggers will Subscribe to this Channel.
@@ -181,72 +183,70 @@ func (r *reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) (reconci
if b.DeletionTimestamp != nil {
// Everything is cleaned up by the garbage collector.
- return reconcile.Result{}, nil
+ return nil
}
triggerChan, err := r.reconcileTriggerChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling the trigger channel", zap.Error(err))
- b.Status.MarkTriggerChannelFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkTriggerChannelFailed("ChannelFailure", "%v", err)
+ return err
} else if triggerChan.Status.Address.Hostname == "" {
- logging.FromContext(ctx).Info("Trigger Channel is not yet ready", zap.Any("triggerChan", triggerChan))
- // Give the Channel some time to get its address. One second was chosen arbitrarily.
- return reconcile.Result{RequeueAfter: time.Second}, nil
+ // We check the trigger Channel's address here because it is needed to create the Ingress
+ // Deployment.
+ logging.FromContext(ctx).Debug("Trigger Channel does not have an address", zap.Any("triggerChan", triggerChan))
+ b.Status.MarkTriggerChannelFailed("NoAddress", "Channel does not have an address.")
+ return nil
}
- b.Status.MarkTriggerChannelReady()
+ b.Status.PropagateTriggerChannelReadiness(&triggerChan.Status)
- _, err = r.reconcileFilterDeployment(ctx, b)
+ filterDeployment, err := r.reconcileFilterDeployment(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling filter Deployment", zap.Error(err))
- b.Status.MarkFilterFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkFilterFailed("DeploymentFailure", "%v", err)
+ return err
}
_, err = r.reconcileFilterService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling filter Service", zap.Error(err))
- b.Status.MarkFilterFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkFilterFailed("ServiceFailure", "%v", err)
+ return err
}
- b.Status.MarkFilterReady()
+ b.Status.PropagateFilterDeploymentAvailability(filterDeployment)
- _, err = r.reconcileIngressDeployment(ctx, b, triggerChan)
+ ingressDeployment, err := r.reconcileIngressDeployment(ctx, b, triggerChan)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling ingress Deployment", zap.Error(err))
- b.Status.MarkIngressFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkIngressFailed("DeploymentFailure", "%v", err)
+ return err
}
svc, err := r.reconcileIngressService(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling ingress Service", zap.Error(err))
- b.Status.MarkIngressFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkIngressFailed("ServiceFailure", "%v", err)
+ return err
}
- b.Status.MarkIngressReady()
+ b.Status.PropagateIngressDeploymentAvailability(ingressDeployment)
b.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace))
ingressChan, err := r.reconcileIngressChannel(ctx, b)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling the ingress channel", zap.Error(err))
- b.Status.MarkIngressChannelFailed(err)
- return reconcile.Result{}, err
- } else if ingressChan.Status.Address.Hostname == "" {
- logging.FromContext(ctx).Info("Ingress Channel is not yet ready", zap.Any("ingressChan", ingressChan))
- // Give the Channel some time to get its address. One second was chosen arbitrarily.
- return reconcile.Result{RequeueAfter: time.Second}, nil
+ b.Status.MarkIngressChannelFailed("ChannelFailure", "%v", err)
+ return err
}
- b.Status.MarkIngressChannelReady()
+ b.Status.PropagateIngressChannelReadiness(&ingressChan.Status)
- _, err = r.reconcileIngressSubscription(ctx, b, ingressChan, svc)
+ ingressSub, err := r.reconcileIngressSubscription(ctx, b, ingressChan, svc)
if err != nil {
logging.FromContext(ctx).Error("Problem reconciling the ingress subscription", zap.Error(err))
- b.Status.MarkIngressSubscriptionFailed(err)
- return reconcile.Result{}, err
+ b.Status.MarkIngressSubscriptionFailed("SubscriptionFailure", "%v", err)
+ return err
}
- b.Status.MarkIngressSubscriptionReady()
+ b.Status.PropagateIngressSubscriptionReadiness(&ingressSub.Status)
- return reconcile.Result{}, nil
+ return nil
}
// updateStatus may in fact update the broker's finalizers in addition to the status.
diff --git a/pkg/reconciler/v1alpha1/broker/broker_test.go b/pkg/reconciler/v1alpha1/broker/broker_test.go
index af53b9d1568..e5e37f13411 100644
--- a/pkg/reconciler/v1alpha1/broker/broker_test.go
+++ b/pkg/reconciler/v1alpha1/broker/broker_test.go
@@ -22,7 +22,6 @@ import (
"fmt"
"strings"
"testing"
- "time"
"github.com/google/go-cmp/cmp"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
@@ -32,6 +31,7 @@ import (
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -39,7 +39,6 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
@@ -72,7 +71,8 @@ var (
// Map of events to set test cases' expectations easier.
events = map[string]corev1.Event{
- brokerReconciled: {Reason: brokerReconciled, Type: corev1.EventTypeNormal},
+ brokerReadinessChanged: {Reason: brokerReadinessChanged, Type: corev1.EventTypeNormal},
+ brokerReconcileError: {Reason: brokerReconcileError, Type: corev1.EventTypeWarning},
brokerUpdateStatusFailed: {Reason: brokerUpdateStatusFailed, Type: corev1.EventTypeWarning},
ingressSubscriptionDeleteFailed: {Reason: ingressSubscriptionDeleteFailed, Type: corev1.EventTypeWarning},
ingressSubscriptionCreateFailed: {Reason: ingressSubscriptionCreateFailed, Type: corev1.EventTypeWarning},
@@ -146,11 +146,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeDeletingBroker(),
},
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- },
},
{
Name: "Trigger Channel.List error",
@@ -172,6 +167,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting Trigger Channel",
},
{
@@ -192,6 +188,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating Trigger Channel",
},
{
@@ -208,11 +205,6 @@ func TestReconcile(t *testing.T) {
// GenerateName.
// makeDifferentTriggerChannel(),
},
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- },
},
{
Name: "Trigger Channel is not yet Addressable",
@@ -221,7 +213,6 @@ func TestReconcile(t *testing.T) {
makeBroker(),
makeNonAddressableTriggerChannel(),
},
- WantResult: reconcile.Result{RequeueAfter: time.Second},
},
{
Name: "Filter Deployment.Get error",
@@ -242,6 +233,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting filter Deployment",
},
{
@@ -263,6 +255,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating filter Deployment",
},
{
@@ -285,6 +278,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error updating filter Deployment",
},
{
@@ -306,6 +300,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting filter Service",
},
{
@@ -327,6 +322,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating filter Service",
},
{
@@ -349,6 +345,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error updating filter Service",
},
{
@@ -370,6 +367,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting ingress Deployment",
},
{
@@ -391,6 +389,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating ingress Deployment",
},
{
@@ -413,6 +412,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error updating ingress Deployment",
},
{
@@ -434,6 +434,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting ingress Service",
},
{
@@ -455,6 +456,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating ingress Service",
},
{
@@ -477,6 +479,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error updating ingress Service",
},
{
@@ -500,6 +503,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting Ingress Channel",
},
{
@@ -537,6 +541,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating Ingress Channel",
},
{
@@ -573,11 +578,6 @@ func TestReconcile(t *testing.T) {
// GenerateName.
// makeDifferentIngressChannel(),
},
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- },
},
{
Name: "Ingress Channel is not yet Addressable",
@@ -606,7 +606,6 @@ func TestReconcile(t *testing.T) {
},
},
},
- WantResult: reconcile.Result{RequeueAfter: time.Second},
},
{
Name: "Subscription.List error",
@@ -626,6 +625,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error getting Subscription",
},
{
@@ -646,6 +646,7 @@ func TestReconcile(t *testing.T) {
},
},
},
+ WantEvent: []corev1.Event{events[brokerReconcileError]},
WantErrMsg: "test error creating Subscription",
},
{
@@ -663,11 +664,6 @@ func TestReconcile(t *testing.T) {
// GenerateName.
// makeDifferentSubscription(),
},
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- },
},
{
Name: "Subscription.Delete error",
@@ -688,7 +684,7 @@ func TestReconcile(t *testing.T) {
},
},
},
- WantEvent: []corev1.Event{events[ingressSubscriptionDeleteFailed]},
+ WantEvent: []corev1.Event{events[ingressSubscriptionDeleteFailed], events[brokerReconcileError]},
WantErrMsg: "test error deleting Subscription",
},
{
@@ -710,7 +706,7 @@ func TestReconcile(t *testing.T) {
},
},
},
- WantEvent: []corev1.Event{events[ingressSubscriptionCreateFailed]},
+ WantEvent: []corev1.Event{events[ingressSubscriptionCreateFailed], events[brokerReconcileError]},
WantErrMsg: "test error creating Subscription",
},
{
@@ -740,14 +736,7 @@ func TestReconcile(t *testing.T) {
},
},
WantErrMsg: "test error getting the Broker for status update",
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- {
- Reason: brokerUpdateStatusFailed, Type: corev1.EventTypeWarning,
- },
- },
+ WantEvent: []corev1.Event{events[brokerUpdateStatusFailed]},
},
{
Name: "Broker.Status.Update error",
@@ -768,14 +757,7 @@ func TestReconcile(t *testing.T) {
},
},
WantErrMsg: "test error updating the Broker status",
- WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
- {
- Reason: brokerUpdateStatusFailed, Type: corev1.EventTypeWarning,
- },
- },
+ WantEvent: []corev1.Event{events[brokerUpdateStatusFailed]},
},
{
Name: "Successful reconcile",
@@ -785,6 +767,7 @@ func TestReconcile(t *testing.T) {
// The Channel needs to be addressable for the reconcile to succeed.
makeTriggerChannel(),
makeIngressChannel(),
+ makeTestSubscription(),
},
Mocks: controllertesting.Mocks{
MockLists: []controllertesting.MockList{
@@ -815,13 +798,10 @@ func TestReconcile(t *testing.T) {
makeIngressService(),
// TODO Uncomment makeIngressChannel() when our test framework handles generateName.
// makeIngressChannel(),
- // Because the
makeTestSubscription(),
},
WantEvent: []corev1.Event{
- {
- Reason: brokerReconciled, Type: corev1.EventTypeNormal,
- },
+ events[brokerReadinessChanged],
},
},
}
@@ -866,12 +846,12 @@ func makeBroker() *v1alpha1.Broker {
func makeReadyBroker() *v1alpha1.Broker {
b := makeBroker()
b.Status.InitializeConditions()
- b.Status.MarkIngressReady()
- b.Status.MarkTriggerChannelReady()
- b.Status.MarkIngressChannelReady()
- b.Status.MarkFilterReady()
+ b.Status.PropagateIngressDeploymentAvailability(makeAvailableDeployment())
+ b.Status.PropagateTriggerChannelReadiness(makeReadyChannelStatus())
+ b.Status.PropagateIngressChannelReadiness(makeReadyChannelStatus())
+ b.Status.PropagateFilterDeploymentAvailability(makeAvailableDeployment())
b.Status.SetAddress(fmt.Sprintf("%s-broker.%s.svc.%s", brokerName, testNS, utils.GetClusterDomainName()))
- b.Status.MarkIngressSubscriptionReady()
+ b.Status.PropagateIngressSubscriptionReadiness(makeReadySubscriptionStatus())
return b
}
@@ -882,7 +862,7 @@ func makeDeletingBroker() *v1alpha1.Broker {
}
func makeTriggerChannel() *v1alpha1.Channel {
- return &v1alpha1.Channel{
+ c := &v1alpha1.Channel{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNS,
GenerateName: fmt.Sprintf("%s-broker-", brokerName),
@@ -897,12 +877,11 @@ func makeTriggerChannel() *v1alpha1.Channel {
Spec: v1alpha1.ChannelSpec{
Provisioner: channelProvisioner,
},
- Status: v1alpha1.ChannelStatus{
- Address: duckv1alpha1.Addressable{
- Hostname: triggerChannelHostname,
- },
- },
}
+ c.Status.MarkProvisionerInstalled()
+ c.Status.MarkProvisioned()
+ c.Status.SetAddress(triggerChannelHostname)
+ return c
}
func makeNonAddressableTriggerChannel() *v1alpha1.Channel {
@@ -918,7 +897,7 @@ func makeDifferentTriggerChannel() *v1alpha1.Channel {
}
func makeIngressChannel() *v1alpha1.Channel {
- return &v1alpha1.Channel{
+ c := &v1alpha1.Channel{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNS,
GenerateName: fmt.Sprintf("%s-broker-ingress-", brokerName),
@@ -936,12 +915,11 @@ func makeIngressChannel() *v1alpha1.Channel {
Spec: v1alpha1.ChannelSpec{
Provisioner: channelProvisioner,
},
- Status: v1alpha1.ChannelStatus{
- Address: duckv1alpha1.Addressable{
- Hostname: ingressChannelHostname,
- },
- },
}
+ c.Status.MarkProvisionerInstalled()
+ c.Status.MarkProvisioned()
+ c.Status.SetAddress(ingressChannelHostname)
+ return c
}
func makeNonAddressableIngressChannel() *v1alpha1.Channel {
@@ -1026,7 +1004,7 @@ func makeDifferentIngressService() *corev1.Service {
}
func makeTestSubscription() *v1alpha1.Subscription {
- return &v1alpha1.Subscription{
+ s := &v1alpha1.Subscription{
TypeMeta: metav1.TypeMeta{
APIVersion: "eventing.knative.dev/v1alpha1",
Kind: "Subscription",
@@ -1057,6 +1035,9 @@ func makeTestSubscription() *v1alpha1.Subscription {
},
},
}
+ s.Status.MarkChannelReady()
+ s.Status.MarkReferencesResolved()
+ return s
}
func makeDifferentSubscription() *v1alpha1.Subscription {
@@ -1076,3 +1057,30 @@ func getOwnerReference() metav1.OwnerReference {
BlockOwnerDeletion: &trueVal,
}
}
+
+func makeAvailableDeployment() *v1.Deployment {
+ d := &v1.Deployment{}
+ d.Name = "deployment-name"
+ d.Status.Conditions = []v1.DeploymentCondition{
+ {
+ Type: v1.DeploymentAvailable,
+ Status: "True",
+ },
+ }
+ return d
+}
+
+func makeReadyChannelStatus() *v1alpha1.ChannelStatus {
+ cs := &v1alpha1.ChannelStatus{}
+ cs.MarkProvisionerInstalled()
+ cs.MarkProvisioned()
+ cs.SetAddress("foo")
+ return cs
+}
+
+func makeReadySubscriptionStatus() *v1alpha1.SubscriptionStatus {
+ ss := &v1alpha1.SubscriptionStatus{}
+ ss.MarkChannelReady()
+ ss.MarkReferencesResolved()
+ return ss
+}
From ba92d78028c73690f05c2ff29b82108223ea8a0f Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Fri, 19 Apr 2019 11:38:26 -0700
Subject: [PATCH 34/76] Add e2e testing support for gcp-pubsub CCP (#1066)
* add e2e testing for gcp pubsub CCP
* change the variable to local
* 2019, not 2018
* used constants as the CCP name
* delete ccps after tests finish
* add new line for all copyright comments under test...
* shell is not happy with space
* solve all issues except running gcppubsub channel locally
* add debug info for a test case that usually fails
* enable test run on local
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* Update test/e2e-tests.sh
Co-Authored-By: Fredy-Z
* fix CR issues
* remove the test for default
* fix codereview issues
* set E2E_PROJECT_ID globally in this file
---
test/builders.go | 5 +-
test/cleanup.go | 1 +
test/clients.go | 1 +
test/constants.go | 46 +++++++++++++
test/crd.go | 1 +
test/crd_checks.go | 1 +
test/e2e-tests.sh | 88 ++++++++++++++++++++----
test/e2e/broker_trigger_test.go | 1 +
test/e2e/channel_chain_test.go | 1 +
test/e2e/e2e.go | 24 ++++++-
test/e2e/event_transformation_test.go | 8 ++-
test/e2e/main_test.go | 17 ++++-
test/e2e/single_event_test.go | 1 +
test/e2e_flags.go | 32 +++------
test/states.go | 1 +
test/test_images/logevents/main.go | 3 +-
test/test_images/sendevent/main.go | 2 +-
test/test_images/transformevents/main.go | 3 +-
18 files changed, 194 insertions(+), 42 deletions(-)
create mode 100644 test/constants.go
diff --git a/test/builders.go b/test/builders.go
index 4ec5e6bcc56..6b48b2f6f1e 100644
--- a/test/builders.go
+++ b/test/builders.go
@@ -1,9 +1,12 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/test/cleanup.go b/test/cleanup.go
index f63ba43c873..1c71601c3ea 100644
--- a/test/cleanup.go
+++ b/test/cleanup.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/clients.go b/test/clients.go
index 5ef37beec56..efec4072888 100644
--- a/test/clients.go
+++ b/test/clients.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/constants.go b/test/constants.go
new file mode 100644
index 00000000000..c5b68f78289
--- /dev/null
+++ b/test/constants.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+const (
+ // DefaultBrokerName is the name of the Broker that is automatically created after the current namespace is labeled.
+ DefaultBrokerName = "default"
+ // DefaultClusterChannelProvisioner is the default ClusterChannelProvisioner we will run tests against.
+ DefaultClusterChannelProvisioner = InMemoryProvisioner
+
+ // InMemoryChannelProvisioner is the in-memory-channel provisioner.
+ // It will be deleted in 0.7, see https://github.com/knative/eventing/pull/1062 for more info.
+ InMemoryChannelProvisioner = "in-memory-channel"
+
+ // InMemoryProvisioner is the in-memory provisioner, which is also the default one.
+ InMemoryProvisioner = "in-memory"
+ // GCPPubSubProvisioner is the gcp-pubsub provisioner, which is under contrib/gcppubsub.
+ GCPPubSubProvisioner = "gcp-pubsub"
+ // KafkaProvisioner is the kafka provisioner, which is under contrib/kafka.
+ KafkaProvisioner = "kafka"
+ // NatssProvisioner is the natss provisioner, which is under contrib/natss.
+ NatssProvisioner = "natss"
+)
+
+// validProvisioners is a list of provisioners that Eventing currently support.
+var validProvisioners = []string{
+ InMemoryChannelProvisioner,
+ InMemoryProvisioner,
+ GCPPubSubProvisioner,
+ KafkaProvisioner,
+ NatssProvisioner,
+}
diff --git a/test/crd.go b/test/crd.go
index 5fc3816e109..046532f5f7b 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/crd_checks.go b/test/crd_checks.go
index 4d0aa903c68..d165151b783 100644
--- a/test/crd_checks.go
+++ b/test/crd_checks.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index b0d2b0f86b7..6536d7b511a 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -28,42 +28,104 @@ source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.s
# Helper functions.
-# Setup the Knative environment for running tests
+readonly EVENTING_CONFIG="config/"
+
+readonly IN_MEMORY_CHANNEL_CONFIG="config/provisioners/in-memory-channel/in-memory-channel.yaml"
+
+# GCP PubSub config template.
+readonly GCP_PUBSUB_CONFIG_TEMPLATE="contrib/gcppubsub/config/gcppubsub.yaml"
+# Real GCP PubSub config, generated from the template.
+readonly GCP_PUBSUB_CONFIG="$(mktemp)"
+
+# TODO(Fredy-Z): delete this flag after https://github.com/knative/test-infra/pull/692 is merged and updated
+E2E_PROJECT_ID=""
+
+# Constants used for creating ServiceAccount for GCP PubSub provisioner setup if it's not running on Prow.
+readonly PUBSUB_SERVICE_ACCOUNT="eventing_pubsub_test"
+readonly PUBSUB_SERVICE_ACCOUNT_KEY="$(mktemp)"
+readonly PUBSUB_SECRET_NAME="gcppubsub-channel-key"
+
+# Setup the Knative environment for running tests.
function knative_setup() {
- # Install the latest stable Knative/serving in the current cluster
+ # Install the latest stable Knative/serving in the current cluster.
start_latest_knative_serving || return 1
- # Install the latest Knative/eventing in the current cluster
+ # Install the latest Knative/eventing in the current cluster.
echo ">> Starting Knative Eventing"
echo "Installing Knative Eventing"
- ko apply -f config/ || return 1
+ ko apply -f ${EVENTING_CONFIG} || return 1
wait_until_pods_running knative-eventing || fail_test "Knative Eventing did not come up"
echo "Installing In-Memory ClusterChannelProvisioner"
- ko apply -f config/provisioners/in-memory-channel/in-memory-channel.yaml || return 1
+ ko apply -f ${IN_MEMORY_CHANNEL_CONFIG} || return 1
wait_until_pods_running knative-eventing || fail_test "Failed to install the In-Memory ClusterChannelProvisioner"
+
+ E2E_PROJECT_ID="$(gcloud config get-value project)"
+ echo "Installing GCPPubSub ClusterChannelProvisioner"
+ gcppubsub_setup
+ sed "s/REPLACE_WITH_GCP_PROJECT/${E2E_PROJECT_ID}/" ${GCP_PUBSUB_CONFIG_TEMPLATE} > ${GCP_PUBSUB_CONFIG}
+ ko apply -f ${GCP_PUBSUB_CONFIG}
+ wait_until_pods_running knative-eventing || fail_test "Failed to install the GCPPubSub ClusterChannelProvisioner"
}
+# Teardown the Knative environment after tests finish.
function knative_teardown() {
echo ">> Stopping Knative Eventing"
echo "Uninstalling Knative Eventing"
- ko delete --ignore-not-found=true --now --timeout 60s -f config/
+ ko delete --ignore-not-found=true --now --timeout 60s -f ${EVENTING_CONFIG}
- wait_until_object_does_not_exist namespaces knative-eventing
+ echo "Uninstalling In-Memory ClusterChannelProvisioner"
+ ko delete --ignore-not-found=true --now --timeout 60s -f ${IN_MEMORY_CHANNEL_CONFIG}
- wait_until_object_does_not_exist customresourcedefinitions subscriptions.eventing.knative.dev
- wait_until_object_does_not_exist customresourcedefinitions channels.eventing.knative.dev
+ echo "Uninstalling GCPPubSub ClusterChannelProvisioner"
+ gcppubsub_teardown
+ ko delete --ignore-not-found=true --now --timeout 60s -f ${GCP_PUBSUB_CONFIG}
+
+ wait_until_object_does_not_exist namespaces knative-eventing
}
-# Setup resources common to all eventing tests
+# Setup resources common to all eventing tests.
function test_setup() {
- # Publish test images
+ # Publish test images.
echo ">> Publishing test images"
$(dirname $0)/upload-test-images.sh e2e || fail_test "Error uploading test images"
}
+# Create resources required for GCP PubSub provisioner setup
+function gcppubsub_setup() {
+ local service_account_key="${GOOGLE_APPLICATION_CREDENTIALS}"
+ # When not running on Prow we need to set up a service account for PubSub
+ if (( ! IS_PROW )); then
+ echo "Set up ServiceAccount for GCP PubSub provisioner"
+ gcloud services enable pubsub.googleapis.com
+ gcloud iam service-accounts create ${PUBSUB_SERVICE_ACCOUNT}
+ gcloud projects add-iam-policy-binding ${E2E_PROJECT_ID} \
+ --member=serviceAccount:${PUBSUB_SERVICE_ACCOUNT}@${E2E_PROJECT_ID}.iam.gserviceaccount.com \
+ --role roles/pubsub.editor
+ gcloud iam service-accounts keys create ${PUBSUB_SERVICE_ACCOUNT_KEY} \
+ --iam-account=${PUBSUB_SERVICE_ACCOUNT}@${E2E_PROJECT_ID}.iam.gserviceaccount.com
+ service_account_key="${PUBSUB_SERVICE_ACCOUNT_KEY}"
+ fi
+ kubectl -n knative-eventing create secret generic ${PUBSUB_SECRET_NAME} --from-file=key.json=${service_account_key}
+}
+
+# Delete resources that were used for GCP PubSub provisioner setup
+function gcppubsub_teardown() {
+ # When not running on Prow we need to delete the service account created for PubSub
+ if (( ! IS_PROW )); then
+ echo "Tear down ServiceAccount for GCP PubSub provisioner"
+ gcloud iam service-accounts keys delete -q ${PUBSUB_SERVICE_ACCOUNT_KEY} \
+ --iam-account=${PUBSUB_SERVICE_ACCOUNT}@${E2E_PROJECT_ID}.iam.gserviceaccount.com
+ gcloud projects remove-iam-policy-binding ${E2E_PROJECT_ID} \
+ --member=serviceAccount:${PUBSUB_SERVICE_ACCOUNT}@${E2E_PROJECT_ID}.iam.gserviceaccount.com \
+ --role roles/pubsub.editor
+ gcloud iam service-accounts delete -q ${PUBSUB_SERVICE_ACCOUNT}@${E2E_PROJECT_ID}.iam.gserviceaccount.com
+ fi
+ kubectl -n knative-eventing delete secret ${PUBSUB_SECRET_NAME}
+}
+
function dump_extra_cluster_state() {
- # Collecting logs from all knative's eventing pods
+ # Collecting logs from all knative's eventing pods.
echo "============================================================"
for namespace in "knative-eventing" "e2etestfn3"; do
for pod in $(kubectl get pod -n $namespace | grep Running | awk '{print $1}' ); do
@@ -83,6 +145,6 @@ function dump_extra_cluster_state() {
initialize $@
-go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel || fail_test
+go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,in-memory,gcp-pubsub || fail_test
success
diff --git a/test/e2e/broker_trigger_test.go b/test/e2e/broker_trigger_test.go
index ea9c67ec502..1ced4b166d6 100644
--- a/test/e2e/broker_trigger_test.go
+++ b/test/e2e/broker_trigger_test.go
@@ -2,6 +2,7 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/e2e/channel_chain_test.go b/test/e2e/channel_chain_test.go
index cfa43f4e1e2..c914f987c61 100644
--- a/test/e2e/channel_chain_test.go
+++ b/test/e2e/channel_chain_test.go
@@ -2,6 +2,7 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 665d13d5d62..53cbf7f7db1 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -1,5 +1,6 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -21,6 +22,7 @@ import (
"sync"
"testing"
"time"
+ "unicode"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/test"
@@ -82,7 +84,7 @@ func Setup(t *testing.T, runInParallel bool, logf logging.FormatLogger) (*test.C
// Create a new namespace to run this test case.
// Combine the test name and CCP to avoid duplication.
baseFuncName := GetBaseFuncName(t.Name())
- ns := strings.ToLower(baseFuncName) + "-" + ccpToTest
+ ns := makeK8sNamePrefix(baseFuncName)
CreateNamespaceIfNeeded(t, clients, ns, t.Logf)
// Run the test case in parallel if needed.
@@ -93,6 +95,26 @@ func Setup(t *testing.T, runInParallel bool, logf logging.FormatLogger) (*test.C
return clients, ns, ccpToTest, cleaner
}
+// TODO(Fredy-Z): Borrowed this function from Knative/Serving, will delete it after we move it to Knative/pkg/test.
+// makeK8sNamePrefix converts each chunk of non-alphanumeric characters into a single dash
+// and also converts camelcase tokens into dash-delimited lowercase tokens.
+func makeK8sNamePrefix(s string) string {
+ var sb strings.Builder
+ newToken := false
+ for _, c := range s {
+ if !(unicode.IsLetter(c) || unicode.IsNumber(c)) {
+ newToken = true
+ continue
+ }
+ if sb.Len() > 0 && (newToken || unicode.IsUpper(c)) {
+ sb.WriteRune('-')
+ }
+ sb.WriteRune(unicode.ToLower(c))
+ newToken = false
+ }
+ return sb.String()
+}
+
// GetBaseFuncName returns the baseFuncName parsed from the fullFuncName.
// eg. test/e2e.TestMain will return TestMain.
// TODO(Fredy-Z): many functions in this file can be moved to knative/pkg/test to make it cleaner.
diff --git a/test/e2e/event_transformation_test.go b/test/e2e/event_transformation_test.go
index 6c7b0e7599d..b3ce832873f 100644
--- a/test/e2e/event_transformation_test.go
+++ b/test/e2e/event_transformation_test.go
@@ -2,6 +2,7 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -116,7 +117,12 @@ func TestEventTransformation(t *testing.T) {
// check if the logging service receives the correct number of event messages
expectedContent := body + msgPostfix
expectedContentCount := len(subscriptionNames1) * len(subscriptionNames2)
- if err := WaitForLogContentCount(clients, loggerPod.Name, loggerPod.Spec.Containers[0].Name, ns, expectedContent, expectedContentCount); err != nil {
+ podName := loggerPod.Name
+ containerName := loggerPod.Spec.Containers[0].Name
+ if err := WaitForLogContentCount(clients, podName, containerName, ns, expectedContent, expectedContentCount); err != nil {
+ if logs, err := clients.Kube.PodLogs(podName, containerName, ns); err != nil {
+ t.Logf("Log content: %s\n", string(logs))
+ }
t.Fatalf("String %q does not appear %d times in logs of logger pod %q: %v", expectedContent, expectedContentCount, loggerPod.Name, err)
}
}
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index 3a0f513616f..69d7cdacc8e 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -2,6 +2,7 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -27,13 +28,25 @@ import (
// channelTestMap indicates which test cases we want to run for a given CCP.
var channelTestMap = map[string][]func(t *testing.T){
- "in-memory-channel": []func(t *testing.T){
+ test.InMemoryProvisioner: []func(t *testing.T){
TestSingleBinaryEvent,
TestSingleStructuredEvent,
TestEventTransformation,
TestChannelChain,
TestDefaultBrokerWithManyTriggers,
},
+ test.InMemoryChannelProvisioner: []func(t *testing.T){
+ TestSingleBinaryEvent,
+ TestSingleStructuredEvent,
+ TestEventTransformation,
+ TestChannelChain,
+ },
+ test.GCPPubSubProvisioner: []func(t *testing.T){
+ TestSingleBinaryEvent,
+ TestSingleStructuredEvent,
+ TestEventTransformation,
+ TestChannelChain,
+ },
}
func TestMain(t *testing.T) {
@@ -50,7 +63,7 @@ func TestMain(t *testing.T) {
funcName := runtime.FuncForPC(reflect.ValueOf(testFunc).Pointer()).Name()
baseFuncName := GetBaseFuncName(funcName)
t.Logf("Running %q with %q ClusterChannelProvisioner", baseFuncName, provisioner)
- t.Run(baseFuncName, testFunc)
+ t.Run(baseFuncName+"-"+provisioner, testFunc)
}
}
}
diff --git a/test/e2e/single_event_test.go b/test/e2e/single_event_test.go
index 5303baf37a9..ddc80207076 100644
--- a/test/e2e/single_event_test.go
+++ b/test/e2e/single_event_test.go
@@ -2,6 +2,7 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/e2e_flags.go b/test/e2e_flags.go
index 78597a42d3b..7614518a52a 100644
--- a/test/e2e_flags.go
+++ b/test/e2e_flags.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -29,29 +30,8 @@ import (
testLogging "github.com/knative/pkg/test/logging"
)
-const (
- // E2ETestNamespacePrefix is the namespace prefix used for running all e2e tests.
- E2ETestNamespacePrefix = "e2e-ns"
- // DefaultClusterChannelProvisioner is the default ClusterChannelProvisioner we will run tests against.
- DefaultClusterChannelProvisioner = "in-memory-channel"
- // DefaultBrokerName is the name of the Broker that is automatically created after the current namespace is labeled.
- DefaultBrokerName = "default"
-)
-
var logger = logging.FromContext(context.Background()).Named("eventing-e2e-testing")
-// validProvisioners is a list of provisioners that Eventing currently support.
-var validProvisioners = []string{DefaultClusterChannelProvisioner}
-
-func isValid(provisioner string) bool {
- for i := range validProvisioners {
- if provisioner == validProvisioners[i] {
- return true
- }
- }
- return false
-}
-
// EventingFlags holds the command line flags specific to knative/eventing.
var EventingFlags = initializeEventingFlags()
@@ -76,6 +56,16 @@ func (ps *Provisioners) Set(value string) error {
return nil
}
+// isValid reports whether the given provisioner is in the list of valid provisioners.
+func isValid(provisioner string) bool {
+ for i := range validProvisioners {
+ if provisioner == validProvisioners[i] {
+ return true
+ }
+ }
+ return false
+}
+
// EventingEnvironmentFlags holds the e2e flags needed only by the eventing repo.
type EventingEnvironmentFlags struct {
Provisioners
diff --git a/test/states.go b/test/states.go
index 1ff898b05f4..a0466e5dc18 100644
--- a/test/states.go
+++ b/test/states.go
@@ -1,5 +1,6 @@
/*
Copyright 2018 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
diff --git a/test/test_images/logevents/main.go b/test/test_images/logevents/main.go
index 476edb2d9d8..f0a3a64253d 100644
--- a/test/test_images/logevents/main.go
+++ b/test/test_images/logevents/main.go
@@ -1,5 +1,6 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -19,7 +20,7 @@ import (
"context"
"log"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
)
func handler(event cloudevents.Event) {
diff --git a/test/test_images/sendevent/main.go b/test/test_images/sendevent/main.go
index 0b384392b38..7650afd6290 100644
--- a/test/test_images/sendevent/main.go
+++ b/test/test_images/sendevent/main.go
@@ -26,7 +26,7 @@ import (
"strconv"
"time"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
)
diff --git a/test/test_images/transformevents/main.go b/test/test_images/transformevents/main.go
index 1b14a185a69..88e8db3ae6f 100644
--- a/test/test_images/transformevents/main.go
+++ b/test/test_images/transformevents/main.go
@@ -1,5 +1,6 @@
/*
Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
@@ -21,7 +22,7 @@ import (
"fmt"
"log"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
)
type example struct {
From ca196e7b419a5613f0a4e494d5d5a7f7db83cc72 Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Fri, 19 Apr 2019 13:53:27 -0700
Subject: [PATCH 35/76] Trigger is not ready until its constituent pieces are
ready. (#1071)
* Broker is not ready until its constituent pieces are ready.
* Fix broker_lifecycle_test.go.
* Fix broker_test.go.
* Increase coverage.
* Propagate readiness from the Trigger's constituent pieces to the Trigger's readiness.
* Fix unit tests.
* Fix unit tests.
* Remove extra newline.
---
.../v1alpha1/broker_lifecycle_test.go | 71 ++------------
pkg/apis/eventing/v1alpha1/test_helper.go | 97 +++++++++++++++++++
.../eventing/v1alpha1/trigger_lifecycle.go | 35 +++++--
.../v1alpha1/trigger_lifecycle_test.go | 20 ++--
pkg/reconciler/v1alpha1/broker/broker_test.go | 35 +------
pkg/reconciler/v1alpha1/trigger/trigger.go | 10 +-
.../v1alpha1/trigger/trigger_test.go | 24 +++--
7 files changed, 166 insertions(+), 126 deletions(-)
create mode 100644 pkg/apis/eventing/v1alpha1/test_helper.go
diff --git a/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
index c5826b94659..0b2d61768ba 100644
--- a/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
+++ b/pkg/apis/eventing/v1alpha1/broker_lifecycle_test.go
@@ -331,45 +331,45 @@ func TestBrokerIsReady(t *testing.T) {
if test.markIngressReady != nil {
var d *v1.Deployment
if *test.markIngressReady {
- d = availableDeployment()
+ d = TestHelper.AvailableDeployment()
} else {
- d = unavailableDeployment()
+ d = TestHelper.UnavailableDeployment()
}
bs.PropagateIngressDeploymentAvailability(d)
}
if test.markTriggerChannelReady != nil {
var c *ChannelStatus
if *test.markTriggerChannelReady {
- c = readyChannelStatus()
+ c = TestHelper.ReadyChannelStatus()
} else {
- c = notReadyChannelStatus()
+ c = TestHelper.NotReadyChannelStatus()
}
bs.PropagateTriggerChannelReadiness(c)
}
if test.markIngressChannelReady != nil {
var c *ChannelStatus
if *test.markIngressChannelReady {
- c = readyChannelStatus()
+ c = TestHelper.ReadyChannelStatus()
} else {
- c = notReadyChannelStatus()
+ c = TestHelper.NotReadyChannelStatus()
}
bs.PropagateIngressChannelReadiness(c)
}
if test.markIngressSubscriptionReady != nil {
var sub *SubscriptionStatus
if *test.markIngressSubscriptionReady {
- sub = readySubscriptionStatus()
+ sub = TestHelper.ReadySubscriptionStatus()
} else {
- sub = notReadySubscriptionStatus()
+ sub = TestHelper.NotReadySubscriptionStatus()
}
bs.PropagateIngressSubscriptionReadiness(sub)
}
if test.markFilterReady != nil {
var d *v1.Deployment
if *test.markFilterReady {
- d = availableDeployment()
+ d = TestHelper.AvailableDeployment()
} else {
- d = unavailableDeployment()
+ d = TestHelper.UnavailableDeployment()
}
bs.PropagateFilterDeploymentAvailability(d)
}
@@ -382,54 +382,3 @@ func TestBrokerIsReady(t *testing.T) {
})
}
}
-
-func unavailableDeployment() *v1.Deployment {
- d := &v1.Deployment{}
- d.Name = "unavailable"
- d.Status.Conditions = []v1.DeploymentCondition{
- {
- Type: v1.DeploymentAvailable,
- Status: "False",
- },
- }
- return d
-}
-
-func availableDeployment() *v1.Deployment {
- d := unavailableDeployment()
- d.Name = "available"
- d.Status.Conditions = []v1.DeploymentCondition{
- {
- Type: v1.DeploymentAvailable,
- Status: "True",
- },
- }
- return d
-}
-
-func readyChannelStatus() *ChannelStatus {
- cs := &ChannelStatus{}
- cs.MarkProvisionerInstalled()
- cs.MarkProvisioned()
- cs.SetAddress("foo")
- return cs
-}
-
-func notReadyChannelStatus() *ChannelStatus {
- cs := readyChannelStatus()
- cs.MarkNotProvisioned("foo", "bar")
- return cs
-}
-
-func readySubscriptionStatus() *SubscriptionStatus {
- ss := &SubscriptionStatus{}
- ss.MarkChannelReady()
- ss.MarkReferencesResolved()
- return ss
-}
-
-func notReadySubscriptionStatus() *SubscriptionStatus {
- ss := &SubscriptionStatus{}
- ss.MarkReferencesResolved()
- return ss
-}
diff --git a/pkg/apis/eventing/v1alpha1/test_helper.go b/pkg/apis/eventing/v1alpha1/test_helper.go
new file mode 100644
index 00000000000..3cbbea8716d
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/test_helper.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/apps/v1"
+)
+
+type testHelper struct{}
+
+// TestHelper contains helpers for unit tests.
+var TestHelper = testHelper{}
+
+func (testHelper) ReadyChannelStatus() *ChannelStatus {
+ cs := &ChannelStatus{}
+ cs.MarkProvisionerInstalled()
+ cs.MarkProvisioned()
+ cs.SetAddress("foo")
+ return cs
+}
+
+func (t testHelper) NotReadyChannelStatus() *ChannelStatus {
+ cs := t.ReadyChannelStatus()
+ cs.MarkNotProvisioned("foo", "bar")
+ return cs
+}
+
+func (testHelper) ReadySubscriptionStatus() *SubscriptionStatus {
+ ss := &SubscriptionStatus{}
+ ss.MarkChannelReady()
+ ss.MarkReferencesResolved()
+ return ss
+}
+
+func (testHelper) NotReadySubscriptionStatus() *SubscriptionStatus {
+ ss := &SubscriptionStatus{}
+ ss.MarkReferencesResolved()
+ return ss
+}
+
+func (t testHelper) ReadyBrokerStatus() *BrokerStatus {
+ bs := &BrokerStatus{}
+ bs.PropagateIngressDeploymentAvailability(t.AvailableDeployment())
+ bs.PropagateIngressChannelReadiness(t.ReadyChannelStatus())
+ bs.PropagateTriggerChannelReadiness(t.ReadyChannelStatus())
+ bs.PropagateIngressSubscriptionReadiness(t.ReadySubscriptionStatus())
+ bs.PropagateFilterDeploymentAvailability(t.AvailableDeployment())
+ bs.SetAddress("foo")
+ return bs
+}
+
+func (t testHelper) ReadyTriggerStatus() *TriggerStatus {
+ ts := &TriggerStatus{}
+ ts.InitializeConditions()
+ ts.SubscriberURI = "http://foo/"
+ ts.PropagateBrokerStatus(t.ReadyBrokerStatus())
+ ts.PropagateSubscriptionStatus(t.ReadySubscriptionStatus())
+ return ts
+}
+
+func (testHelper) UnavailableDeployment() *v1.Deployment {
+ d := &v1.Deployment{}
+ d.Name = "unavailable"
+ d.Status.Conditions = []v1.DeploymentCondition{
+ {
+ Type: v1.DeploymentAvailable,
+ Status: "False",
+ },
+ }
+ return d
+}
+
+func (t testHelper) AvailableDeployment() *v1.Deployment {
+ d := t.UnavailableDeployment()
+ d.Name = "available"
+ d.Status.Conditions = []v1.DeploymentCondition{
+ {
+ Type: v1.DeploymentAvailable,
+ Status: "True",
+ },
+ }
+ return d
+}
diff --git a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go b/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
index 07c20ffb99c..5e7e5997b39 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go
@@ -18,12 +18,13 @@ package v1alpha1
import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
-var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBrokerExists, TriggerConditionSubscribed)
+var triggerCondSet = duckv1alpha1.NewLivingConditionSet(TriggerConditionBroker, TriggerConditionSubscribed)
const (
// TriggerConditionReady has status True when all subconditions below have been set to True.
- TriggerConditionReady = duckv1alpha1.ConditionReady
- TriggerConditionBrokerExists duckv1alpha1.ConditionType = "BrokerExists"
+ TriggerConditionReady = duckv1alpha1.ConditionReady
+
+ TriggerConditionBroker duckv1alpha1.ConditionType = "Broker"
TriggerConditionSubscribed duckv1alpha1.ConditionType = "Subscribed"
@@ -46,16 +47,32 @@ func (ts *TriggerStatus) InitializeConditions() {
triggerCondSet.Manage(ts).InitializeConditions()
}
-func (ts *TriggerStatus) MarkBrokerExists() {
- triggerCondSet.Manage(ts).MarkTrue(TriggerConditionBrokerExists)
+func (ts *TriggerStatus) PropagateBrokerStatus(bs *BrokerStatus) {
+ if bs.IsReady() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionBroker)
+ } else {
+ msg := "nil"
+ if bc := brokerCondSet.Manage(bs).GetCondition(BrokerConditionReady); bc != nil {
+ msg = bc.Message
+ }
+ ts.MarkBrokerFailed("BrokerNotReady", "Broker is not ready: %s", msg)
+ }
}
-func (ts *TriggerStatus) MarkBrokerDoesNotExist() {
- triggerCondSet.Manage(ts).MarkFalse(TriggerConditionBrokerExists, "doesNotExist", "Broker does not exist")
+func (ts *TriggerStatus) MarkBrokerFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionBroker, reason, messageFormat, messageA...)
}
-func (ts *TriggerStatus) MarkSubscribed() {
- triggerCondSet.Manage(ts).MarkTrue(TriggerConditionSubscribed)
+func (ts *TriggerStatus) PropagateSubscriptionStatus(ss *SubscriptionStatus) {
+ if ss.IsReady() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionSubscribed)
+ } else {
+ msg := "nil"
+ if sc := subCondSet.Manage(ss).GetCondition(SubscriptionConditionReady); sc != nil {
+ msg = sc.Message
+ }
+ ts.MarkNotSubscribed("SubscriptionNotReady", "Subscription is not ready: %s", msg)
+ }
}
func (ts *TriggerStatus) MarkNotSubscribed(reason, messageFormat string, messageA ...interface{}) {
diff --git a/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
index 86759afbd12..c5c1b68e59f 100644
--- a/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
+++ b/pkg/apis/eventing/v1alpha1/trigger_lifecycle_test.go
@@ -30,8 +30,8 @@ var (
Status: corev1.ConditionTrue,
}
- triggerConditionBrokerExists = duckv1alpha1.Condition{
- Type: TriggerConditionBrokerExists,
+ triggerConditionBroker = duckv1alpha1.Condition{
+ Type: TriggerConditionBroker,
Status: corev1.ConditionTrue,
}
@@ -63,7 +63,7 @@ func TestTriggerGetCondition(t *testing.T) {
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{
- triggerConditionBrokerExists,
+ triggerConditionBroker,
triggerConditionSubscribed,
},
},
@@ -75,7 +75,7 @@ func TestTriggerGetCondition(t *testing.T) {
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{
- triggerConditionBrokerExists,
+ triggerConditionBroker,
triggerConditionSubscribed,
},
},
@@ -116,7 +116,7 @@ func TestTriggerInitializeConditions(t *testing.T) {
want: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
- Type: TriggerConditionBrokerExists,
+ Type: TriggerConditionBroker,
Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionReady,
@@ -132,7 +132,7 @@ func TestTriggerInitializeConditions(t *testing.T) {
ts: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
- Type: TriggerConditionBrokerExists,
+ Type: TriggerConditionBroker,
Status: corev1.ConditionFalse,
}},
},
@@ -140,7 +140,7 @@ func TestTriggerInitializeConditions(t *testing.T) {
want: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
- Type: TriggerConditionBrokerExists,
+ Type: TriggerConditionBroker,
Status: corev1.ConditionFalse,
}, {
Type: TriggerConditionReady,
@@ -164,7 +164,7 @@ func TestTriggerInitializeConditions(t *testing.T) {
want: &TriggerStatus{
Status: duckv1alpha1.Status{
Conditions: []duckv1alpha1.Condition{{
- Type: TriggerConditionBrokerExists,
+ Type: TriggerConditionBroker,
Status: corev1.ConditionUnknown,
}, {
Type: TriggerConditionReady,
@@ -228,10 +228,10 @@ func TestTriggerIsReady(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
ts := &TriggerStatus{}
if test.markBrokerExists {
- ts.MarkBrokerExists()
+ ts.PropagateBrokerStatus(TestHelper.ReadyBrokerStatus())
}
if test.markSubscribed {
- ts.MarkSubscribed()
+ ts.PropagateSubscriptionStatus(TestHelper.ReadySubscriptionStatus())
}
got := ts.IsReady()
if test.wantReady != got {
diff --git a/pkg/reconciler/v1alpha1/broker/broker_test.go b/pkg/reconciler/v1alpha1/broker/broker_test.go
index e5e37f13411..615912947a7 100644
--- a/pkg/reconciler/v1alpha1/broker/broker_test.go
+++ b/pkg/reconciler/v1alpha1/broker/broker_test.go
@@ -31,7 +31,6 @@ import (
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
- v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -845,13 +844,8 @@ func makeBroker() *v1alpha1.Broker {
func makeReadyBroker() *v1alpha1.Broker {
b := makeBroker()
- b.Status.InitializeConditions()
- b.Status.PropagateIngressDeploymentAvailability(makeAvailableDeployment())
- b.Status.PropagateTriggerChannelReadiness(makeReadyChannelStatus())
- b.Status.PropagateIngressChannelReadiness(makeReadyChannelStatus())
- b.Status.PropagateFilterDeploymentAvailability(makeAvailableDeployment())
+ b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
b.Status.SetAddress(fmt.Sprintf("%s-broker.%s.svc.%s", brokerName, testNS, utils.GetClusterDomainName()))
- b.Status.PropagateIngressSubscriptionReadiness(makeReadySubscriptionStatus())
return b
}
@@ -1057,30 +1051,3 @@ func getOwnerReference() metav1.OwnerReference {
BlockOwnerDeletion: &trueVal,
}
}
-
-func makeAvailableDeployment() *v1.Deployment {
- d := &v1.Deployment{}
- d.Name = "deployment-name"
- d.Status.Conditions = []v1.DeploymentCondition{
- {
- Type: v1.DeploymentAvailable,
- Status: "True",
- },
- }
- return d
-}
-
-func makeReadyChannelStatus() *v1alpha1.ChannelStatus {
- cs := &v1alpha1.ChannelStatus{}
- cs.MarkProvisionerInstalled()
- cs.MarkProvisioned()
- cs.SetAddress("foo")
- return cs
-}
-
-func makeReadySubscriptionStatus() *v1alpha1.SubscriptionStatus {
- ss := &v1alpha1.SubscriptionStatus{}
- ss.MarkChannelReady()
- ss.MarkReferencesResolved()
- return ss
-}
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger.go b/pkg/reconciler/v1alpha1/trigger/trigger.go
index ff60d792c16..24cc38bb9b3 100644
--- a/pkg/reconciler/v1alpha1/trigger/trigger.go
+++ b/pkg/reconciler/v1alpha1/trigger/trigger.go
@@ -229,10 +229,10 @@ func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
b, err := r.getBroker(ctx, t)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
- t.Status.MarkBrokerDoesNotExist()
+ t.Status.MarkBrokerFailed("DoesNotExist", "Broker does not exist")
return err
}
- t.Status.MarkBrokerExists()
+ t.Status.PropagateBrokerStatus(&b.Status)
brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
if err != nil {
@@ -258,13 +258,13 @@ func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
}
t.Status.SubscriberURI = subscriberURI
- _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
+ sub, err := r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
if err != nil {
logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
- t.Status.MarkNotSubscribed("notSubscribed", "%v", err)
+ t.Status.MarkNotSubscribed("NotSubscribed", "%v", err)
return err
}
- t.Status.MarkSubscribed()
+ t.Status.PropagateSubscriptionStatus(&sub.Status)
return nil
}
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger_test.go b/pkg/reconciler/v1alpha1/trigger/trigger_test.go
index e0e28920b68..cfad0508255 100644
--- a/pkg/reconciler/v1alpha1/trigger/trigger_test.go
+++ b/pkg/reconciler/v1alpha1/trigger/trigger_test.go
@@ -432,10 +432,10 @@ func TestReconcile(t *testing.T) {
Name: "Trigger reconciliation success",
InitialState: []runtime.Object{
makeTrigger(),
- makeBroker(),
+ makeReadyBroker(),
makeTriggerChannel(),
makeBrokerFilterService(),
- makeSameSubscription(),
+ makeReadySubscription(),
},
Objects: []runtime.Object{
makeSubscriberServiceAsUnstructured(),
@@ -606,10 +606,8 @@ func makeTrigger() *v1alpha1.Trigger {
func makeReadyTrigger() *v1alpha1.Trigger {
t := makeTrigger()
- t.Status.InitializeConditions()
- t.Status.MarkBrokerExists()
+ t.Status = *v1alpha1.TestHelper.ReadyTriggerStatus()
t.Status.SubscriberURI = fmt.Sprintf("http://%s.%s.svc.%s/", subscriberName, testNS, utils.GetClusterDomainName())
- t.Status.MarkSubscribed()
return t
}
@@ -644,6 +642,12 @@ func makeBroker() *v1alpha1.Broker {
}
}
+func makeReadyBroker() *v1alpha1.Broker {
+ b := makeBroker()
+ b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
+ return b
+}
+
func makeChannelProvisioner() *corev1.ObjectReference {
return &corev1.ObjectReference{
APIVersion: "eventing.knative.dev/v1alpha1",
@@ -704,8 +708,8 @@ func makeBrokerFilterService() *corev1.Service {
func makeServiceURI() *url.URL {
return &url.URL{
Scheme: "http",
- Host: "service-uri",
- Path: "/path",
+ Host: fmt.Sprintf("%s.%s.svc.%s", makeBrokerFilterService().Name, testNS, utils.GetClusterDomainName()),
+ Path: fmt.Sprintf("/triggers/%s/%s", testNS, triggerName),
}
}
@@ -717,6 +721,12 @@ func makeDifferentSubscription() *v1alpha1.Subscription {
return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeDifferentChannel(), makeServiceURI())
}
+func makeReadySubscription() *v1alpha1.Subscription {
+ s := makeSameSubscription()
+ s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
+ return s
+}
+
func getOwnerReference() metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: v1alpha1.SchemeGroupVersion.String(),
From bd4135e455c06f70a9fb723f032ee6a782230a83 Mon Sep 17 00:00:00 2001
From: akashrv <45154452+akashrv@users.noreply.github.com>
Date: Fri, 19 Apr 2019 14:30:26 -0700
Subject: [PATCH 36/76] Remove Istio dependency from Eventing (Part - 2)
(#1058)
* WIP
* WIP - In-memory working with E2E tests
* WIP - remove istio dependency from in-memory channel
* UTs pass, E2E tests pass with in-memory as well as kafka
* Fixed UTs that failed due to last K8s service change
* Removed unnecessary space from a line
* Adding istio annotation to test POD. This will be needed when running E2E
tests against channels other than in-memory
* Bug fix to set clusterIp of K8s service only when it is not of type ExternalName
* WIP kafka channel
* WIP kafka - UTs and E2E pass
More UTs needed
* Updated code based on PR comments
* WIP
* Updates based on PR comments
* Updates based on PR comments
* Fixed UTs
* Updated VENDOR_LICENSE
* WIP. Update fanout sidecar
* Merge from upstream master
* UTs pass, ITs passed. Code ready for PR
* Updates based on PR comments
* Changes based on PR comments
* Added back permission that was removed by mistake
* Remove istio references
* Removed one more reference of istio
* Updates based on PR comments. Ready to merge into master
* Fixed a typo
---
cmd/fanoutsidecar/main.go | 65 +-----
contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 5 +-
.../pkg/dispatcher/receiver/receiver.go | 10 +-
.../pkg/dispatcher/receiver/receiver_test.go | 11 +-
contrib/kafka/cmd/controller/main.go | 2 -
contrib/kafka/cmd/dispatcher/main.go | 38 ++--
contrib/kafka/config/kafka.yaml | 32 +--
contrib/kafka/main.go | 2 -
.../kafka/pkg/controller/channel/provider.go | 24 +--
.../kafka/pkg/controller/channel/reconcile.go | 140 +------------
.../pkg/controller/channel/reconcile_test.go | 75 +++----
contrib/kafka/pkg/dispatcher/dispatcher.go | 75 ++++++-
.../kafka/pkg/dispatcher/dispatcher_test.go | 102 ++++++++--
.../pkg/dispatcher/dispatcher/dispatcher.go | 6 +-
pkg/channelwatcher/channel_watcher.go | 66 +++++-
pkg/channelwatcher/channel_watcher_test.go | 189 ++++++++++++++++++
pkg/provisioners/channel_util.go | 8 +-
pkg/provisioners/message_receiver.go | 49 +++--
pkg/provisioners/message_receiver_test.go | 5 +-
pkg/sidecar/fanout/fanout_handler.go | 11 +-
pkg/sidecar/fanout/fanout_handler_test.go | 11 +-
pkg/sidecar/multichannelfanout/config.go | 57 ++++++
pkg/sidecar/multichannelfanout/config_test.go | 126 ++++++++++++
.../multi_channel_fanout_handler.go | 20 +-
24 files changed, 745 insertions(+), 384 deletions(-)
create mode 100644 pkg/channelwatcher/channel_watcher_test.go
create mode 100644 pkg/sidecar/multichannelfanout/config.go
create mode 100644 pkg/sidecar/multichannelfanout/config_test.go
diff --git a/cmd/fanoutsidecar/main.go b/cmd/fanoutsidecar/main.go
index 370289ffb5e..fc1927d0f01 100644
--- a/cmd/fanoutsidecar/main.go
+++ b/cmd/fanoutsidecar/main.go
@@ -29,15 +29,9 @@ import (
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/channelwatcher"
- "github.com/knative/eventing/pkg/logging"
- "github.com/knative/eventing/pkg/sidecar/fanout"
- "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
"github.com/knative/eventing/pkg/sidecar/swappable"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
@@ -137,48 +131,11 @@ func setupChannelWatcher(logger *zap.Logger, configUpdated swappable.UpdateConfi
logger.Error("Error while adding eventing scheme to manager.", zap.Error(err))
return nil, err
}
- channelwatcher.New(mgr, logger, updateChannelConfig(configUpdated))
+ channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(configUpdated, shouldWatch))
return mgr, nil
}
-func updateChannelConfig(updateConfig swappable.UpdateConfig) channelwatcher.WatchHandlerFunc {
- return func(ctx context.Context, c client.Client, chanNamespacedName types.NamespacedName) error {
- channels, err := listAllChannels(ctx, c)
- if err != nil {
- logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err))
- return err
- }
- config := multiChannelFanoutConfig(channels)
- return updateConfig(config)
- }
-}
-
-func listAllChannels(ctx context.Context, c client.Client) ([]v1alpha1.Channel, error) {
- channels := make([]v1alpha1.Channel, 0)
- for {
- cl := &v1alpha1.ChannelList{}
- opts := &client.ListOptions{
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
- if err := c.List(ctx, opts, cl); err != nil {
- return nil, err
- }
- for _, c := range cl.Items {
- if c.Status.IsReady() && shouldWatch(&c) {
- channels = append(channels, c)
- }
- }
- if cl.Continue != "" {
- opts.Raw.Continue = cl.Continue
- } else {
- return channels, nil
- }
- }
-}
-
func shouldWatch(ch *v1alpha1.Channel) bool {
if ch.Spec.Provisioner != nil && ch.Spec.Provisioner.Namespace == "" {
for _, v := range channelProvisioners {
@@ -190,26 +147,6 @@ func shouldWatch(ch *v1alpha1.Channel) bool {
return false
}
-func multiChannelFanoutConfig(channels []v1alpha1.Channel) *multichannelfanout.Config {
- cc := make([]multichannelfanout.ChannelConfig, 0)
- for _, c := range channels {
- channelConfig := multichannelfanout.ChannelConfig{
- Namespace: c.Namespace,
- Name: c.Name,
- HostName: c.Status.Address.Hostname,
- }
- if c.Spec.Subscribable != nil {
- channelConfig.FanoutConfig = fanout.Config{
- Subscriptions: c.Spec.Subscribable.Subscribers,
- }
- }
- cc = append(cc, channelConfig)
- }
- return &multichannelfanout.Config{
- ChannelConfigs: cc,
- }
-}
-
// runnableServer is a small wrapper around http.Server so that it matches the manager.Runnable
// interface.
type runnableServer struct {
diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
index a2a82a4e638..078e08bf2d4 100644
--- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
+++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
@@ -61,7 +61,10 @@ func main() {
// PubSub) and the dispatcher (takes messages in PubSub and sends them in cluster) in this
// binary.
- _, runnables := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
+ _, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
+ if err != nil {
+ logger.Fatal("Unable to create new receiver and runnable", zap.Error(err))
+ }
for _, runnable := range runnables {
err = mgr.Add(runnable)
if err != nil {
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
index 702ee7fd5f4..665bb80dda5 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
@@ -44,7 +44,7 @@ type Receiver struct {
// New creates a new Receiver and its associated MessageReceiver. The caller is responsible for
// Start()ing the returned MessageReceiver.
-func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable) {
+func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubSubClientCreator) (*Receiver, []manager.Runnable, error) {
r := &Receiver{
logger: logger,
client: client,
@@ -52,10 +52,14 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS
pubSubClientCreator: pubSubClientCreator,
cache: cache.NewTTL(),
}
- return r, []manager.Runnable{r.newMessageReceiver(), r.cache}
+ receiver, err := r.newMessageReceiver()
+ if err != nil {
+ return nil, nil, err
+ }
+ return r, []manager.Runnable{receiver, r.cache}, nil
}
-func (r *Receiver) newMessageReceiver() *provisioners.MessageReceiver {
+func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) {
return provisioners.NewMessageReceiver(r.sendEventToTopic, r.logger.Sugar())
}
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
index 6d9b2353b30..c4789c2c9ed 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
@@ -129,14 +129,21 @@ func TestReceiver(t *testing.T) {
}
for n, tc := range testCases {
t.Run(n, func(t *testing.T) {
- mr, _ := New(
+ mr, _, err := New(
zap.NewNop(),
fake.NewFakeClient(tc.initialState...),
fakepubsub.Creator(tc.pubSubData))
+ if err != nil {
+ t.Fatalf("Error when creating a New receiver. Error:%s", err)
+ }
resp := httptest.NewRecorder()
req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage))
req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName()
- mr.newMessageReceiver().HandleRequest(resp, req)
+ receiver, err := mr.newMessageReceiver()
+ if err != nil {
+ t.Fatalf("Error when creating a new message receiver. Error:%s", err)
+ }
+ receiver.HandleRequest(resp, req)
if tc.expectedErr {
if resp.Result().StatusCode >= 200 && resp.Result().StatusCode < 300 {
t.Errorf("Expected an error. Actual: %v", resp.Result())
diff --git a/contrib/kafka/cmd/controller/main.go b/contrib/kafka/cmd/controller/main.go
index 375361f4af3..be99d7231ef 100644
--- a/contrib/kafka/cmd/controller/main.go
+++ b/contrib/kafka/cmd/controller/main.go
@@ -8,7 +8,6 @@ import (
"github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -47,7 +46,6 @@ func _main() int {
// Add custom types to this array to get them into the manager's scheme.
schemeFuncs := []SchemeFunc{
eventingv1alpha.AddToScheme,
- istiov1alpha3.AddToScheme,
}
for _, schemeFunc := range schemeFuncs {
schemeFunc(mgr.GetScheme())
diff --git a/contrib/kafka/cmd/dispatcher/main.go b/contrib/kafka/cmd/dispatcher/main.go
index 9ef18689623..b0d1c2ac286 100644
--- a/contrib/kafka/cmd/dispatcher/main.go
+++ b/contrib/kafka/cmd/dispatcher/main.go
@@ -18,38 +18,25 @@ package main
import (
"flag"
- "fmt"
"log"
- "os"
+ "github.com/knative/eventing/contrib/kafka/pkg/controller"
provisionerController "github.com/knative/eventing/contrib/kafka/pkg/controller"
"github.com/knative/eventing/contrib/kafka/pkg/dispatcher"
- "github.com/knative/eventing/pkg/sidecar/configmap/watcher"
- "github.com/knative/eventing/pkg/utils"
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/channelwatcher"
"github.com/knative/pkg/signals"
- "github.com/knative/pkg/system"
"go.uber.org/zap"
- "k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
func main() {
- configMapName := os.Getenv("DISPATCHER_CONFIGMAP_NAME")
- if configMapName == "" {
- configMapName = provisionerController.DispatcherConfigMapName
- }
- configMapNamespace := os.Getenv("DISPATCHER_CONFIGMAP_NAMESPACE")
- if configMapNamespace == "" {
- configMapNamespace = system.Namespace()
- }
-
flag.Parse()
logger, err := zap.NewProduction()
if err != nil {
log.Fatalf("unable to create logger: %v", err)
}
-
provisionerConfig, err := provisionerController.GetProvisionerConfig("/etc/config-provisioner")
if err != nil {
logger.Fatal("unable to load provisioner config", zap.Error(err))
@@ -68,17 +55,12 @@ func main() {
logger.Fatal("Unable to add kafkaDispatcher", zap.Error(err))
}
- kc, err := kubernetes.NewForConfig(mgr.GetConfig())
- if err != nil {
- logger.Fatal("unable to create kubernetes client.", zap.Error(err))
+ if err := v1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+ logger.Fatal("Unable to add scheme for eventing apis.", zap.Error(err))
}
- cmw, err := watcher.NewWatcher(logger, kc, configMapNamespace, configMapName, kafkaDispatcher.UpdateConfig)
- if err != nil {
- logger.Fatal("unable to create configMap watcher", zap.String("configMap", fmt.Sprintf("%s/%s", configMapNamespace, configMapName)))
- }
- if err = mgr.Add(utils.NewBlockingStart(logger, cmw)); err != nil {
- logger.Fatal("Unable to add the configMap watcher to the manager", zap.Error(err))
+ if err := channelwatcher.New(mgr, logger, channelwatcher.UpdateConfigWatchHandler(kafkaDispatcher.UpdateConfig, shouldWatch)); err != nil {
+ logger.Fatal("Unable to create channel watcher.", zap.Error(err))
}
// set up signals so we handle the first shutdown signal gracefully
@@ -89,3 +71,9 @@ func main() {
}
logger.Info("Exiting...")
}
+
+func shouldWatch(ch *v1alpha1.Channel) bool {
+ return ch.Spec.Provisioner != nil &&
+ ch.Spec.Provisioner.Namespace == "" &&
+ ch.Spec.Provisioner.Name == controller.Name
+}
diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml
index dc28a8636da..31506c804de 100644
--- a/contrib/kafka/config/kafka.yaml
+++ b/contrib/kafka/config/kafka.yaml
@@ -68,22 +68,12 @@ rules:
verbs:
- update
- apiGroups:
- - networking.istio.io
+ - "" # Core API Group.
resources:
- - virtualservices
+ - events
verbs:
- - get
- - list
- - watch
- - create
- - update
- - apiGroups:
- - "" # Core API Group.
- resources:
- - events
- verbs:
- - create
- - patch
+ - create
+ - patch
- update
---
@@ -170,6 +160,13 @@ rules:
- get
- list
- watch
+ - apiGroups:
+ - eventing.knative.dev
+ resources:
+ - channels
+ verbs:
+ - list
+ - watch
---
@@ -211,13 +208,6 @@ spec:
containers:
- name: dispatcher
image: github.com/knative/eventing/contrib/kafka/cmd/dispatcher
- env:
- - name: DISPATCHER_CONFIGMAP_NAME
- value: kafka-channel-dispatcher
- - name: DISPATCHER_CONFIGMAP_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
volumeMounts:
- name: kafka-channel-controller-config
mountPath: /etc/config-provisioner
diff --git a/contrib/kafka/main.go b/contrib/kafka/main.go
index ed98481c20b..62df224cc98 100644
--- a/contrib/kafka/main.go
+++ b/contrib/kafka/main.go
@@ -8,7 +8,6 @@ import (
"github.com/knative/eventing/contrib/kafka/pkg/controller/channel"
eventingv1alpha "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -47,7 +46,6 @@ func main() {
// Add custom types to this array to get them into the manager's scheme.
schemeFuncs := []SchemeFunc{
eventingv1alpha.AddToScheme,
- istiov1alpha3.AddToScheme,
}
for _, schemeFunc := range schemeFuncs {
schemeFunc(mgr.GetScheme())
diff --git a/contrib/kafka/pkg/controller/channel/provider.go b/contrib/kafka/pkg/controller/channel/provider.go
index 7c9d413d246..73eab2e8d22 100644
--- a/contrib/kafka/pkg/controller/channel/provider.go
+++ b/contrib/kafka/pkg/controller/channel/provider.go
@@ -18,7 +18,6 @@ package channel
import (
"github.com/Shopify/sarama"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -49,11 +48,10 @@ var (
)
type reconciler struct {
- client client.Client
- recorder record.EventRecorder
- logger *zap.Logger
- config *common.KafkaProvisionerConfig
- configMapKey client.ObjectKey
+ client client.Client
+ recorder record.EventRecorder
+ logger *zap.Logger
+ config *common.KafkaProvisionerConfig
// Using a shared kafkaClusterAdmin does not work currently because of an issue with
// Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162.
kafkaClusterAdmin sarama.ClusterAdmin
@@ -67,10 +65,9 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi
// Setup a new controller to Reconcile Channel.
c, err := controller.New(controllerAgentName, mgr, controller.Options{
Reconciler: &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
- config: config,
- configMapKey: defaultConfigMapKey,
+ recorder: mgr.GetRecorder(controllerAgentName),
+ logger: logger,
+ config: config,
},
})
if err != nil {
@@ -89,13 +86,6 @@ func ProvideController(mgr manager.Manager, config *common.KafkaProvisionerConfi
return nil, err
}
- // Watch the VirtualServices that are owned by Channels.
- err = c.Watch(&source.Kind{Type: &istiov1alpha3.VirtualService{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true})
- if err != nil {
- logger.Error("unable to watch VirtualServices.", zap.Error(err))
- return nil, err
- }
-
return c, nil
}
diff --git a/contrib/kafka/pkg/controller/channel/reconcile.go b/contrib/kafka/pkg/controller/channel/reconcile.go
index cf64955bc43..34a7e1c9b71 100644
--- a/contrib/kafka/pkg/controller/channel/reconcile.go
+++ b/contrib/kafka/pkg/controller/channel/reconcile.go
@@ -23,10 +23,8 @@ import (
"github.com/Shopify/sarama"
"go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -35,10 +33,6 @@ import (
util "github.com/knative/eventing/pkg/provisioners"
topicUtils "github.com/knative/eventing/pkg/provisioners/utils"
eventingNames "github.com/knative/eventing/pkg/reconciler/names"
- "github.com/knative/eventing/pkg/sidecar/configmap"
- "github.com/knative/eventing/pkg/sidecar/fanout"
- "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
- "k8s.io/apimachinery/pkg/api/equality"
)
const (
@@ -97,30 +91,28 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
return reconcile.Result{}, nil
}
- newChannel := channel.DeepCopy()
-
- newChannel.Status.InitializeConditions()
+ channel.Status.InitializeConditions()
var requeue = false
if clusterChannelProvisioner.Status.IsReady() {
// Reconcile this copy of the Channel and then write back any status
// updates regardless of whether the reconcile error out.
- requeue, err = r.reconcile(ctx, newChannel)
+ requeue, err = r.reconcile(ctx, channel)
} else {
- newChannel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name)
+ channel.Status.MarkNotProvisioned("NotProvisioned", "ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name)
err = fmt.Errorf("ClusterChannelProvisioner %s is not ready", clusterChannelProvisioner.Name)
}
if err != nil {
r.logger.Error("Dispatcher reconciliation failed", zap.Error(err))
- r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err)
+ r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherReconcileFailed, "Dispatcher reconciliation failed: %v", err)
} else {
r.logger.Debug("Channel reconciled")
}
- if updateChannelErr := util.UpdateChannel(ctx, r.client, newChannel); updateChannelErr != nil {
+ if updateChannelErr := util.UpdateChannel(ctx, r.client, channel); updateChannelErr != nil {
r.logger.Info("failed to update channel status", zap.Error(updateChannelErr))
- r.recorder.Eventf(newChannel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err)
+ r.recorder.Eventf(channel, v1.EventTypeWarning, dispatcherUpdateStatusFailed, "Failed to update Channel's dispatcher status: %v", err)
return reconcile.Result{}, updateChannelErr
}
@@ -134,13 +126,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
// boolean indicates if this Channel should be immediately requeued for another reconcile loop. The
// returned error indicates an error during reconciliation.
func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Channel) (bool, error) {
-
- // We always need to sync the Channel config, so do it first.
- if err := r.syncChannelConfig(ctx); err != nil {
- r.logger.Info("error updating syncing the Channel config", zap.Error(err))
- return false, err
- }
-
// We don't currently initialize r.kafkaClusterAdmin, hence we end up creating the cluster admin client every time.
// This is because of an issue with Shopify/sarama. See https://github.com/Shopify/sarama/issues/1162.
// Once the issue is fixed we should use a shared cluster admin client. Also, r.kafkaClusterAdmin is currently
@@ -177,19 +162,12 @@ func (r *reconciler) reconcile(ctx context.Context, channel *eventingv1alpha1.Ch
return false, err
}
- svc, err := util.CreateK8sService(ctx, r.client, channel)
+ svc, err := util.CreateK8sService(ctx, r.client, channel, util.ExternalService(channel))
if err != nil {
r.logger.Info("error creating the Channel's K8s Service", zap.Error(err))
return false, err
}
channel.Status.SetAddress(eventingNames.ServiceHostName(svc.Name, svc.Namespace))
-
- _, err = util.CreateVirtualService(ctx, r.client, channel, svc)
- if err != nil {
- r.logger.Info("error creating the Virtual Service for the Channel", zap.Error(err))
- return false, err
- }
-
channel.Status.MarkProvisioned()
// close the connection
@@ -268,110 +246,6 @@ func (r *reconciler) getClusterChannelProvisioner() (*eventingv1alpha1.ClusterCh
return clusterChannelProvisioner, nil
}
-func (r *reconciler) syncChannelConfig(ctx context.Context) error {
- channels, err := r.listAllChannels(ctx)
- if err != nil {
- r.logger.Info("Unable to list channels", zap.Error(err))
- return err
- }
- config := multiChannelFanoutConfig(channels)
- return r.writeConfigMap(ctx, config)
-}
-
-func (r *reconciler) writeConfigMap(ctx context.Context, config *multichannelfanout.Config) error {
- logger := r.logger.With(zap.Any("configMap", r.configMapKey))
-
- updated, err := configmap.SerializeConfig(*config)
- if err != nil {
- r.logger.Error("Unable to serialize config", zap.Error(err), zap.Any("config", config))
- return err
- }
-
- cm := &corev1.ConfigMap{}
- err = r.client.Get(ctx, r.configMapKey, cm)
- if errors.IsNotFound(err) {
- cm = r.createNewConfigMap(updated)
- err = r.client.Create(ctx, cm)
- if err != nil {
- logger.Info("Unable to create ConfigMap", zap.Error(err))
- return err
- }
- }
- if err != nil {
- logger.Info("Unable to get ConfigMap", zap.Error(err))
- return err
- }
-
- if equality.Semantic.DeepEqual(cm.Data, updated) {
- // Nothing to update.
- return nil
- }
-
- cm.Data = updated
- return r.client.Update(ctx, cm)
-}
-
-func (r *reconciler) createNewConfigMap(data map[string]string) *corev1.ConfigMap {
- return &corev1.ConfigMap{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: r.configMapKey.Namespace,
- Name: r.configMapKey.Name,
- },
- Data: data,
- }
-}
-
-func multiChannelFanoutConfig(channels []eventingv1alpha1.Channel) *multichannelfanout.Config {
- cc := make([]multichannelfanout.ChannelConfig, 0)
- for _, c := range channels {
- channelConfig := multichannelfanout.ChannelConfig{
- Namespace: c.Namespace,
- Name: c.Name,
- }
- if c.Spec.Subscribable != nil {
- channelConfig.FanoutConfig = fanout.Config{
- Subscriptions: c.Spec.Subscribable.Subscribers,
- }
- }
- cc = append(cc, channelConfig)
- }
- return &multichannelfanout.Config{
- ChannelConfigs: cc,
- }
-}
-
-func (r *reconciler) listAllChannels(ctx context.Context) ([]eventingv1alpha1.Channel, error) {
- clusterChannelProvisioner, err := r.getClusterChannelProvisioner()
- if err != nil {
- return nil, err
- }
-
- channels := make([]eventingv1alpha1.Channel, 0)
-
- opts := &client.ListOptions{
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
- for {
- cl := &eventingv1alpha1.ChannelList{}
- if err = r.client.List(ctx, opts, cl); err != nil {
- return nil, err
- }
-
- for _, c := range cl.Items {
- if r.shouldReconcile(&c, clusterChannelProvisioner) {
- channels = append(channels, c)
- }
- }
- if cl.Continue != "" {
- opts.Raw.Continue = cl.Continue
- } else {
- return channels, nil
- }
- }
-}
-
func createKafkaAdminClient(config *controller.KafkaProvisionerConfig) (sarama.ClusterAdmin, error) {
saramaConf := sarama.NewConfig()
saramaConf.Version = sarama.V1_1_0_0
diff --git a/contrib/kafka/pkg/controller/channel/reconcile_test.go b/contrib/kafka/pkg/controller/channel/reconcile_test.go
index aeec9ab7de1..02836e06a54 100644
--- a/contrib/kafka/pkg/controller/channel/reconcile_test.go
+++ b/contrib/kafka/pkg/controller/channel/reconcile_test.go
@@ -30,10 +30,11 @@ import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
util "github.com/knative/eventing/pkg/provisioners"
+ "github.com/knative/eventing/pkg/reconciler/names"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/utils"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/pkg/system"
_ "github.com/knative/pkg/system/testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -70,7 +71,6 @@ var (
func init() {
// Add types to scheme
eventingv1alpha1.AddToScheme(scheme.Scheme)
- istiov1alpha3.AddToScheme(scheme.Scheme)
}
var mockFetchError = controllertesting.Mocks{
@@ -142,7 +142,6 @@ var testCases = []controllertesting.TestCase{
InitialState: []runtime.Object{
getNewClusterChannelProvisioner(clusterChannelProvisionerName, true),
getNewChannel(channelName, clusterChannelProvisionerName),
- makeVirtualService(),
},
WantResult: reconcile.Result{
Requeue: true,
@@ -156,10 +155,10 @@ var testCases = []controllertesting.TestCase{
InitialState: []runtime.Object{
getNewClusterChannelProvisioner(clusterChannelProvisionerName, true),
getNewChannelWithStatusAndFinalizer(channelName, clusterChannelProvisionerName),
- makeVirtualService(),
},
WantPresent: []runtime.Object{
getNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName),
+ makeK8sService(),
},
},
{
@@ -523,18 +522,35 @@ func getNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alpha
return clusterChannelProvisioner
}
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return &istiov1alpha3.VirtualService{
+func om(namespace, name string) metav1.ObjectMeta {
+ return metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name),
+ UID: testUID,
+ }
+}
+
+func getControllerConfig() *controller.KafkaProvisionerConfig {
+ return &controller.KafkaProvisionerConfig{
+ Brokers: []string{"test-broker"},
+ }
+}
+
+func makeK8sService() *corev1.Service {
+ return &corev1.Service{
TypeMeta: metav1.TypeMeta{
- APIVersion: istiov1alpha3.SchemeGroupVersion.String(),
- Kind: "VirtualService",
+ APIVersion: "v1",
+ Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-channel", testNS),
- Namespace: testNS,
+ GenerateName: fmt.Sprintf("%s-channel-", channelName),
+ Namespace: testNS,
Labels: map[string]string{
- "channel": channelName,
- "provisioner": clusterChannelProvisionerName,
+ util.EventingChannelLabel: channelName,
+ util.OldEventingChannelLabel: channelName,
+ util.EventingProvisionerLabel: clusterChannelProvisionerName,
+ util.OldEventingProvisionerLabel: clusterChannelProvisionerName,
},
OwnerReferences: []metav1.OwnerReference{
{
@@ -547,38 +563,9 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
},
},
},
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- serviceAddress,
- fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.HTTPRouteDestination{{
- Destination: istiov1alpha3.Destination{
- Host: "kafka-provisioner.knative-testing.svc." + utils.GetClusterDomainName(),
- Port: istiov1alpha3.PortSelector{
- Number: util.PortNumber,
- },
- }},
- }},
- },
+ Spec: corev1.ServiceSpec{
+ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", clusterChannelProvisionerName), system.Namespace()),
+ Type: "ExternalName",
},
}
}
-
-func om(namespace, name string) metav1.ObjectMeta {
- return metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name),
- }
-}
-
-func getControllerConfig() *controller.KafkaProvisionerConfig {
- return &controller.KafkaProvisionerConfig{
- Brokers: []string{"test-broker"},
- }
-}
diff --git a/contrib/kafka/pkg/dispatcher/dispatcher.go b/contrib/kafka/pkg/dispatcher/dispatcher.go
index a7f362f2575..718abd88584 100644
--- a/contrib/kafka/pkg/dispatcher/dispatcher.go
+++ b/contrib/kafka/pkg/dispatcher/dispatcher.go
@@ -34,8 +34,10 @@ import (
)
type KafkaDispatcher struct {
- config atomic.Value
- updateLock sync.Mutex
+ // TODO: config doesn't have to be atomic as it is read and updated using updateLock.
+ config atomic.Value
+ hostToChannelMap atomic.Value
+ updateLock sync.Mutex
receiver *provisioners.MessageReceiver
dispatcher *provisioners.MessageDispatcher
@@ -83,10 +85,10 @@ type subscription struct {
ReplyURI string
}
-// ConfigDiff diffs the new config with the existing config. If there are no differences, then the
+// configDiff diffs the new config with the existing config. If there are no differences, then the
// empty string is returned. If there are differences, then a non-empty string is returned
// describing the differences.
-func (d *KafkaDispatcher) ConfigDiff(updated *multichannelfanout.Config) string {
+func (d *KafkaDispatcher) configDiff(updated *multichannelfanout.Config) string {
return cmp.Diff(d.getConfig(), updated)
}
@@ -98,12 +100,21 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error
d.updateLock.Lock()
defer d.updateLock.Unlock()
- if diff := d.ConfigDiff(config); diff != "" {
+ if diff := d.configDiff(config); diff != "" {
d.logger.Info("Updating config (-old +new)", zap.String("diff", diff))
+ // Create hostToChannelMap before updating kafkaConsumers.
+ // But update the map only after updating kafkaConsumers.
+ hcMap, err := createHostToChannelMap(config)
+ if err != nil {
+ return err
+ }
+
newSubs := make(map[subscription]bool)
- // Subscribe to new subscriptions
+ // Subscribe to new subscriptions.
+ // TODO: Error returned by subscribe/unsubscribe must be handled.
+ // https://github.com/knative/eventing/issues/1072.
for _, cc := range config.ChannelConfigs {
channelRef := provisioners.ChannelReference{
Name: cc.Name,
@@ -129,6 +140,9 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error
}
}
}
+ // At this point all updates are done and hostToChannelMap is created successfully.
+ // Update the atomic value.
+ d.setHostToChannelMap(hcMap)
// Update the config so that it can be used for comparison during next sync
d.setConfig(config)
@@ -136,6 +150,23 @@ func (d *KafkaDispatcher) UpdateConfig(config *multichannelfanout.Config) error
return nil
}
+func createHostToChannelMap(config *multichannelfanout.Config) (map[string]provisioners.ChannelReference, error) {
+ hcMap := make(map[string]provisioners.ChannelReference, len(config.ChannelConfigs))
+ for _, cConfig := range config.ChannelConfigs {
+ if cr, ok := hcMap[cConfig.HostName]; ok {
+ return nil, fmt.Errorf(
+ "duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s",
+ cConfig.HostName,
+ cConfig.Namespace,
+ cConfig.Name,
+ cr.Namespace,
+ cr.Name)
+ }
+ hcMap[cConfig.HostName] = provisioners.ChannelReference{Name: cConfig.Name, Namespace: cConfig.Namespace}
+ }
+ return hcMap, nil
+}
+
// Start starts the kafka dispatcher's message processing.
func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error {
if d.receiver == nil {
@@ -162,6 +193,8 @@ func (d *KafkaDispatcher) Start(stopCh <-chan struct{}) error {
return d.receiver.Start(stopCh)
}
+// subscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine.
+// subscribe must be called under updateLock.
func (d *KafkaDispatcher) subscribe(channelRef provisioners.ChannelReference, sub subscription) error {
d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub))
@@ -234,6 +267,8 @@ func (d *KafkaDispatcher) dispatch(channelRef provisioners.ChannelReference, sub
return err
}
+// unsubscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine.
+// unsubscribe must be called under updateLock.
func (d *KafkaDispatcher) unsubscribe(channel provisioners.ChannelReference, sub subscription) error {
d.logger.Info("Unsubscribing from channel", zap.Any("channel", channel), zap.Any("subscription", sub))
if consumer, ok := d.kafkaConsumers[channel][sub]; ok {
@@ -257,8 +292,15 @@ func (d *KafkaDispatcher) setConfig(config *multichannelfanout.Config) {
d.config.Store(config)
}
-func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) {
+func (d *KafkaDispatcher) getHostToChannelMap() map[string]provisioners.ChannelReference {
+ return d.hostToChannelMap.Load().(map[string]provisioners.ChannelReference)
+}
+func (d *KafkaDispatcher) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) {
+ d.hostToChannelMap.Store(hcMap)
+}
+
+func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *zap.Logger) (*KafkaDispatcher, error) {
conf := sarama.NewConfig()
conf.Version = sarama.V1_1_0_0
conf.ClientID = controller.Name + "-dispatcher"
@@ -281,16 +323,31 @@ func NewDispatcher(brokers []string, consumerMode cluster.ConsumerMode, logger *
logger: logger,
}
- receiverFunc := provisioners.NewMessageReceiver(
+ receiverFunc, err := provisioners.NewMessageReceiver(
func(channel provisioners.ChannelReference, message *provisioners.Message) error {
dispatcher.kafkaAsyncProducer.Input() <- toKafkaMessage(channel, message)
return nil
- }, logger.Sugar())
+ },
+ logger.Sugar(),
+ provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(dispatcher.getChannelReferenceFromHost)))
+ if err != nil {
+ return nil, err
+ }
dispatcher.receiver = receiverFunc
dispatcher.setConfig(&multichannelfanout.Config{})
+ dispatcher.setHostToChannelMap(map[string]provisioners.ChannelReference{})
return dispatcher, nil
}
+func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) {
+ chMap := d.getHostToChannelMap()
+ cr, ok := chMap[host]
+ if !ok {
+ return cr, fmt.Errorf("invalid Hostname:%s. Hostname not found in ConfigMap for any Channel", host)
+ }
+ return cr, nil
+}
+
func fromKafkaMessage(kafkaMessage *sarama.ConsumerMessage) *provisioners.Message {
headers := make(map[string]string)
for _, header := range kafkaMessage.Headers {
diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
index 04e312777e4..eda73205238 100644
--- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go
+++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
@@ -182,23 +182,29 @@ func (c *mockSaramaCluster) GetConsumerMode() cluster.ConsumerMode {
func TestDispatcher_UpdateConfig(t *testing.T) {
testCases := []struct {
- name string
- oldConfig *multichannelfanout.Config
- newConfig *multichannelfanout.Config
- subscribes []string
- unsubscribes []string
- createErr string
+ name string
+ oldConfig *multichannelfanout.Config
+ newConfig *multichannelfanout.Config
+ subscribes []string
+ unsubscribes []string
+ createErr string
+ oldHostToChanMap map[string]provisioners.ChannelReference
+ newHostToChanMap map[string]provisioners.ChannelReference
}{
{
- name: "nil config",
- oldConfig: &multichannelfanout.Config{},
- newConfig: nil,
- createErr: "nil config",
+ name: "nil config",
+ oldConfig: &multichannelfanout.Config{},
+ newConfig: nil,
+ createErr: "nil config",
+ oldHostToChanMap: map[string]provisioners.ChannelReference{},
+ newHostToChanMap: map[string]provisioners.ChannelReference{},
},
{
- name: "same config",
- oldConfig: &multichannelfanout.Config{},
- newConfig: &multichannelfanout.Config{},
+ name: "same config",
+ oldConfig: &multichannelfanout.Config{},
+ newConfig: &multichannelfanout.Config{},
+ oldHostToChanMap: map[string]provisioners.ChannelReference{},
+ newHostToChanMap: map[string]provisioners.ChannelReference{},
},
{
name: "config with no subscription",
@@ -208,9 +214,14 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel",
+ HostName: "a.b.c.d",
},
},
},
+ oldHostToChanMap: map[string]provisioners.ChannelReference{},
+ newHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ },
},
{
name: "single channel w/ new subscriptions",
@@ -220,6 +231,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel",
+ HostName: "a.b.c.d",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -235,7 +247,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
},
},
},
- subscribes: []string{"subscription-1", "subscription-2"},
+ subscribes: []string{"subscription-1", "subscription-2"},
+ oldHostToChanMap: map[string]provisioners.ChannelReference{},
+ newHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ },
},
{
name: "single channel w/ existing subscriptions",
@@ -244,6 +260,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel",
+ HostName: "a.b.c.d",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -260,6 +277,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel",
+ HostName: "a.b.c.d",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -277,6 +295,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
},
subscribes: []string{"subscription-2", "subscription-3"},
unsubscribes: []string{"subscription-1"},
+ oldHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ },
+ newHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ },
},
{
name: "multi channel w/old and new subscriptions",
@@ -285,6 +309,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel-1",
+ HostName: "a.b.c.d",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -302,6 +327,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel-1",
+ HostName: "a.b.c.d",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -314,6 +340,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
{
Namespace: "default",
Name: "test-channel-2",
+ HostName: "e.f.g.h",
FanoutConfig: fanout.Config{
Subscriptions: []eventingduck.ChannelSubscriberSpec{
{
@@ -331,6 +358,33 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
},
subscribes: []string{"subscription-1", "subscription-3", "subscription-4"},
unsubscribes: []string{"subscription-2"},
+ oldHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"},
+ },
+ newHostToChanMap: map[string]provisioners.ChannelReference{
+ "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"},
+ "e.f.g.h": provisioners.ChannelReference{Name: "test-channel-2", Namespace: "default"},
+ },
+ },
+ {
+ name: "Duplicate hostnames",
+ oldConfig: &multichannelfanout.Config{},
+ newConfig: &multichannelfanout.Config{
+ ChannelConfigs: []multichannelfanout.ChannelConfig{
+ {
+ Namespace: "default",
+ Name: "test-channel-1",
+ HostName: "a.b.c.d",
+ },
+ {
+ Namespace: "default",
+ Name: "test-channel-2",
+ HostName: "a.b.c.d",
+ },
+ },
+ },
+ createErr: "duplicate hostName found. Each channel must have a unique host header. HostName:a.b.c.d, channel:default.test-channel-2, channel:default.test-channel-1",
+ oldHostToChanMap: map[string]provisioners.ChannelReference{},
},
}
@@ -344,10 +398,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
logger: zap.NewNop(),
}
d.setConfig(&multichannelfanout.Config{})
+ d.setHostToChannelMap(map[string]provisioners.ChannelReference{})
// Initialize using oldConfig
err := d.UpdateConfig(tc.oldConfig)
if err != nil {
+
t.Errorf("unexpected error: %v", err)
}
oldSubscribers := sets.NewString()
@@ -359,6 +415,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
if diff := sets.NewString(tc.unsubscribes...).Difference(oldSubscribers); diff.Len() != 0 {
t.Errorf("subscriptions %+v were never subscribed", diff)
}
+ if diff := cmp.Diff(tc.oldConfig, d.getConfig()); diff != "" {
+ t.Errorf("unexpected config (-want, +got) = %v", diff)
+ }
+ if diff := cmp.Diff(tc.oldHostToChanMap, d.getHostToChannelMap()); diff != "" {
+ t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff)
+ }
// Update with new config
err = d.UpdateConfig(tc.newConfig)
@@ -383,6 +445,12 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
if diff := cmp.Diff(tc.subscribes, newSubscribers, sortStrings); diff != "" {
t.Errorf("unexpected subscribers (-want, +got) = %v", diff)
}
+ if diff := cmp.Diff(tc.newHostToChanMap, d.getHostToChannelMap()); diff != "" {
+ t.Errorf("unexpected hostToChannelMap (-want, +got) = %v", diff)
+ }
+ if diff := cmp.Diff(tc.newConfig, d.getConfig()); diff != "" {
+ t.Errorf("unexpected config (-want, +got) = %v", diff)
+ }
})
}
@@ -604,9 +672,13 @@ func TestKafkaDispatcher_Start(t *testing.T) {
t.Errorf("Expected error want %s, got %s", "message receiver is not set", err)
}
- d.receiver = provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error {
+ receiver, err := provisioners.NewMessageReceiver(func(channel provisioners.ChannelReference, message *provisioners.Message) error {
return nil
}, zap.NewNop().Sugar())
+ if err != nil {
+ t.Fatalf("Error creating new message receiver. Error:%s", err)
+ }
+ d.receiver = receiver
err = d.Start(make(chan struct{}))
if err == nil {
t.Errorf("Expected error want %s, got %s", "kafkaAsyncProducer is not set", err)
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
index f9c8963a4f0..28a0b9ad7de 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
@@ -71,7 +71,11 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio
clusterID: clusterID,
subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription),
}
- d.receiver = provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar())
+ receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar())
+ if err != nil {
+ return nil, err
+ }
+ d.receiver = receiver
return d, nil
}
diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go
index b9a77670ce8..26b77b362b1 100644
--- a/pkg/channelwatcher/channel_watcher.go
+++ b/pkg/channelwatcher/channel_watcher.go
@@ -1,23 +1,37 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
package channelwatcher
import (
"context"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
+ "github.com/knative/eventing/pkg/sidecar/swappable"
"go.uber.org/zap"
+ "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
-type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error
-
type reconciler struct {
client client.Client
logger *zap.Logger
@@ -34,6 +48,7 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error)
return reconcile.Result{}, nil
}
+// New creates a new instance of Channel Watcher that watches channels and calls the watchHandler on add, update, delete and generic event
func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc) error {
c, err := controller.New("ChannelWatcher", mgr, controller.Options{
Reconciler: &reconciler{
@@ -57,3 +72,42 @@ func New(mgr manager.Manager, logger *zap.Logger, watchHandler WatchHandlerFunc)
}
return nil
}
+
+// WatchHandlerFunc is called whenever an add, update, delete or generic event is triggered on a channel
+type WatchHandlerFunc func(context.Context, client.Client, types.NamespacedName) error
+
+// ShouldWatchFunc is called while returning list of channels.
+// Channels are included in the list if the return value is true.
+type ShouldWatchFunc func(ch *v1alpha1.Channel) bool
+
+// UpdateConfigWatchHandler is a special handler that
+// 1. Lists the channels for which shouldWatch returns true.
+// 2. Creates a multi-channel-fanout-config.
+// 3. Calls the updateConfig func with the new multi-channel-fanout-config.
+// This is used by dispatchers or receivers to update their configs by watching channels.
+func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc {
+ return func(ctx context.Context, c client.Client, _ types.NamespacedName) error {
+ channels, err := listAllChannels(ctx, c, shouldWatch)
+ if err != nil {
+ logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err))
+ return err
+ }
+ config := multichannelfanout.NewConfigFromChannels(channels)
+ return updateConfig(config)
+ }
+}
+
+// listAllChannels queries client and gets list of all channels for which shouldWatch returns true.
+func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) {
+ channels := make([]v1alpha1.Channel, 0)
+ cl := &v1alpha1.ChannelList{}
+ if err := c.List(ctx, &client.ListOptions{}, cl); err != nil {
+ return nil, err
+ }
+ for _, c := range cl.Items {
+ if c.Status.IsReady() && shouldWatch(&c) {
+ channels = append(channels, c)
+ }
+ }
+ return channels, nil
+}
diff --git a/pkg/channelwatcher/channel_watcher_test.go b/pkg/channelwatcher/channel_watcher_test.go
new file mode 100644
index 00000000000..5ff41cec642
--- /dev/null
+++ b/pkg/channelwatcher/channel_watcher_test.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package channelwatcher
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/eventing/pkg/sidecar/fanout"
+ "github.com/knative/eventing/pkg/sidecar/multichannelfanout"
+ "github.com/knative/eventing/pkg/sidecar/swappable"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
+)
+
+func init() {
+ // Add types to scheme
+ _ = v1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestUpdateConfigWatchHandler(t *testing.T) {
+ tests := []struct {
+ name string
+ channels []runtime.Object
+ clientListError error
+ updateConfigError error
+ expectedConfig *multichannelfanout.Config
+ }{
+ {
+ name: "Client list error",
+ clientListError: fmt.Errorf("Client list error"),
+ },
+ {
+ name: "update config error",
+ updateConfigError: fmt.Errorf("error updating config"),
+ expectedConfig: &multichannelfanout.Config{
+ ChannelConfigs: []multichannelfanout.ChannelConfig{},
+ },
+ },
+ {
+ name: "Successfully update config",
+ channels: []runtime.Object{
+ makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))),
+ makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))),
+ makeChannel("chan-3", "donotwatch", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))),
+ },
+ expectedConfig: &multichannelfanout.Config{
+ ChannelConfigs: []multichannelfanout.ChannelConfig{
+ {
+ Name: "chan-1",
+ Namespace: "ns-1",
+ HostName: "e.f.g.h",
+ FanoutConfig: fanout.Config{
+ Subscriptions: []eventingduck.ChannelSubscriberSpec{
+ makeSubscriber("sub1"),
+ makeSubscriber("sub2"),
+ },
+ },
+ }, {
+ Name: "chan-2",
+ Namespace: "ns-2",
+ HostName: "i.j.k.l",
+ FanoutConfig: fanout.Config{
+ Subscriptions: []eventingduck.ChannelSubscriberSpec{
+ makeSubscriber("sub3"),
+ makeSubscriber("sub4"),
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ actualConfig := ConfigHolder{}
+ watchHandler := UpdateConfigWatchHandler(updateConfigWrapper(&actualConfig, test.updateConfigError), shouldWatch)
+ mockClient := getClient(test.channels, getClientMocks(test.clientListError))
+
+ actualError := watchHandler(context.TODO(), mockClient, types.NamespacedName{})
+ if actualError != nil {
+ if test.clientListError != nil {
+ if diff := cmp.Diff(test.clientListError.Error(), actualError.Error()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ }
+ if test.updateConfigError != nil {
+ if diff := cmp.Diff(test.updateConfigError.Error(), actualError.Error()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ }
+ } else {
+ if test.clientListError != nil {
+ t.Fatalf("Want error %v \n Got nil", test.clientListError)
+ }
+ if test.updateConfigError != nil {
+ t.Fatalf("Want error %v \n Got nil", test.updateConfigError)
+ }
+ }
+ if diff := cmp.Diff(test.expectedConfig, actualConfig.config); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ })
+ }
+}
+
+type ConfigHolder struct {
+ config *multichannelfanout.Config
+}
+
+func shouldWatch(c *v1alpha1.Channel) bool {
+ if c.Namespace == "donotwatch" {
+ return false
+ }
+ return true
+}
+func updateConfigWrapper(ch *ConfigHolder, returnError error) swappable.UpdateConfig {
+ return func(c *multichannelfanout.Config) error {
+ ch.config = c
+ return returnError
+ }
+}
+
+func getClient(objs []runtime.Object, mocks controllertesting.Mocks) *controllertesting.MockClient {
+ innerClient := fake.NewFakeClient(objs...)
+ return controllertesting.NewMockClient(innerClient, mocks)
+}
+
+func getClientMocks(listError error) controllertesting.Mocks {
+ if listError != nil {
+ return controllertesting.Mocks{
+ MockLists: []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, _ *client.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) {
+ return controllertesting.Handled, listError
+ },
+ },
+ }
+ }
+ return controllertesting.Mocks{}
+}
+
+func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) *v1alpha1.Channel {
+ c := v1alpha1.Channel{
+ Spec: v1alpha1.ChannelSpec{
+ Subscribable: subscribable,
+ },
+ }
+ c.Name = name
+ c.Namespace = namespace
+ c.Status.InitializeConditions()
+ c.Status.MarkProvisioned()
+ c.Status.MarkProvisionerInstalled()
+ c.Status.SetAddress(hostname)
+ return &c
+}
+func makeSubscribable(subscriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable {
+	return &eventingduck.Subscribable{
+		Subscribers: subscriberSpec,
+	}
+}
+
+func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec {
+ return eventingduck.ChannelSubscriberSpec{
+ SubscriberURI: name + "-suburi",
+ ReplyURI: name + "-replyuri",
+ }
+}
diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index d31d490b139..69e1224e062 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -62,15 +62,17 @@ func AddFinalizer(o metav1.Object, finalizerName string) AddFinalizerResult {
return FinalizerAdded
}
+// RemoveFinalizer removes the finalizer(finalizerName) from the object(o) if the finalizer is present.
+// Returns: - FinalizerRemoved, if the finalizer was found and removed.
+// - FinalizerNotFound, if the finalizer was not found.
func RemoveFinalizer(o metav1.Object, finalizerName string) RemoveFinalizerResult {
- result := FinalizerNotFound
finalizers := sets.NewString(o.GetFinalizers()...)
if finalizers.Has(finalizerName) {
- result = FinalizerRemoved
finalizers.Delete(finalizerName)
o.SetFinalizers(finalizers.List())
+ return FinalizerRemoved
}
- return result
+ return FinalizerNotFound
}
// K8sServiceOption is a functional option that can modify the K8s Service in CreateK8sService
diff --git a/pkg/provisioners/message_receiver.go b/pkg/provisioners/message_receiver.go
index 3874fded80b..175c796762b 100644
--- a/pkg/provisioners/message_receiver.go
+++ b/pkg/provisioners/message_receiver.go
@@ -32,26 +32,47 @@ const (
MessageReceiverPort = 8080
)
-// Message receiver receives messages.
+// MessageReceiver receives messages.
type MessageReceiver struct {
- receiverFunc func(ChannelReference, *Message) error
- forwardHeaders sets.String
- forwardPrefixes []string
+ receiverFunc func(ChannelReference, *Message) error
+ forwardHeaders sets.String
+ forwardPrefixes []string
+ logger *zap.SugaredLogger
+ hostToChannelFunc ResolveChannelFromHostFunc
+}
+
+// ReceiverOptions provides functional options to MessageReceiver function.
+type ReceiverOptions func(*MessageReceiver) error
- logger *zap.SugaredLogger
+// ResolveChannelFromHostFunc function enables MessageReceiver to get the Channel Reference from incoming request HostHeader
+// before calling receiverFunc.
+type ResolveChannelFromHostFunc func(string) (ChannelReference, error)
+
+// ResolveChannelFromHostHeader is a ReceiverOption for NewMessageReceiver which enables the caller to overwrite the
+// default behaviour defined by ParseChannel function.
+func ResolveChannelFromHostHeader(hostToChannelFunc ResolveChannelFromHostFunc) ReceiverOptions {
+ return func(r *MessageReceiver) error {
+ r.hostToChannelFunc = hostToChannelFunc
+ return nil
+ }
}
// NewMessageReceiver creates a message receiver passing new messages to the
// receiverFunc.
-func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger) *MessageReceiver {
+func NewMessageReceiver(receiverFunc func(ChannelReference, *Message) error, logger *zap.SugaredLogger, opts ...ReceiverOptions) (*MessageReceiver, error) {
receiver := &MessageReceiver{
- receiverFunc: receiverFunc,
- forwardHeaders: sets.NewString(forwardHeaders...),
- forwardPrefixes: forwardPrefixes,
-
- logger: logger,
+ receiverFunc: receiverFunc,
+ forwardHeaders: sets.NewString(forwardHeaders...),
+ forwardPrefixes: forwardPrefixes,
+ hostToChannelFunc: ResolveChannelFromHostFunc(ParseChannel),
+ logger: logger,
+ }
+ for _, opt := range opts {
+ if err := opt(receiver); err != nil {
+ return nil, err
+ }
}
- return receiver
+ return receiver, nil
}
// Start begings to receive messages for the receiver.
@@ -116,13 +137,13 @@ func (r *MessageReceiver) handler() http.Handler {
func (r *MessageReceiver) HandleRequest(res http.ResponseWriter, req *http.Request) {
host := req.Host
r.logger.Infof("Received request for %s", host)
- channel, err := ParseChannel(host)
+ channel, err := r.hostToChannelFunc(host)
if err != nil {
r.logger.Info("Could not extract channel", zap.Error(err))
res.WriteHeader(http.StatusInternalServerError)
return
}
-
+ r.logger.Infof("Request mapped to channel: %s", channel.String())
message, err := r.fromRequest(req)
if err != nil {
res.WriteHeader(http.StatusInternalServerError)
diff --git a/pkg/provisioners/message_receiver_test.go b/pkg/provisioners/message_receiver_test.go
index 8cd8ca9bfbe..e4ec33a718a 100644
--- a/pkg/provisioners/message_receiver_test.go
+++ b/pkg/provisioners/message_receiver_test.go
@@ -126,7 +126,10 @@ func TestMessageReceiver_HandleRequest(t *testing.T) {
}
f := tc.receiverFunc
- r := NewMessageReceiver(f, zap.NewNop().Sugar())
+ r, err := NewMessageReceiver(f, zap.NewNop().Sugar())
+ if err != nil {
+ t.Fatalf("Error creating new message receiver. Error:%s", err)
+ }
h := r.handler()
body := tc.bodyReader
diff --git a/pkg/sidecar/fanout/fanout_handler.go b/pkg/sidecar/fanout/fanout_handler.go
index 2fd4ae97f9a..bd67bfe32c8 100644
--- a/pkg/sidecar/fanout/fanout_handler.go
+++ b/pkg/sidecar/fanout/fanout_handler.go
@@ -69,7 +69,7 @@ type forwardMessage struct {
}
// NewHandler creates a new fanout.Handler.
-func NewHandler(logger *zap.Logger, config Config) *Handler {
+func NewHandler(logger *zap.Logger, config Config) (*Handler, error) {
handler := &Handler{
logger: logger,
config: config,
@@ -79,9 +79,12 @@ func NewHandler(logger *zap.Logger, config Config) *Handler {
}
// The receiver function needs to point back at the handler itself, so set it up after
// initialization.
- handler.receiver = provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar())
-
- return handler
+ receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(handler), logger.Sugar())
+ if err != nil {
+ return nil, err
+ }
+ handler.receiver = receiver
+ return handler, nil
}
func createReceiverFunction(f *Handler) func(provisioners.ChannelReference, *provisioners.Message) error {
diff --git a/pkg/sidecar/fanout/fanout_handler_test.go b/pkg/sidecar/fanout/fanout_handler_test.go
index 03b756ca8d9..95e4752b1c6 100644
--- a/pkg/sidecar/fanout/fanout_handler_test.go
+++ b/pkg/sidecar/fanout/fanout_handler_test.go
@@ -225,12 +225,19 @@ func TestFanoutHandler_ServeHTTP(t *testing.T) {
subs = append(subs, sub)
}
- h := NewHandler(zap.NewNop(), Config{Subscriptions: subs})
+ h, err := NewHandler(zap.NewNop(), Config{Subscriptions: subs})
+ if err != nil {
+ t.Fatalf("NewHandler failed. Error:%s", err)
+ }
if tc.asyncHandler {
h.config.AsyncHandler = true
}
if tc.receiverFunc != nil {
- h.receiver = provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar())
+ receiver, err := provisioners.NewMessageReceiver(tc.receiverFunc, zap.NewNop().Sugar())
+ if err != nil {
+ t.Fatalf("NewMessageReceiver failed. Error:%s", err)
+ }
+ h.receiver = receiver
}
if tc.timeout != 0 {
h.timeout = tc.timeout
diff --git a/pkg/sidecar/multichannelfanout/config.go b/pkg/sidecar/multichannelfanout/config.go
new file mode 100644
index 00000000000..77f97a2e807
--- /dev/null
+++ b/pkg/sidecar/multichannelfanout/config.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package multichannelfanout
+
+import (
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/sidecar/fanout"
+)
+
+// Config for a multichannelfanout.Handler.
+type Config struct {
+ // The configuration of each channel in this handler.
+ ChannelConfigs []ChannelConfig `json:"channelConfigs"`
+}
+
+// ChannelConfig is the configuration for a single Channel.
+type ChannelConfig struct {
+ Namespace string `json:"namespace"`
+ Name string `json:"name"`
+ HostName string `json:"hostname"`
+ FanoutConfig fanout.Config `json:"fanoutConfig"`
+}
+
+// NewConfigFromChannels creates a new Config from the list of channels.
+func NewConfigFromChannels(channels []v1alpha1.Channel) *Config {
+ cc := make([]ChannelConfig, 0)
+ for _, c := range channels {
+ channelConfig := ChannelConfig{
+ Namespace: c.Namespace,
+ Name: c.Name,
+ HostName: c.Status.Address.Hostname,
+ }
+ if c.Spec.Subscribable != nil {
+ channelConfig.FanoutConfig = fanout.Config{
+ Subscriptions: c.Spec.Subscribable.Subscribers,
+ }
+ }
+ cc = append(cc, channelConfig)
+ }
+ return &Config{
+ ChannelConfigs: cc,
+ }
+}
diff --git a/pkg/sidecar/multichannelfanout/config_test.go b/pkg/sidecar/multichannelfanout/config_test.go
new file mode 100644
index 00000000000..e27048c4f31
--- /dev/null
+++ b/pkg/sidecar/multichannelfanout/config_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package multichannelfanout
+
+import (
+ "testing"
+
+ "github.com/knative/eventing/pkg/sidecar/fanout"
+
+ "github.com/google/go-cmp/cmp"
+ eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+func TestNewConfigFromChannels(t *testing.T) {
+ tests := []struct {
+ name string
+ channels []v1alpha1.Channel
+ expected *Config
+ }{
+ {
+ name: "empty channels list",
+ channels: []v1alpha1.Channel{},
+ expected: &Config{
+ ChannelConfigs: []ChannelConfig{},
+ },
+ }, {
+ name: "one channel with no subscribers",
+ channels: []v1alpha1.Channel{
+ makeChannel("chan-1", "ns-1", "a.b.c.d", nil),
+ },
+ expected: &Config{
+ ChannelConfigs: []ChannelConfig{
+ {
+ Name: "chan-1",
+ Namespace: "ns-1",
+ HostName: "a.b.c.d",
+ },
+ },
+ },
+ }, {
+ name: "multiple channels with subscribers",
+ channels: []v1alpha1.Channel{
+ makeChannel("chan-1", "ns-1", "e.f.g.h", makeSubscribable(makeSubscriber("sub1"), makeSubscriber("sub2"))),
+ makeChannel("chan-2", "ns-2", "i.j.k.l", makeSubscribable(makeSubscriber("sub3"), makeSubscriber("sub4"))),
+ },
+ expected: &Config{
+ ChannelConfigs: []ChannelConfig{
+ {
+ Name: "chan-1",
+ Namespace: "ns-1",
+ HostName: "e.f.g.h",
+ FanoutConfig: fanout.Config{
+ Subscriptions: []eventingduck.ChannelSubscriberSpec{
+ makeSubscriber("sub1"),
+ makeSubscriber("sub2"),
+ },
+ },
+ }, {
+ Name: "chan-2",
+ Namespace: "ns-2",
+ HostName: "i.j.k.l",
+ FanoutConfig: fanout.Config{
+ Subscriptions: []eventingduck.ChannelSubscriberSpec{
+ makeSubscriber("sub3"),
+ makeSubscriber("sub4"),
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ actual := NewConfigFromChannels(test.channels)
+ if diff := cmp.Diff(test.expected, actual); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ })
+ }
+}
+
+func makeChannel(name string, namespace string, hostname string, subscribable *eventingduck.Subscribable) v1alpha1.Channel {
+ c := v1alpha1.Channel{
+ Spec: v1alpha1.ChannelSpec{
+ Subscribable: subscribable,
+ },
+ Status: v1alpha1.ChannelStatus{
+ Address: duckv1alpha1.Addressable{
+ Hostname: hostname,
+ },
+ },
+ }
+ c.Name = name
+ c.Namespace = namespace
+ return c
+}
+func makeSubscribable(subscriberSpec ...eventingduck.ChannelSubscriberSpec) *eventingduck.Subscribable {
+	return &eventingduck.Subscribable{
+		Subscribers: subscriberSpec,
+	}
+}
+
+func makeSubscriber(name string) eventingduck.ChannelSubscriberSpec {
+ return eventingduck.ChannelSubscriberSpec{
+ SubscriberURI: name + "-suburi",
+ ReplyURI: name + "-replyuri",
+ }
+}
diff --git a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
index 282a1c0985d..c14cd53725c 100644
--- a/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
+++ b/pkg/sidecar/multichannelfanout/multi_channel_fanout_handler.go
@@ -34,20 +34,6 @@ import (
"go.uber.org/zap"
)
-// Config for a multichannelfanout.Handler.
-type Config struct {
- // The configuration of each channel in this handler.
- ChannelConfigs []ChannelConfig `json:"channelConfigs"`
-}
-
-// ChannelConfig is the configuration for a single Channel.
-type ChannelConfig struct {
- Namespace string `json:"namespace"`
- Name string `json:"name"`
- HostName string `json:"hostname"`
- FanoutConfig fanout.Config `json:"fanoutConfig"`
-}
-
// makeChannelKeyFromConfig creates the channel key for a given channelConfig. It is a helper around
// MakeChannelKey.
func makeChannelKeyFromConfig(config ChannelConfig) string {
@@ -74,7 +60,11 @@ func NewHandler(logger *zap.Logger, conf Config) (*Handler, error) {
for _, cc := range conf.ChannelConfigs {
key := makeChannelKeyFromConfig(cc)
- handler := fanout.NewHandler(logger, cc.FanoutConfig)
+ handler, err := fanout.NewHandler(logger, cc.FanoutConfig)
+ if err != nil {
+ logger.Error("Failed creating new fanout handler.", zap.Error(err))
+ return nil, err
+ }
if _, present := handlers[key]; present {
logger.Error("Duplicate channel key", zap.String("channelKey", key))
return nil, fmt.Errorf("duplicate channel key: %v", key)
From 0f60da6348c2d96e62bf9ba24ab45025b34855e4 Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Fri, 19 Apr 2019 20:31:27 -0700
Subject: [PATCH 37/76] add more logs, try to find the reason for test
flakiness (#1075)
---
test/e2e/broker_trigger_test.go | 1 +
test/e2e/channel_chain_test.go | 1 +
test/e2e/e2e.go | 10 ++++++++++
test/e2e/event_transformation_test.go | 4 +---
test/e2e/single_event_test.go | 9 +++------
5 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/test/e2e/broker_trigger_test.go b/test/e2e/broker_trigger_test.go
index 1ced4b166d6..862f39a828b 100644
--- a/test/e2e/broker_trigger_test.go
+++ b/test/e2e/broker_trigger_test.go
@@ -218,6 +218,7 @@ func TestDefaultBrokerWithManyTriggers(t *testing.T) {
subscriberContainerName := subscriberPod.Spec.Containers[0].Name
t.Logf("Dumper %q expecting %q", subscriberPodName, strings.Join(expectedEvents[subscriberPodName], ","))
if err := WaitForLogContents(clients, t.Logf, subscriberPodName, subscriberContainerName, ns, expectedEvents[subscriberPodName]); err != nil {
+ logPodLogsForDebugging(clients, subscriberPodName, subscriberContainerName, ns, t.Logf)
t.Fatalf("Event(s) not found in logs of subscriber pod %q: %v", subscriberPodName, err)
}
// At this point all the events should have been received in the pod.
diff --git a/test/e2e/channel_chain_test.go b/test/e2e/channel_chain_test.go
index c914f987c61..61b922d7f03 100644
--- a/test/e2e/channel_chain_test.go
+++ b/test/e2e/channel_chain_test.go
@@ -98,6 +98,7 @@ func TestChannelChain(t *testing.T) {
// check if the logging service receives the correct number of event messages
expectedContentCount := len(subscriptionNames1) * len(subscriptionNames2)
if err := WaitForLogContentCount(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body, expectedContentCount); err != nil {
+ logPodLogsForDebugging(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, t.Logf)
t.Fatalf("String %q does not appear %d times in logs of logger pod %q: %v", body, expectedContentCount, loggerPodName, err)
}
}
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 53cbf7f7db1..142583ff0f9 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -489,3 +489,13 @@ func DeleteNameSpace(clients *test.Clients, namespace string) error {
}
return err
}
+
+// logPodLogsForDebugging add the pod logs in the testing log for further debugging.
+func logPodLogsForDebugging(clients *test.Clients, podName, containerName, namespace string, logf logging.FormatLogger) {
+ logs, err := clients.Kube.PodLogs(podName, containerName, namespace)
+ if err != nil {
+ logf("Failed to get the logs for container %q of the pod %q in namespace %q: %v", containerName, podName, namespace, err)
+ } else {
+		logf("Logs for the container %q of the pod %q in namespace %q:\n%s", containerName, podName, namespace, string(logs))
+ }
+}
diff --git a/test/e2e/event_transformation_test.go b/test/e2e/event_transformation_test.go
index b3ce832873f..7d5b1c87fd4 100644
--- a/test/e2e/event_transformation_test.go
+++ b/test/e2e/event_transformation_test.go
@@ -120,9 +120,7 @@ func TestEventTransformation(t *testing.T) {
podName := loggerPod.Name
containerName := loggerPod.Spec.Containers[0].Name
if err := WaitForLogContentCount(clients, podName, containerName, ns, expectedContent, expectedContentCount); err != nil {
- if logs, err := clients.Kube.PodLogs(podName, containerName, ns); err != nil {
- t.Logf("Log content: %s\n", string(logs))
- }
+ logPodLogsForDebugging(clients, podName, containerName, ns, t.Logf)
t.Fatalf("String %q does not appear %d times in logs of logger pod %q: %v", expectedContent, expectedContentCount, loggerPod.Name, err)
}
}
diff --git a/test/e2e/single_event_test.go b/test/e2e/single_event_test.go
index ddc80207076..c6158cee871 100644
--- a/test/e2e/single_event_test.go
+++ b/test/e2e/single_event_test.go
@@ -83,12 +83,9 @@ func singleEvent(t *testing.T, encoding string) {
}
if err := pkgTest.WaitForLogContent(clients.Kube, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body); err != nil {
- if logs, err := clients.Kube.PodLogs(senderName, "sendevent", ns); err != nil {
- t.Logf("Logs for sendevent container of the sender pod:\n %s", string(logs))
- }
- if logs, err := clients.Kube.PodLogs(senderName, "istio-proxy", ns); err != nil {
- t.Logf("Logs for istio-proxy container of the sender pod:\n %s", string(logs))
- }
+ logPodLogsForDebugging(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, t.Logf)
+ logPodLogsForDebugging(clients, senderName, "sendevent", ns, t.Logf)
+ logPodLogsForDebugging(clients, senderName, "istio-proxy", ns, t.Logf)
t.Fatalf("String %q not found in logs of logger pod %q: %v", body, loggerPodName, err)
}
}
From c935a11512a1ec8e70d1321487af5653e1d0849c Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Mon, 22 Apr 2019 14:02:28 -0700
Subject: [PATCH 38/76] Fix imports for sub test. (#1078)
---
.../subscription/subscription_test.go | 425 +++++++++---------
1 file changed, 213 insertions(+), 212 deletions(-)
diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go
index fe9f0441a88..84bf5e5255d 100644
--- a/pkg/reconciler/subscription/subscription_test.go
+++ b/pkg/reconciler/subscription/subscription_test.go
@@ -19,7 +19,6 @@ package subscription
import (
"encoding/json"
"fmt"
- testing2 "github.com/knative/eventing/pkg/reconciler/testing"
"testing"
corev1 "k8s.io/api/core/v1"
@@ -39,6 +38,8 @@ import (
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
"github.com/knative/pkg/controller"
logtesting "github.com/knative/pkg/logging/testing"
+
+ . "github.com/knative/eventing/pkg/reconciler/testing"
. "github.com/knative/pkg/reconciler/testing"
)
@@ -115,14 +116,14 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, but subscriber is not addressable",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -131,23 +132,23 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: status does not contain address"),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
+ WithInitSubscriptionConditions,
),
}},
}, {
Name: "subscription, but subscriber does not exist",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -156,26 +157,26 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: subscribers.testing.eventing.knative.dev %q not found", subscriberName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
+ WithInitSubscriptionConditions,
),
}},
}, {
Name: "subscription, reply does not exist",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS)),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS)),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -184,31 +185,31 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeWarning, "ResultResolveFailed", "Failed to resolve spec.reply: channels.eventing.knative.dev %q not found", replyName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithInitSubscriptionConditions,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
),
}},
}, {
Name: "subscription, reply is not addressable",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(subscriberGVK, replyName), // reply will be a subscriberGVK for this test
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(subscriberGVK, replyName), // reply will be a subscriberGVK for this test
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewUnstructured(subscriberGVK, replyName, testNS),
+ NewUnstructured(subscriberGVK, replyName, testNS),
},
Key: testNS + "/" + subscriptionName,
WantErr: true,
@@ -217,28 +218,28 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeWarning, "SubscriptionUpdateStatusFailed", "Failed to update Subscription's status: status does not contain address"), // TODO: BUGBUG THIS IS WEIRD
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(subscriberGVK, replyName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(subscriberGVK, replyName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithInitSubscriptionConditions,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
),
}},
}, {
Name: "subscription, valid channel+subscriber",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -247,13 +248,13 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -265,17 +266,17 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, valid channel+reply",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionReply(channelGVK, replyName),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewChannel(replyName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(replyDNS),
+ NewChannel(replyName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(replyDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -284,13 +285,13 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionReply(channelGVK, replyName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionReply(replyURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -302,21 +303,21 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, valid channel+subscriber+reply",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewChannel(replyName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(replyDNS),
+ NewChannel(replyName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(replyDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -325,15 +326,15 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
- testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithSubscriptionPhysicalSubscriptionReply(replyURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -345,21 +346,21 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, valid remove reply",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
- testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
- ),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS),
- ),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
- testing2.WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS),
+ ),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
+ WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
{UID: subscriptionUID, SubscriberURI: subscriberURI, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
}),
),
@@ -370,12 +371,12 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -387,24 +388,24 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, valid remove subscriber",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithInitSubscriptionConditions,
- testing2.WithSubscriptionReply(channelGVK, replyName),
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
- testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
- ),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
- testing2.WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithInitSubscriptionConditions,
+ WithSubscriptionReply(channelGVK, replyName),
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
+ WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ ),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
+ WithChannelSubscribers([]v1alpha1.ChannelSubscriberSpec{
{UID: subscriptionUID, SubscriberURI: subscriberURI, ReplyURI: replyURI, DeprecatedRef: &corev1.ObjectReference{Name: subscriptionName, Namespace: testNS, UID: subscriptionUID}},
}),
),
- testing2.NewChannel(replyName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(replyDNS),
+ NewChannel(replyName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(replyDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -413,12 +414,12 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionReply(replyURI),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionReply(channelGVK, replyName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionReply(replyURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -430,13 +431,13 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, channel+subscriber as service, does not exist",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -445,25 +446,25 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: services %q not found", serviceName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
+ WithInitSubscriptionConditions,
),
}},
}, {
Name: "subscription, valid channel+subscriber as service",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewService(serviceName, testNS),
+ NewService(serviceName, testNS),
},
Key: testNS + "/" + subscriptionName,
WantErr: false,
@@ -471,13 +472,13 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -489,23 +490,23 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription, two subscribers for a channel",
Objects: []runtime.Object{
- testing2.NewSubscription("a_"+subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ NewSubscription("a_"+subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
// an already rec'ed subscription
- testing2.NewSubscription("b_"+subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ NewSubscription("b_"+subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewService(serviceName, testNS),
+ NewService(serviceName, testNS),
},
Key: testNS + "/" + "a_" + subscriptionName,
WantErr: false,
@@ -513,13 +514,13 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", "a_"+subscriptionName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription("a_"+subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(serviceGVK, serviceName),
+ Object: NewSubscription("a_"+subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -532,26 +533,26 @@ func TestAllCases(t *testing.T) {
}, {
Name: "subscription deleted",
Objects: []runtime.Object{
- testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionFinalizers(finalizerName),
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
- testing2.WithSubscriptionDeleted,
+ NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionFinalizers(finalizerName),
+ WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ WithSubscriptionDeleted,
),
- testing2.NewUnstructured(subscriberGVK, subscriberName, testNS,
- testing2.WithUnstructuredAddressable(subscriberDNS),
+ NewUnstructured(subscriberGVK, subscriberName, testNS,
+ WithUnstructuredAddressable(subscriberDNS),
),
- testing2.NewChannel(channelName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(channelDNS),
+ NewChannel(channelName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(channelDNS),
),
- testing2.NewChannel(replyName, testNS,
- testing2.WithInitChannelConditions,
- testing2.WithChannelAddress(replyDNS),
+ NewChannel(replyName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(replyDNS),
),
},
Key: testNS + "/" + subscriptionName,
@@ -560,14 +561,14 @@ func TestAllCases(t *testing.T) {
Eventf(corev1.EventTypeNormal, "SubscriptionReconciled", "Subscription reconciled: %q", subscriptionName),
},
WantUpdates: []clientgotesting.UpdateActionImpl{{
- Object: testing2.NewSubscription(subscriptionName, testNS,
- testing2.WithSubscriptionChannel(channelGVK, channelName),
- testing2.WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
- testing2.WithSubscriptionReply(channelGVK, replyName),
- testing2.WithInitSubscriptionConditions,
- testing2.MarkSubscriptionReady,
- testing2.WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
- testing2.WithSubscriptionDeleted,
+ Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionChannel(channelGVK, channelName),
+ WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
+ WithSubscriptionReply(channelGVK, replyName),
+ WithInitSubscriptionConditions,
+ MarkSubscriptionReady,
+ WithSubscriptionPhysicalSubscriptionSubscriber(serviceURI),
+ WithSubscriptionDeleted,
),
}},
WantPatches: []clientgotesting.PatchActionImpl{
@@ -577,7 +578,7 @@ func TestAllCases(t *testing.T) {
}
defer logtesting.ClearAll()
- table.Test(t, testing2.MakeFactory(func(listers *testing2.Listers, opt reconciler.Options) controller.Reconciler {
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
return &Reconciler{
Base: reconciler.NewBase(opt, controllerAgentName),
subscriptionLister: listers.GetSubscriptionLister(),
From de0edc16df1bdd343670bc41f0183618a2607f9e Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Tue, 23 Apr 2019 18:31:32 +0200
Subject: [PATCH 39/76] Fix webhook controller on openshift (#1086)
---
config/200-webhook-clusterrole.yaml | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/config/200-webhook-clusterrole.yaml b/config/200-webhook-clusterrole.yaml
index fdd974c4223..739eb63aa65 100644
--- a/config/200-webhook-clusterrole.yaml
+++ b/config/200-webhook-clusterrole.yaml
@@ -41,22 +41,23 @@ rules:
- "apps"
resources:
- "deployments"
- verbs:
- - "get"
+ - "deployments/finalizers"
+ verbs: &everything
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+
# For actually registering our webhook.
- apiGroups:
- "admissionregistration.k8s.io"
resources:
- "mutatingwebhookconfigurations"
- verbs:
- - "get"
- - "list"
- - "create"
- - "update"
- - "delete"
- - "patch"
- - "watch"
+ verbs: *everything
# Our own resources and statuses we care about.
- apiGroups:
From 9024f2b0c27327153ba4fd7ca2a8ca9999410e0c Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 23 Apr 2019 10:27:55 -0700
Subject: [PATCH 40/76] Add program caching
CEL programs are LRU cached with the expression string as key. Cache size
is fixed at 100 entries, but we may want to make this configurable.
If an error occurs while generating the program, that error is cached so
the invalid expression isn't parsed for every event.
---
pkg/broker/cel.go | 76 ++++++++++++++-----------
pkg/broker/program_cache.go | 109 ++++++++++++++++++++++++++++++++++++
2 files changed, 152 insertions(+), 33 deletions(-)
create mode 100644 pkg/broker/program_cache.go
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 04bff962b8d..4e4db2a2a61 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -1,3 +1,19 @@
+/*
+ * Copyright 2019 The Knative Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package broker
import (
@@ -19,28 +35,9 @@ const (
)
func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *cloudevents.Event) (bool, error) {
- e, err := cel.NewEnv(
- cel.Declarations(
- decls.NewIdent(CELVarKeyContext, decls.Dyn, nil),
- decls.NewIdent(CELVarKeyData, decls.Dyn, nil),
- ),
- )
- if err != nil {
- return false, err
- }
+ expr := ts.Filter.CEL.Expression
- p, iss := e.Parse(ts.Filter.CEL.Expression)
- if iss != nil && iss.Err() != nil {
- return false, iss.Err()
- }
- c, iss := e.Check(p)
- if iss != nil && iss.Err() != nil {
- return false, iss.Err()
- }
-
- // TODO cache these by hash of expression. Programs are thread-safe so it's
- // ok to share them between triggers and events.
- prg, err := e.Program(c)
+ prg, err := getOrCacheProgram(expr, programForExpression)
if err != nil {
return false, err
}
@@ -77,8 +74,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
// a dynamic struct.
data := make(map[string]interface{})
if ts.Filter.CEL.ParseData {
- data, err = ceParsedData(event)
- if err != nil {
+ if err := event.DataAs(&data); err != nil {
r.logger.Error("Failed to parse event data for CEL filtering", zap.String("id", event.Context.AsV02().ID), zap.Error(err))
}
}
@@ -94,15 +90,29 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
return out == types.True, nil
}
-func ceParsedData(event *cloudevents.Event) (map[string]interface{}, error) {
- // TODO CloudEvents SDK might have a better way to do this with data codecs
- if event.DataMediaType() == "application/json" {
- var decodedData map[string]interface{}
- err := event.DataAs(&decodedData)
- if err != nil {
- return nil, err
- }
- return decodedData, nil
+func programForExpression(expr string) (cel.Program, error) {
+ env, err := cel.NewEnv(
+ cel.Declarations(
+ decls.NewIdent(CELVarKeyContext, decls.Dyn, nil),
+ decls.NewIdent(CELVarKeyData, decls.Dyn, nil),
+ ),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ parsed, iss := env.Parse(expr)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+ checked, iss := env.Check(parsed)
+ if iss != nil && iss.Err() != nil {
+ return nil, iss.Err()
+ }
+
+ prg, err := env.Program(checked)
+ if err != nil {
+ return nil, err
}
- return nil, nil
+ return prg, nil
}
diff --git a/pkg/broker/program_cache.go b/pkg/broker/program_cache.go
new file mode 100644
index 00000000000..67120410698
--- /dev/null
+++ b/pkg/broker/program_cache.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2019 The Knative Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package broker
+
+import (
+ "sync"
+
+ "github.com/google/cel-go/cel"
+ "github.com/hashicorp/golang-lru"
+)
+
+const (
+ // programCacheSize is the default size of the CEL program cache.
+ programCacheSize = 100
+)
+
+var (
+ programCache programCacheInterface
+ cacheOnce sync.Once
+)
+
+type cacheEntry struct {
+ program cel.Program
+ err error
+}
+
+type programCacheInterface interface {
+ Add(string, cacheEntry) bool
+ Get(string) (cacheEntry, bool)
+}
+
+type fakeCache struct{}
+
+func (c *fakeCache) Add(_ string, _ cacheEntry) bool {
+ return false
+}
+
+func (c *fakeCache) Get(_ string) (cacheEntry, bool) {
+ return cacheEntry{}, false
+}
+
+type lruCache struct {
+ lru *lru.Cache
+}
+
+func newCache() programCacheInterface {
+ // Currently this can only return an error if size is <=0
+ lru, err := lru.New(programCacheSize)
+ if err != nil {
+ //TODO need a package-level logger here
+ return &fakeCache{}
+ }
+ return &lruCache{
+ lru: lru,
+ }
+}
+
+func (c *lruCache) Add(k string, e cacheEntry) bool {
+ return c.lru.Add(k, e)
+}
+
+func (c *lruCache) Get(k string) (cacheEntry, bool) {
+ res, exists := c.lru.Get(k)
+ if !exists {
+ return cacheEntry{}, false
+ }
+ entry, ok := res.(cacheEntry)
+ if !ok {
+ //TODO need a package-level logger here
+ return cacheEntry{}, false
+ }
+ return entry, ok
+}
+
+type cachePopulatingFunc func(string) (cel.Program, error)
+
+func getOrCacheProgram(expr string, pf cachePopulatingFunc) (cel.Program, error) {
+ cacheOnce.Do(func() {
+ programCache = newCache()
+ })
+
+ entry, ok := programCache.Get(expr)
+ if !ok {
+ prg, err := pf(expr)
+ // If there was an error, cache that error so we don't try to parse this
+ // program every time.
+ entry = cacheEntry{program: prg, err: err}
+ programCache.Add(expr, entry)
+ }
+
+ if entry.err != nil {
+ return nil, entry.err
+ }
+ return entry.program, nil
+}
From a7c5431f6433e733cf4738e6dd5128fb5ba5f1c8 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 23 Apr 2019 10:36:56 -0700
Subject: [PATCH 41/76] Clarify timestamp comment
---
pkg/broker/cel.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/broker/cel.go b/pkg/broker/cel.go
index 4e4db2a2a61..10966825b98 100644
--- a/pkg/broker/cel.go
+++ b/pkg/broker/cel.go
@@ -51,7 +51,7 @@ func (r *Receiver) filterEventByCEL(ts *eventingv1alpha1.TriggerSpec, event *clo
"source": event.Source(),
"subject": event.Subject(),
"id": event.ID(),
- // TODO Time. Should this be a string or a (cel-native) protobuf timestamp?
+ // TODO Time. This should be a protobuf Timestamp
"schemaurl": event.SchemaURL(),
"datacontenttype": event.DataContentType(),
"datamediatype": event.DataMediaType(),
From 7be0f6abfc7b6e9007f69c1a89cce96585206768 Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Tue, 23 Apr 2019 10:59:28 -0700
Subject: [PATCH 42/76] Update Trigger docs to match #1045. (#1055)
---
docs/broker/README.md | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/docs/broker/README.md b/docs/broker/README.md
index 335e132b21f..bc9db2011e4 100644
--- a/docs/broker/README.md
+++ b/docs/broker/README.md
@@ -327,11 +327,6 @@ it reconciles:
1. Determines the subscriber's URI.
- Currently uses the same logic as the `Subscription` Reconciler, so supports
Addressables and Kubernetes `Service`s.
-1. Creates a Kubernetes `Service` and Istio `VirtualService` pair. This allows
- all Istio enabled `Pod`s to send to the `Trigger`'s address.
- - This is the same as the current `Channel` implementation. The `Service`
- points nowhere. The `VirtualService` reroutes requests that originally went
- to the `Service`, to instead go to the `Broker`'s 'filter' `Service`.
-1. Creates `Subscription` from the `Broker`'s 'trigger' `Channel` to the
- `Trigger`'s Kubernetes `Service`. Replies are sent to the `Broker`'s
+1. Creates a `Subscription` from the `Broker`'s 'trigger' `Channel` to the
+ `Trigger`'s Kubernetes `Service` using the HTTP path `/triggers/{namespace}/{name}`. Replies are sent to the `Broker`'s
'ingress' `Channel`.
From dd4ce240ade48331fa964d6911af6a72b22929e2 Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Tue, 23 Apr 2019 11:50:28 -0700
Subject: [PATCH 43/76] wait for the default ServiceAccount to be created
before creating new pods (#1091)
---
test/e2e/e2e.go | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 142583ff0f9..f559c3935e6 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -462,9 +462,27 @@ func CreateNamespaceIfNeeded(t *testing.T, clients *test.Clients, namespace stri
if err != nil {
t.Fatalf("Failed to create Namespace: %s; %v", namespace, err)
}
+
+ // https://github.com/kubernetes/kubernetes/issues/66689
+ // We can only start creating pods after the default ServiceAccount is created by the kube-controller-manager.
+ err = WaitForServiceAccountExists(t, clients, "default", namespace, logf)
+ if err != nil {
+ t.Fatalf("The default ServiceAccount was not created for the Namespace: %s", namespace)
+ }
}
}
+// WaitForServiceAccountExists waits until the ServiceAccount exists.
+func WaitForServiceAccountExists(t *testing.T, clients *test.Clients, name, namespace string, logf logging.FormatLogger) error {
+ return wait.PollImmediate(interval, timeout, func() (bool, error) {
+ sas := clients.Kube.Kube.CoreV1().ServiceAccounts(namespace)
+ if _, err := sas.Get(name, metav1.GetOptions{}); err == nil {
+ return true, nil
+ }
+ return false, nil
+ })
+}
+
// LabelNamespace labels the given namespace with the labels map.
func LabelNamespace(clients *test.Clients, namespace string, labels map[string]string, logf logging.FormatLogger) error {
nsSpec, err := clients.Kube.Kube.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
@@ -496,6 +514,6 @@ func logPodLogsForDebugging(clients *test.Clients, podName, containerName, names
if err != nil {
logf("Failed to get the logs for container %q of the pod %q in namespace %q: %v", containerName, podName, namespace, err)
} else {
- logf("Logs for the container %q of the pod % in namespace %q:\n%s", containerName, podName, namespace, string(logs))
+ logf("Logs for the container %q of the pod %q in namespace %q:\n%s", containerName, podName, namespace, string(logs))
}
}
From 6d6da06d6620c5c4edbed4fb7653c68eca40cb19 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Tue, 23 Apr 2019 13:25:29 -0700
Subject: [PATCH 44/76] Update Boilerplate to be 2019. (#1088)
---
hack/boilerplate/boilerplate.go.txt | 2 +-
pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go | 2 +-
pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go | 2 +-
pkg/client/clientset/versioned/clientset.go | 2 +-
pkg/client/clientset/versioned/doc.go | 2 +-
pkg/client/clientset/versioned/fake/clientset_generated.go | 2 +-
pkg/client/clientset/versioned/fake/doc.go | 2 +-
pkg/client/clientset/versioned/fake/register.go | 2 +-
pkg/client/clientset/versioned/scheme/doc.go | 2 +-
pkg/client/clientset/versioned/scheme/register.go | 2 +-
.../clientset/versioned/typed/eventing/v1alpha1/broker.go | 2 +-
.../clientset/versioned/typed/eventing/v1alpha1/channel.go | 2 +-
.../typed/eventing/v1alpha1/clusterchannelprovisioner.go | 2 +-
pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go | 2 +-
.../versioned/typed/eventing/v1alpha1/eventing_client.go | 2 +-
.../clientset/versioned/typed/eventing/v1alpha1/fake/doc.go | 2 +-
.../versioned/typed/eventing/v1alpha1/fake/fake_broker.go | 2 +-
.../versioned/typed/eventing/v1alpha1/fake/fake_channel.go | 2 +-
.../eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go | 2 +-
.../typed/eventing/v1alpha1/fake/fake_eventing_client.go | 2 +-
.../versioned/typed/eventing/v1alpha1/fake/fake_subscription.go | 2 +-
.../versioned/typed/eventing/v1alpha1/fake/fake_trigger.go | 2 +-
.../versioned/typed/eventing/v1alpha1/generated_expansion.go | 2 +-
.../clientset/versioned/typed/eventing/v1alpha1/subscription.go | 2 +-
.../clientset/versioned/typed/eventing/v1alpha1/trigger.go | 2 +-
pkg/client/informers/externalversions/eventing/interface.go | 2 +-
.../informers/externalversions/eventing/v1alpha1/broker.go | 2 +-
.../informers/externalversions/eventing/v1alpha1/channel.go | 2 +-
.../eventing/v1alpha1/clusterchannelprovisioner.go | 2 +-
.../informers/externalversions/eventing/v1alpha1/interface.go | 2 +-
.../externalversions/eventing/v1alpha1/subscription.go | 2 +-
.../informers/externalversions/eventing/v1alpha1/trigger.go | 2 +-
pkg/client/informers/externalversions/factory.go | 2 +-
pkg/client/informers/externalversions/generic.go | 2 +-
.../externalversions/internalinterfaces/factory_interfaces.go | 2 +-
pkg/client/listers/eventing/v1alpha1/broker.go | 2 +-
pkg/client/listers/eventing/v1alpha1/channel.go | 2 +-
.../listers/eventing/v1alpha1/clusterchannelprovisioner.go | 2 +-
pkg/client/listers/eventing/v1alpha1/expansion_generated.go | 2 +-
pkg/client/listers/eventing/v1alpha1/subscription.go | 2 +-
pkg/client/listers/eventing/v1alpha1/trigger.go | 2 +-
41 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/hack/boilerplate/boilerplate.go.txt b/hack/boilerplate/boilerplate.go.txt
index 02c504e9302..1f43b023ad2 100644
--- a/hack/boilerplate/boilerplate.go.txt
+++ b/hack/boilerplate/boilerplate.go.txt
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
index e1885fcac5e..6c64827c713 100644
--- a/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/duck/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index 14ada61e705..9807e6caf2a 100644
--- a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index 4f8f6c7c4c3..e93eb9138e5 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go
index 3fe4685848a..1122e50bfc3 100644
--- a/pkg/client/clientset/versioned/doc.go
+++ b/pkg/client/clientset/versioned/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go
index 0a4b10bfa14..5fc64b9c1c7 100644
--- a/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/fake/doc.go b/pkg/client/clientset/versioned/fake/doc.go
index 86f64bb7aae..87f3c3e0b01 100644
--- a/pkg/client/clientset/versioned/fake/doc.go
+++ b/pkg/client/clientset/versioned/fake/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index 60103d7155b..e29260ec25f 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/scheme/doc.go b/pkg/client/clientset/versioned/scheme/doc.go
index 60ea8ba90eb..7d76538485b 100644
--- a/pkg/client/clientset/versioned/scheme/doc.go
+++ b/pkg/client/clientset/versioned/scheme/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index a6263f98cb9..5df4708af46 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/broker.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/broker.go
index 7ae965feb0e..9f46c3da894 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/broker.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/broker.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/channel.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/channel.go
index 962c0a450a8..ca01753411a 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/channel.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/channel.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
index 910eb0aec16..a29701415fd 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/clusterchannelprovisioner.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
index 75445c17900..a1c6bb9fe8f 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
index 753d1081b84..68722de4481 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
index 128aa183a91..a00e5d7b21a 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_broker.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_broker.go
index c7b30557ec1..7354136c45c 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_broker.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_broker.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_channel.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_channel.go
index 2f9f36739e7..78e51719d31 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_channel.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_channel.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
index 0c908f50172..bc3f4efdeea 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_clusterchannelprovisioner.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
index 4362e785f5a..52759cda929 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
index e8f4d302ca3..0cdbd3372b4 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_subscription.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_trigger.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_trigger.go
index 5e4b588c6b1..87204eb96f9 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_trigger.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_trigger.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
index 2f88fb3320b..00ba65313fd 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
index f06468c1027..40e66cda099 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/subscription.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/trigger.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/trigger.go
index 72207e79d34..777411ed328 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/trigger.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/trigger.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/interface.go b/pkg/client/informers/externalversions/eventing/interface.go
index 17435a38470..4d02d2b9f00 100644
--- a/pkg/client/informers/externalversions/eventing/interface.go
+++ b/pkg/client/informers/externalversions/eventing/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/broker.go b/pkg/client/informers/externalversions/eventing/v1alpha1/broker.go
index 5fec443fb8c..e1c1421817f 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/broker.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/broker.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/channel.go b/pkg/client/informers/externalversions/eventing/v1alpha1/channel.go
index 79aa31e1ec6..7483c4e4152 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/channel.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/channel.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/clusterchannelprovisioner.go b/pkg/client/informers/externalversions/eventing/v1alpha1/clusterchannelprovisioner.go
index d32ee951d49..4d6f0a1c518 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/clusterchannelprovisioner.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/clusterchannelprovisioner.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
index 29ad6f191f0..004212a1b29 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/subscription.go b/pkg/client/informers/externalversions/eventing/v1alpha1/subscription.go
index 54722fbc624..224da4fd65c 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/subscription.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/subscription.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/trigger.go b/pkg/client/informers/externalversions/eventing/v1alpha1/trigger.go
index b715cde7005..91bc5844bd9 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/trigger.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/trigger.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go
index 0cde7d2cdd1..f5f0897b7c9 100644
--- a/pkg/client/informers/externalversions/factory.go
+++ b/pkg/client/informers/externalversions/factory.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 6c22f720af9..8aa9b1ad555 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
index 260a03dbcf7..e168391e2ec 100644
--- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/broker.go b/pkg/client/listers/eventing/v1alpha1/broker.go
index 4916e953b2f..8bba839ee6d 100644
--- a/pkg/client/listers/eventing/v1alpha1/broker.go
+++ b/pkg/client/listers/eventing/v1alpha1/broker.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/channel.go b/pkg/client/listers/eventing/v1alpha1/channel.go
index 1572b0d8408..6d66b1c2fdc 100644
--- a/pkg/client/listers/eventing/v1alpha1/channel.go
+++ b/pkg/client/listers/eventing/v1alpha1/channel.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/clusterchannelprovisioner.go b/pkg/client/listers/eventing/v1alpha1/clusterchannelprovisioner.go
index daa65a79b81..d3cb4fcd45f 100644
--- a/pkg/client/listers/eventing/v1alpha1/clusterchannelprovisioner.go
+++ b/pkg/client/listers/eventing/v1alpha1/clusterchannelprovisioner.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/expansion_generated.go b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
index 2a49e1b434b..4bd2b3c78eb 100644
--- a/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
+++ b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/subscription.go b/pkg/client/listers/eventing/v1alpha1/subscription.go
index 0a834e5ebce..8fa57c95e60 100644
--- a/pkg/client/listers/eventing/v1alpha1/subscription.go
+++ b/pkg/client/listers/eventing/v1alpha1/subscription.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/listers/eventing/v1alpha1/trigger.go b/pkg/client/listers/eventing/v1alpha1/trigger.go
index 56c323a8238..ffa132e1fb2 100644
--- a/pkg/client/listers/eventing/v1alpha1/trigger.go
+++ b/pkg/client/listers/eventing/v1alpha1/trigger.go
@@ -1,5 +1,5 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
From 5f9198cc9d4d2e7da1198bfbab808e19da986492 Mon Sep 17 00:00:00 2001
From: Ville Aikas
Date: Tue, 23 Apr 2019 15:05:28 -0700
Subject: [PATCH 45/76] Issue 734 Move trigger to knative/pkg/controller
(#1079)
* Initial changes, compiles sans tests
* merge in pr1071 to trigger.go
* Use the new test format, just one test
* Start the informers...
* after running hack/update-codegen
* address pr comments, use tracker
* run hack/update-codegen
* yeah, enqueue the right type (trigger), not reconciler... doh
* yeah, enqueue the right type (trigger), not reconciler... doh. part 2
* Add unit tests. update pkg
* remove cruft
* more cruft removed
* address pr comments
---
Gopkg.lock | 6 +-
Gopkg.toml | 4 +-
cmd/controller/main.go | 25 +-
docs/broker/README.md | 9 +-
pkg/broker/receiver.go | 2 +-
pkg/reconciler/testing/listers.go | 12 +
pkg/reconciler/testing/trigger.go | 108 +++
.../{v1alpha1 => }/trigger/path/path.go | 0
.../trigger/resources/subscription.go | 0
pkg/reconciler/trigger/trigger.go | 424 ++++++++++
pkg/reconciler/trigger/trigger_test.go | 441 +++++++++++
pkg/reconciler/v1alpha1/trigger/trigger.go | 451 -----------
.../v1alpha1/trigger/trigger_test.go | 738 ------------------
.../github.com/knative/pkg/apis/deprecated.go | 180 +++++
.../apis/duck/v1alpha1/addressable_types.go | 8 +-
.../pkg/apis/duck/v1alpha1/condition_set.go | 2 +-
.../duck/v1alpha1/retired_targetable_types.go | 10 +-
.../knative/pkg/apis/field_error.go | 26 +
.../istio/v1alpha3/virtualservice_types.go | 2 +-
.../github.com/knative/pkg/metrics/config.go | 2 +
.../knative/pkg/metrics/exporter.go | 21 +
.../knative/pkg/reconciler/testing/table.go | 10 +-
.../knative/pkg/test/spoof/spoof.go | 2 +-
.../github.com/knative/pkg/webhook/webhook.go | 2 +
24 files changed, 1270 insertions(+), 1215 deletions(-)
create mode 100644 pkg/reconciler/testing/trigger.go
rename pkg/reconciler/{v1alpha1 => }/trigger/path/path.go (100%)
rename pkg/reconciler/{v1alpha1 => }/trigger/resources/subscription.go (100%)
create mode 100644 pkg/reconciler/trigger/trigger.go
create mode 100644 pkg/reconciler/trigger/trigger_test.go
delete mode 100644 pkg/reconciler/v1alpha1/trigger/trigger.go
delete mode 100644 pkg/reconciler/v1alpha1/trigger/trigger_test.go
create mode 100644 vendor/github.com/knative/pkg/apis/deprecated.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 7d6b6d09c1b..a23c284149b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -425,7 +425,7 @@
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
[[projects]]
- digest = "1:b657ec75371e8baf47023f7eb4f20d78e2a744f51ec824b40dd6cf74ad6fdaf4"
+ digest = "1:57d04562d05dd4500ff1e7e47f2e62b9be0531388377a3b691a012ce70b210d5"
name = "github.com/knative/pkg"
packages = [
"apis",
@@ -468,7 +468,7 @@
"webhook",
]
pruneopts = "NUT"
- revision = "418e675f88c29db0217475a74e61ebfa239f0f50"
+ revision = "6916051a96ae4d87d0e0e557a3c3b887ee4a3cea"
[[projects]]
branch = "master"
@@ -1380,6 +1380,7 @@
"github.com/knative/pkg/system/testing",
"github.com/knative/pkg/test",
"github.com/knative/pkg/test/logging",
+ "github.com/knative/pkg/tracker",
"github.com/knative/pkg/webhook",
"github.com/knative/test-infra/scripts",
"github.com/knative/test-infra/tools/dep-collector",
@@ -1424,6 +1425,7 @@
"k8s.io/client-go/dynamic",
"k8s.io/client-go/dynamic/fake",
"k8s.io/client-go/informers",
+ "k8s.io/client-go/informers/core/v1",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/kubernetes/scheme",
diff --git a/Gopkg.toml b/Gopkg.toml
index 524bd6aff1c..155bd1cfa10 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -102,8 +102,8 @@ required = [
# This controls when we upgrade apis independently of Serving.
[[override]]
name = "github.com/knative/pkg"
- # HEAD as of 2019-04-17
- revision = "418e675f88c29db0217475a74e61ebfa239f0f50"
+ # HEAD as of 2019-04-23
+ revision = "6916051a96ae4d87d0e0e557a3c3b887ee4a3cea"
# TODO why is this overridden?
[[override]]
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 2eea7fdd832..8aa7523e323 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -19,7 +19,6 @@ package main
import (
"context"
"flag"
- "github.com/knative/eventing/pkg/reconciler/subscription"
"log"
"net/http"
"os"
@@ -37,10 +36,11 @@ import (
"github.com/knative/eventing/pkg/logconfig"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/subscription"
+ "github.com/knative/eventing/pkg/reconciler/trigger"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/channel"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
kncontroller "github.com/knative/pkg/controller"
@@ -95,7 +95,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 1
+ const numControllers = 2
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -103,7 +103,12 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod)
eventingInformerFactory := informers.NewSharedInformerFactory(opt.EventingClientSet, opt.ResyncPeriod)
+ triggerInformer := eventingInformerFactory.Eventing().V1alpha1().Triggers()
+ channelInformer := eventingInformerFactory.Eventing().V1alpha1().Channels()
subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
+ brokerInformer := eventingInformerFactory.Eventing().V1alpha1().Brokers()
+ coreServiceInformer := kubeInformerFactory.Core().V1().Services()
+
// TODO: remove unused after done integrating all controllers.
//deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
//coreServiceInformer := kubeInformerFactory.Core().V1().Services()
@@ -111,11 +116,20 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
// Build all of our controllers, with the clients constructed above.
// Add new controllers to this array.
+ // You also need to modify numControllers above to match this.
controllers := []*kncontroller.Impl{
subscription.NewController(
opt,
subscriptionInformer,
),
+ trigger.NewController(
+ opt,
+ triggerInformer,
+ channelInformer,
+ subscriptionInformer,
+ brokerInformer,
+ coreServiceInformer,
+ ),
}
if len(controllers) != numControllers {
logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
@@ -135,6 +149,10 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
stopCh,
subscriptionInformer.Informer(),
configMapInformer.Informer(),
+ triggerInformer.Informer(),
+ channelInformer.Informer(),
+ brokerInformer.Informer(),
+ coreServiceInformer.Informer(),
); err != nil {
logger.Fatalf("Failed to start informers: %v", err)
}
@@ -189,7 +207,6 @@ func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *za
FilterImage: getRequiredEnv("BROKER_FILTER_IMAGE"),
FilterServiceAccountName: getRequiredEnv("BROKER_FILTER_SERVICE_ACCOUNT"),
}),
- trigger.ProvideController,
namespace.ProvideController,
}
for _, provider := range providers {
diff --git a/docs/broker/README.md b/docs/broker/README.md
index bc9db2011e4..e95ca70422a 100644
--- a/docs/broker/README.md
+++ b/docs/broker/README.md
@@ -321,10 +321,15 @@ reconciles:
### Trigger
`Trigger`s are reconciled by the
-[Trigger Reconciler](../../pkg/reconciler/v1alpha1/trigger). For each `Trigger`,
+[Trigger Reconciler](../../pkg/reconciler/trigger). For each `Trigger`,
it reconciles:
-1. Determines the subscriber's URI.
+1. Verify the Broker Exists
+1. Get the Broker's:
+ - Trigger Channel
+ - Ingress Channel
+ - Filter Service
+1. Determine the Subscriber's URI
- Currently uses the same logic as the `Subscription` Reconciler, so supports
Addressables and Kubernetes `Service`s.
1. Creates a `Subscription` from the `Broker`'s 'trigger' `Channel` to the
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index d0fb081ae98..675af6a3ca9 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -25,7 +25,7 @@ import (
"github.com/cloudevents/sdk-go"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path"
+ "github.com/knative/eventing/pkg/reconciler/trigger/path"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 814135227c2..310ccb568c3 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -98,6 +98,18 @@ func (l *Listers) GetSubscriptionLister() eventinglisters.SubscriptionLister {
return eventinglisters.NewSubscriptionLister(l.indexerFor(&eventingv1alpha1.Subscription{}))
}
+func (l *Listers) GetTriggerLister() eventinglisters.TriggerLister {
+ return eventinglisters.NewTriggerLister(l.indexerFor(&eventingv1alpha1.Trigger{}))
+}
+
+func (l *Listers) GetBrokerLister() eventinglisters.BrokerLister {
+ return eventinglisters.NewBrokerLister(l.indexerFor(&eventingv1alpha1.Broker{}))
+}
+
+func (l *Listers) GetChannelLister() eventinglisters.ChannelLister {
+ return eventinglisters.NewChannelLister(l.indexerFor(&eventingv1alpha1.Channel{}))
+}
+
func (l *Listers) GetVirtualServiceLister() istiolisters.VirtualServiceLister {
return istiolisters.NewVirtualServiceLister(l.indexerFor(&istiov1alpha3.VirtualService{}))
}
diff --git a/pkg/reconciler/testing/trigger.go b/pkg/reconciler/testing/trigger.go
new file mode 100644
index 00000000000..e752db1e3cc
--- /dev/null
+++ b/pkg/reconciler/testing/trigger.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "time"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TriggerOption enables further configuration of a Trigger.
+type TriggerOption func(*v1alpha1.Trigger)
+
+// NewTrigger creates a Trigger with TriggerOptions.
+func NewTrigger(name, namespace, broker string, to ...TriggerOption) *v1alpha1.Trigger {
+ t := &v1alpha1.Trigger{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: v1alpha1.TriggerSpec{
+ Broker: broker,
+ },
+ }
+ for _, opt := range to {
+ opt(t)
+ }
+ t.SetDefaults(context.Background())
+ return t
+}
+
+func WithTriggerSubscriberURI(uri string) TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Spec.Subscriber = &v1alpha1.SubscriberSpec{URI: &uri}
+ }
+}
+
+func WithTriggerSubscriberRef(gvk metav1.GroupVersionKind, name string) TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Spec.Subscriber = &v1alpha1.SubscriberSpec{
+ Ref: &corev1.ObjectReference{
+ APIVersion: apiVersion(gvk),
+ Kind: gvk.Kind,
+ Name: name,
+ },
+ }
+ }
+}
+
+// WithInitTriggerConditions initializes the Trigger's conditions.
+func WithInitTriggerConditions(t *v1alpha1.Trigger) {
+ t.Status.InitializeConditions()
+}
+
+// WithTriggerBrokerReady marks the Trigger's Broker condition as ready.
+func WithTriggerBrokerReady() TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Status.PropagateBrokerStatus(v1alpha1.TestHelper.ReadyBrokerStatus())
+ }
+}
+
+// WithTriggerBrokerFailed marks the Trigger's Broker condition as failed with the given reason and message.
+func WithTriggerBrokerFailed(reason, message string) TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Status.MarkBrokerFailed(reason, message)
+ }
+}
+
+func WithTriggerNotSubscribed(reason, message string) TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Status.MarkNotSubscribed(reason, message)
+ }
+}
+
+func WithTriggerSubscribed() TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Status.PropagateSubscriptionStatus(v1alpha1.TestHelper.ReadySubscriptionStatus())
+ }
+}
+
+func WithTriggerStatusSubscriberURI(uri string) TriggerOption {
+ return func(t *v1alpha1.Trigger) {
+ t.Status.SubscriberURI = uri
+ }
+}
+
+// TODO: this can be a runtime object
+func WithTriggerDeleted(t *v1alpha1.Trigger) {
+ deleteTime := metav1.NewTime(time.Unix(1e9, 0))
+ t.ObjectMeta.SetDeletionTimestamp(&deleteTime)
+}
diff --git a/pkg/reconciler/v1alpha1/trigger/path/path.go b/pkg/reconciler/trigger/path/path.go
similarity index 100%
rename from pkg/reconciler/v1alpha1/trigger/path/path.go
rename to pkg/reconciler/trigger/path/path.go
diff --git a/pkg/reconciler/v1alpha1/trigger/resources/subscription.go b/pkg/reconciler/trigger/resources/subscription.go
similarity index 100%
rename from pkg/reconciler/v1alpha1/trigger/resources/subscription.go
rename to pkg/reconciler/trigger/resources/subscription.go
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
new file mode 100644
index 00000000000..d681f47274f
--- /dev/null
+++ b/pkg/reconciler/trigger/trigger.go
@@ -0,0 +1,424 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trigger
+
+import (
+ "context"
+ "errors"
+ "net/url"
+ "reflect"
+ "time"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/names"
+ "github.com/knative/eventing/pkg/reconciler/trigger/path"
+ "github.com/knative/eventing/pkg/reconciler/trigger/resources"
+ "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
+ brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
+ "github.com/knative/eventing/pkg/utils/resolve"
+ "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/tracker"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ corev1informers "k8s.io/client-go/informers/core/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "Triggers"
+
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "trigger-controller"
+
+ finalizerName = controllerAgentName
+
+ // Name of the corev1.Events emitted from the reconciliation process.
+ triggerReconciled = "TriggerReconciled"
+ triggerReconcileFailed = "TriggerReconcileFailed"
+ triggerUpdateStatusFailed = "TriggerUpdateStatusFailed"
+ subscriptionDeleteFailed = "SubscriptionDeleteFailed"
+ subscriptionCreateFailed = "SubscriptionCreateFailed"
+ triggerChannelFailed = "TriggerChannelFailed"
+ ingressChannelFailed = "IngressChannelFailed"
+ triggerServiceFailed = "TriggerServiceFailed"
+)
+
+type Reconciler struct {
+ *reconciler.Base
+
+ triggerLister listers.TriggerLister
+ channelLister listers.ChannelLister
+ subscriptionLister listers.SubscriptionLister
+ brokerLister listers.BrokerLister
+ serviceLister corev1listers.ServiceLister
+ tracker tracker.Interface
+}
+
+var brokerGVK = v1alpha1.SchemeGroupVersion.WithKind("Broker")
+
+// Check that our Reconciler implements controller.Reconciler.
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code.
+// Registers event handlers to enqueue events.
+func NewController(
+ opt reconciler.Options,
+ triggerInformer eventinginformers.TriggerInformer,
+ channelInformer eventinginformers.ChannelInformer,
+ subscriptionInformer eventinginformers.SubscriptionInformer,
+ brokerInformer eventinginformers.BrokerInformer,
+ serviceInformer corev1informers.ServiceInformer,
+) *controller.Impl {
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ triggerLister: triggerInformer.Lister(),
+ channelLister: channelInformer.Lister(),
+ subscriptionLister: subscriptionInformer.Lister(),
+ brokerLister: brokerInformer.Lister(),
+ serviceLister: serviceInformer.Lister(),
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+ triggerInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ // Tracker is used to notify us that a Trigger's Broker has changed so that
+ // we can reconcile.
+ r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease())
+ brokerInformer.Informer().AddEventHandler(reconciler.Handler(
+ // Call the tracker's OnChanged method, but we've seen the objects
+ // coming through this path missing TypeMeta, so ensure it is properly
+ // populated.
+ controller.EnsureTypeMeta(
+ r.tracker.OnChanged,
+ v1alpha1.SchemeGroupVersion.WithKind("Broker"),
+ ),
+ ))
+
+ subscriptionInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Trigger")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
+ return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the Trigger resource
+// with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
+
+ // Convert the namespace/name string into a distinct namespace and name.
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the Trigger resource with this namespace/name.
+ original, err := r.triggerLister.Triggers(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("trigger key in work queue no longer exists")
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Don't modify the informers copy.
+ trigger := original.DeepCopy()
+
+ // Reconcile this copy of the Trigger and then write back any status updates regardless of
+ // whether the reconcile errored out.
+ err = r.reconcile(ctx, trigger)
+ if err != nil {
+ logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(err))
+ r.Recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", err)
+ } else {
+ logging.FromContext(ctx).Debug("Trigger reconciled")
+ r.Recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
+ }
+
+ if _, updateStatusErr := r.updateStatus(ctx, trigger); updateStatusErr != nil {
+ logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(updateStatusErr))
+ r.Recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
+ return updateStatusErr
+ }
+
+ // Requeue if the resource is not ready
+ return err
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
+ t.Status.InitializeConditions()
+
+ // 1. Verify the Broker exists.
+ // 2. Get the Broker's:
+ // - Trigger Channel
+ // - Ingress Channel
+ // - Filter Service
+ // 3. Find the Subscriber's URI.
+ // 4. Create a Subscription from the Broker's Trigger Channel to this Trigger via the Broker's
+ // Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
+
+ if t.DeletionTimestamp != nil {
+ // Everything is cleaned up by the garbage collector.
+ return nil
+ }
+
+ b, err := r.brokerLister.Brokers(t.Namespace).Get(t.Spec.Broker)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
+ if apierrs.IsNotFound(err) {
+ t.Status.MarkBrokerFailed("DoesNotExist", "Broker does not exist")
+ } else {
+ t.Status.MarkBrokerFailed("BrokerGetFailed", "Failed to get broker")
+ }
+ return err
+ }
+ t.Status.PropagateBrokerStatus(&b.Status)
+
+ // Tell tracker to reconcile this Trigger whenever the Broker changes.
+ if err = r.tracker.Track(objectRef(b, brokerGVK), t); err != nil {
+ logging.FromContext(ctx).Error("Unable to track changes to Broker", zap.Error(err))
+ return err
+ }
+
+ brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
+ if err != nil {
+ if apierrs.IsNotFound(err) {
+ logging.FromContext(ctx).Error("can not find Broker's Trigger Channel", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, triggerChannelFailed, "Broker's Trigger channel not found")
+ return errors.New("failed to find Broker's Trigger channel")
+ } else {
+ logging.FromContext(ctx).Error("failed to get Broker's Trigger Channel", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, triggerChannelFailed, "Failed to get Broker's Trigger channel")
+ return err
+ }
+ }
+ brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
+ if err != nil {
+ if apierrs.IsNotFound(err) {
+ logging.FromContext(ctx).Error("can not find Broker's Ingress Channel", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, ingressChannelFailed, "Broker's Ingress channel not found")
+ return errors.New("failed to find Broker's Ingress channel")
+ } else {
+ logging.FromContext(ctx).Error("failed to get Broker's Ingress Channel", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, ingressChannelFailed, "Failed to get Broker's Ingress channel")
+ return err
+ }
+ }
+
+ // Get Broker filter service.
+ filterSvc, err := r.getBrokerFilterService(ctx, b)
+ if err != nil {
+ if apierrs.IsNotFound(err) {
+ logging.FromContext(ctx).Error("can not find Broker's Filter service", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, triggerServiceFailed, "Broker's Filter service not found")
+ return errors.New("failed to find Broker's Filter service")
+ } else {
+ logging.FromContext(ctx).Error("failed to get Broker's Filter service", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, triggerServiceFailed, "Failed to get Broker's Filter service")
+ return err
+ }
+ }
+
+ subscriberURI, err := resolve.SubscriberSpec(ctx, r.DynamicClientSet, t.Namespace, t.Spec.Subscriber)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
+ return err
+ }
+ t.Status.SubscriberURI = subscriberURI
+
+ sub, err := r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
+ t.Status.MarkNotSubscribed("NotSubscribed", "%v", err)
+ return err
+ }
+ t.Status.PropagateSubscriptionStatus(&sub.Status)
+
+ return nil
+}
+
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
+ trigger, err := r.triggerLister.Triggers(desired.Namespace).Get(desired.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ if reflect.DeepEqual(trigger.Status, desired.Status) {
+ return trigger, nil
+ }
+
+ becomesReady := desired.Status.IsReady() && !trigger.Status.IsReady()
+
+ // Don't modify the informers copy.
+ existing := trigger.DeepCopy()
+ existing.Status = desired.Status
+
+ new, err := r.EventingClientSet.EventingV1alpha1().Triggers(desired.Namespace).UpdateStatus(existing)
+ if err == nil && becomesReady {
+ duration := time.Since(new.ObjectMeta.CreationTimestamp.Time)
+ r.Logger.Infof("Subscription %q became ready after %v", trigger.Name, duration)
+ //r.StatsReporter.ReportServiceReady(trigger.Namespace, trigger.Name, duration) // TODO: stats
+ }
+
+ return new, err
+}
+
+// getBrokerTriggerChannel returns the Broker's Trigger Channel if it exists, otherwise it returns an
+// error.
+func (r *Reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
+ return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b)))
+}
+
+// getBrokerIngressChannel returns the Broker's Ingress Channel if it exists, otherwise it returns an
+// error.
+func (r *Reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
+ return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b)))
+}
+
+// getChannel returns the Broker's channel based on the provided label selector if it exists, otherwise it returns an error.
+func (r *Reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) {
+ channels, err := r.channelLister.Channels(b.Namespace).List(ls)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: If there's more than one, should that be treated as an error? This seems a bit wonky
+ // but that's how it was before.
+ for _, c := range channels {
+ if metav1.IsControlledBy(c, b) {
+ return c, nil
+ }
+ }
+ return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
+}
+
+// getBrokerFilterService returns the K8s filter service for broker 'b' if it exists,
+// otherwise it returns an error.
+func (r *Reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
+ services, err := r.serviceLister.Services(b.Namespace).List(labels.SelectorFromSet(brokerresources.FilterLabels(b)))
+ if err != nil {
+ return nil, err
+ }
+ for _, svc := range services {
+ if metav1.IsControlledBy(svc, b) {
+ return svc, nil
+ }
+ }
+
+ return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
+}
+
+// subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels.
+func (r *Reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {
+ uri := &url.URL{
+ Scheme: "http",
+ Host: names.ServiceHostName(svc.Name, svc.Namespace),
+ Path: path.Generate(t),
+ }
+ expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri)
+
+ sub, err := r.getSubscription(ctx, t)
+
+ // If the resource doesn't exist, we'll create it.
+ if apierrs.IsNotFound(err) {
+ sub = expected
+ newSub, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Create(sub)
+ if err != nil {
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
+ return nil, err
+ }
+ return newSub, nil
+ } else if err != nil {
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
+ return nil, err
+ }
+
+ // Update Subscription if it has changed. Ignore the generation.
+ expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration
+ if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {
+ // Given that spec.channel is immutable, we cannot just update the Subscription. We delete
+ // it and re-create it instead.
+ err = r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Delete(sub.Name, &metav1.DeleteOptions{})
+ if err != nil {
+ logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err)
+ return nil, err
+ }
+ sub = expected
+ newSub, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Create(sub)
+ if err != nil {
+ logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err))
+ r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
+ return nil, err
+ }
+ return newSub, nil
+ }
+ return sub, nil
+}
+
+// getSubscription returns the Subscription of trigger 't' if it exists,
+// otherwise it returns an error.
+func (r *Reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) {
+ subs, err := r.subscriptionLister.Subscriptions(t.Namespace).List(labels.SelectorFromSet(resources.SubscriptionLabels(t)))
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range subs {
+ if metav1.IsControlledBy(s, t) {
+ return s, nil
+ }
+ }
+
+ return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
+}
+
+type accessor interface {
+ GroupVersionKind() schema.GroupVersionKind
+ GetNamespace() string
+ GetName() string
+}
+
+func objectRef(a accessor, gvk schema.GroupVersionKind) corev1.ObjectReference {
+ // We can't always rely on the TypeMeta being populated.
+ // See: https://github.com/knative/serving/issues/2372
+ // Also: https://github.com/kubernetes/apiextensions-apiserver/issues/29
+ // gvk := a.GroupVersionKind()
+ apiVersion, kind := gvk.ToAPIVersionAndKind()
+ return corev1.ObjectReference{
+ APIVersion: apiVersion,
+ Kind: kind,
+ Namespace: a.GetNamespace(),
+ Name: a.GetName(),
+ }
+}
diff --git a/pkg/reconciler/trigger/trigger_test.go b/pkg/reconciler/trigger/trigger_test.go
new file mode 100644
index 00000000000..1b5d4210364
--- /dev/null
+++ b/pkg/reconciler/trigger/trigger_test.go
@@ -0,0 +1,441 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trigger
+
+import (
+ // "context"
+ // "errors"
+ "fmt"
+ "net/url"
+ "testing"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/reconciler"
+ reconciletesting "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/eventing/pkg/reconciler/trigger/resources"
+ brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
+ "github.com/knative/eventing/pkg/utils"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+ "github.com/knative/pkg/tracker"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+)
+
+const (
+ testNS = "test-namespace"
+ triggerName = "test-trigger"
+ brokerName = "test-broker"
+
+ subscriberAPIVersion = "v1"
+ subscriberKind = "Service"
+ subscriberName = "subscriberName"
+ subscriberURI = "http://example.com/subscriber"
+)
+
+var (
+ trueVal = true
+ // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
+ // truncates to seconds to match the loss of precision during serialization.
+ deletionTime = metav1.Now().Rfc3339Copy()
+
+ // Map of events to set test cases' expectations easier.
+ events = map[string]corev1.Event{
+ triggerReconciled: {Reason: triggerReconciled, Type: corev1.EventTypeNormal},
+ triggerUpdateStatusFailed: {Reason: triggerUpdateStatusFailed, Type: corev1.EventTypeWarning},
+ triggerReconcileFailed: {Reason: triggerReconcileFailed, Type: corev1.EventTypeWarning},
+ subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning},
+ subscriptionCreateFailed: {Reason: subscriptionCreateFailed, Type: corev1.EventTypeWarning},
+ }
+)
+
+func init() {
+ // Add types to scheme
+ _ = v1alpha1.AddToScheme(scheme.Scheme)
+ _ = duckv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestAllCases(t *testing.T) {
+ triggerKey := testNS + "/" + triggerName
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ }, { // TODO: there is a bug in the controller, it will query for ""
+ // Name: "trigger key not found ",
+ // Objects: []runtime.Object{
+ // reconciletesting.NewTrigger(triggerName, testNS),
+ // },
+ // Key: "foo/incomplete",
+ // WantErr: true,
+ // WantEvents: []string{
+ // Eventf(corev1.EventTypeWarning, "ChannelReferenceFetchFailed", "Failed to validate spec.channel exists: s \"\" not found"),
+ // },
+ }, {
+ Name: "Broker not found",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI)),
+ },
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: broker.eventing.knative.dev \"test-broker\" not found"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerFailed("DoesNotExist", "Broker does not exist"),
+ ),
+ }},
+ }, {
+ Name: "Trigger being deleted",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerDeleted),
+ },
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "TriggerReconciled", "Trigger reconciled"),
+ },
+ }, {
+ Name: "No Broker Trigger Channel",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "TriggerChannelFailed", "Broker's Trigger channel not found"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: failed to find Broker's Trigger channel"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ ),
+ }},
+ }, {
+ Name: "No Broker Ingress Channel",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "IngressChannelFailed", "Broker's Ingress channel not found"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: failed to find Broker's Ingress channel"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ ),
+ }},
+ }, {
+ Name: "No Broker Filter Service",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "TriggerServiceFailed", "Broker's Filter service not found"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: failed to find Broker's Filter service"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ ),
+ }},
+ }, {
+ Name: "Subscription Created, not ready",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "TriggerReconciled", "Trigger reconciled"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("SubscriptionNotReady", "Subscription is not ready: nil"),
+ reconciletesting.WithTriggerStatusSubscriberURI(makeServiceURI().String()),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeIngressSubscription(),
+ },
+ }, {
+			Name: "Subscription Created, ready",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ makeReadySubscription(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "TriggerReconciled", "Trigger reconciled"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerSubscribed(),
+ reconciletesting.WithTriggerStatusSubscriberURI(makeServiceURI().String()),
+ ),
+ }},
+ },
+ }
+
+ defer logtesting.ClearAll()
+
+ table.Test(t, reconciletesting.MakeFactory(func(listers *reconciletesting.Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ triggerLister: listers.GetTriggerLister(),
+ channelLister: listers.GetChannelLister(),
+ subscriptionLister: listers.GetSubscriptionLister(),
+ brokerLister: listers.GetBrokerLister(),
+ serviceLister: listers.GetK8sServiceLister(),
+ tracker: tracker.New(func(string) {}, 0),
+ }
+
+ }))
+}
+
+func makeTrigger() *v1alpha1.Trigger {
+ return &v1alpha1.Trigger{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ Kind: "Trigger",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNS,
+ Name: triggerName,
+ },
+ Spec: v1alpha1.TriggerSpec{
+ Broker: brokerName,
+ Filter: &v1alpha1.TriggerFilter{
+ SourceAndType: &v1alpha1.TriggerFilterSourceAndType{
+ Source: "Any",
+ Type: "Any",
+ },
+ },
+ Subscriber: &v1alpha1.SubscriberSpec{
+ Ref: &corev1.ObjectReference{
+ Name: subscriberName,
+ Kind: subscriberKind,
+ APIVersion: subscriberAPIVersion,
+ },
+ },
+ },
+ }
+}
+
+func makeReadyTrigger() *v1alpha1.Trigger {
+ t := makeTrigger()
+ t.Status = *v1alpha1.TestHelper.ReadyTriggerStatus()
+ t.Status.SubscriberURI = fmt.Sprintf("http://%s.%s.svc.%s/", subscriberName, testNS, utils.GetClusterDomainName())
+ return t
+}
+
+func makeDeletingTrigger() *v1alpha1.Trigger {
+ b := makeReadyTrigger()
+ b.DeletionTimestamp = &deletionTime
+ return b
+}
+
+func makeTriggerWithNamespaceAndName(namespace, name string) *v1alpha1.Trigger {
+ t := makeTrigger()
+ t.Namespace = namespace
+ t.Name = name
+ return t
+}
+
+func makeBroker() *v1alpha1.Broker {
+ return &v1alpha1.Broker{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ Kind: "Broker",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNS,
+ Name: brokerName,
+ },
+ Spec: v1alpha1.BrokerSpec{
+ ChannelTemplate: &v1alpha1.ChannelSpec{
+ Provisioner: makeChannelProvisioner(),
+ },
+ },
+ }
+}
+
+func makeReadyBroker() *v1alpha1.Broker {
+ b := makeBroker()
+ b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
+ return b
+}
+
+func makeChannelProvisioner() *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ Kind: "ClusterChannelProvisioner",
+ Name: "my-provisioner",
+ }
+}
+
+func newChannel(name string, labels map[string]string) *v1alpha1.Channel {
+ return &v1alpha1.Channel{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: testNS,
+ Name: name,
+ Labels: labels,
+ OwnerReferences: []metav1.OwnerReference{
+ getOwnerReference(),
+ },
+ },
+ Spec: v1alpha1.ChannelSpec{
+ Provisioner: makeChannelProvisioner(),
+ },
+ Status: v1alpha1.ChannelStatus{
+ Address: duckv1alpha1.Addressable{
+ Hostname: "any-non-empty-string",
+ },
+ },
+ }
+}
+
+func makeTriggerChannel() *v1alpha1.Channel {
+ labels := map[string]string{
+ "eventing.knative.dev/broker": brokerName,
+ "eventing.knative.dev/brokerEverything": "true",
+ }
+ return newChannel(fmt.Sprintf("%s-broker", brokerName), labels)
+}
+
+func makeIngressChannel() *v1alpha1.Channel {
+ labels := map[string]string{
+ "eventing.knative.dev/broker": brokerName,
+ "eventing.knative.dev/brokerIngress": "true",
+ }
+ return newChannel(fmt.Sprintf("%s-broker-ingress", brokerName), labels)
+}
+
+func makeSubscriberServiceAsUnstructured() *unstructured.Unstructured {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": subscriberName,
+ },
+ },
+ }
+}
+
+func makeBrokerFilterService() *corev1.Service {
+ return brokerresources.MakeFilterService(makeBroker())
+}
+
+func makeServiceURI() *url.URL {
+ return &url.URL{
+ Scheme: "http",
+ Host: fmt.Sprintf("%s.%s.svc.%s", makeBrokerFilterService().Name, testNS, utils.GetClusterDomainName()),
+ Path: fmt.Sprintf("/triggers/%s/%s", testNS, triggerName),
+ }
+}
+
+func makeIngressSubscription() *v1alpha1.Subscription {
+ return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeIngressChannel(), makeServiceURI())
+}
+
+func makeReadySubscription() *v1alpha1.Subscription {
+ s := makeIngressSubscription()
+ s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
+ return s
+}
+
+func getOwnerReference() metav1.OwnerReference {
+ return metav1.OwnerReference{
+ APIVersion: v1alpha1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: brokerName,
+ Controller: &trueVal,
+ BlockOwnerDeletion: &trueVal,
+ }
+}
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger.go b/pkg/reconciler/v1alpha1/trigger/trigger.go
deleted file mode 100644
index 24cc38bb9b3..00000000000
--- a/pkg/reconciler/v1alpha1/trigger/trigger.go
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
-Copyright 2018 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package trigger
-
-import (
- "context"
- "net/url"
-
- "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/logging"
- "github.com/knative/eventing/pkg/reconciler/names"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
- brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
- "github.com/knative/eventing/pkg/utils/resolve"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
- "k8s.io/apimachinery/pkg/api/errors"
- k8serrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/record"
- "sigs.k8s.io/controller-runtime/pkg/client"
- runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-const (
- // controllerAgentName is the string used by this controller to identify
- // itself when creating events.
- controllerAgentName = "trigger-controller"
-
- // Name of the corev1.Events emitted from the reconciliation process.
- triggerReconciled = "TriggerReconciled"
- triggerReconcileFailed = "TriggerReconcileFailed"
- triggerUpdateStatusFailed = "TriggerUpdateStatusFailed"
- subscriptionDeleteFailed = "SubscriptionDeleteFailed"
- subscriptionCreateFailed = "SubscriptionCreateFailed"
-)
-
-type reconciler struct {
- client client.Client
- dynamicClient dynamic.Interface
- recorder record.EventRecorder
-
- logger *zap.Logger
-}
-
-// Verify the struct implements reconcile.Reconciler.
-var _ reconcile.Reconciler = &reconciler{}
-
-// ProvideController returns a function that returns a Trigger controller.
-func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
- // Setup a new controller to Reconcile Triggers.
- r := &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
- }
- c, err := controller.New(controllerAgentName, mgr, controller.Options{
- Reconciler: r,
- })
- if err != nil {
- return nil, err
- }
-
- // Watch Triggers.
- if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil {
- return nil, err
- }
-
- // Watch all the resources that the Trigger reconciles.
- for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} {
- err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true})
- if err != nil {
- return nil, err
- }
- }
-
- // Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile
- // the Trigger again.
- if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil {
- return nil, err
- }
-
- // TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we
- // can find a way.
-
- return c, nil
-}
-
-// mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker.
-type mapBrokerToTriggers struct {
- r *reconciler
-}
-
-// Map implements handler.Mapper.Map.
-func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request {
- ctx := context.Background()
- triggers := make([]reconcile.Request, 0)
-
- opts := &client.ListOptions{
- Namespace: o.Meta.GetNamespace(),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
- for {
- tl := &v1alpha1.TriggerList{}
- if err := b.r.client.List(ctx, opts, tl); err != nil {
- b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o))
- return triggers
- }
-
- for _, t := range tl.Items {
- if t.Spec.Broker == o.Meta.GetName() {
- triggers = append(triggers, reconcile.Request{
- NamespacedName: types.NamespacedName{
- Namespace: t.Namespace,
- Name: t.Name,
- },
- })
- }
- }
- if tl.Continue != "" {
- opts.Raw.Continue = tl.Continue
- } else {
- return triggers
- }
- }
-}
-
-// InjectClient implements controller runtime's inject.Client.
-func (r *reconciler) InjectClient(c client.Client) error {
- r.client = c
- return nil
-}
-
-// InjectConfig implements controller runtime's inject.Config.
-func (r *reconciler) InjectConfig(c *rest.Config) error {
- var err error
- r.dynamicClient, err = dynamic.NewForConfig(c)
- return err
-}
-
-// Reconcile compares the actual state with the desired, and attempts to
-// converge the two. It then updates the Status block of the Trigger resource
-// with the current status of the resource.
-func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ctx := context.TODO()
- ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
-
- trigger := &v1alpha1.Trigger{}
- err := r.client.Get(ctx, request.NamespacedName, trigger)
-
- if errors.IsNotFound(err) {
- logging.FromContext(ctx).Info("Could not find Trigger")
- return reconcile.Result{}, nil
- }
-
- if err != nil {
- logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err))
- return reconcile.Result{}, err
- }
-
- // Reconcile this copy of the Trigger and then write back any status updates regardless of
- // whether the reconcile error out.
- reconcileErr := r.reconcile(ctx, trigger)
- if reconcileErr != nil {
- logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr))
- r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr)
- } else {
- logging.FromContext(ctx).Debug("Trigger reconciled")
- r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled")
- }
-
- if _, err = r.updateStatus(trigger); err != nil {
- logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err))
- r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err)
- return reconcile.Result{}, err
- }
-
- // Requeue if the resource is not ready
- return reconcile.Result{}, reconcileErr
-}
-
-func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
- t.Status.InitializeConditions()
-
- // 1. Verify the Broker exists.
- // 2. Get the Broker's:
- // - Filter Channel
- // - Ingress Channel
- // - Filter Service
- // 3. Find the Subscriber's URI.
- // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's
- // Filter Service with a specific path, and reply set to the Broker's Ingress Channel.
-
- if t.DeletionTimestamp != nil {
- // Everything is cleaned up by the garbage collector.
- return nil
- }
-
- b, err := r.getBroker(ctx, t)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
- t.Status.MarkBrokerFailed("DoesNotExist", "Broker does not exist")
- return err
- }
- t.Status.PropagateBrokerStatus(&b.Status)
-
- brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err))
- return err
- }
- brokerIngress, err := r.getBrokerIngressChannel(ctx, b)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err))
- return err
- }
- // Get Broker filter service.
- filterSvc, err := r.getBrokerFilterService(ctx, b)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err))
- return err
- }
-
- subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
- return err
- }
- t.Status.SubscriberURI = subscriberURI
-
- sub, err := r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err))
- t.Status.MarkNotSubscribed("NotSubscribed", "%v", err)
- return err
- }
- t.Status.PropagateSubscriptionStatus(&sub.Status)
-
- return nil
-}
-
-// updateStatus may in fact update the trigger's finalizers in addition to the status.
-func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) {
- ctx := context.TODO()
- objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name}
- latestTrigger := &v1alpha1.Trigger{}
-
- if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
- return nil, err
- }
-
- triggerChanged := false
-
- if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) {
- latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers)
- if err := r.client.Update(ctx, latestTrigger); err != nil {
- return nil, err
- }
- triggerChanged = true
- }
-
- if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) {
- return latestTrigger, nil
- }
-
- if triggerChanged {
- // Refetch
- latestTrigger = &v1alpha1.Trigger{}
- if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil {
- return nil, err
- }
- }
-
- latestTrigger.Status = trigger.Status
- if err := r.client.Status().Update(ctx, latestTrigger); err != nil {
- return nil, err
- }
-
- return latestTrigger, nil
-}
-
-// getBroker returns the Broker for Trigger 't' if exists, otherwise it returns an error.
-func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) {
- b := &v1alpha1.Broker{}
- name := types.NamespacedName{
- Namespace: t.Namespace,
- Name: t.Spec.Broker,
- }
- err := r.client.Get(ctx, name, b)
- return b, err
-}
-
-// getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an
-// error.
-func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b)))
-}
-
-// getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an
-// error.
-func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b)))
-}
-
-// getChannel returns the Broker's channel if it exists, otherwise it returns an error.
-func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) {
- list := &v1alpha1.ChannelList{}
- opts := &runtimeclient.ListOptions{
- Namespace: b.Namespace,
- LabelSelector: ls,
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
- if err != nil {
- return nil, err
- }
- for _, c := range list.Items {
- if metav1.IsControlledBy(&c, b) {
- return &c, nil
- }
- }
-
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
-}
-
-// getService returns the K8s service for trigger 't' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
- list := &corev1.ServiceList{}
- opts := &runtimeclient.ListOptions{
- Namespace: b.Namespace,
- LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
- if err != nil {
- return nil, err
- }
- for _, svc := range list.Items {
- if metav1.IsControlledBy(&svc, b) {
- return &svc, nil
- }
- }
-
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
-}
-
-// subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels.
-func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {
- uri := &url.URL{
- Scheme: "http",
- Host: names.ServiceHostName(svc.Name, svc.Namespace),
- Path: path.Generate(t),
- }
- expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri)
-
- sub, err := r.getSubscription(ctx, t)
- // If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- sub = expected
- err = r.client.Create(ctx, sub)
- if err != nil {
- return nil, err
- }
- return sub, nil
- } else if err != nil {
- return nil, err
- }
-
- // Update Subscription if it has changed. Ignore the generation.
- expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration
- if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {
- // Given that spec.channel is immutable, we cannot just update the Subscription. We delete
- // it and re-create it instead.
- err = r.client.Delete(ctx, sub)
- if err != nil {
- logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err))
- r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err)
- return nil, err
- }
- sub = expected
- err = r.client.Create(ctx, sub)
- if err != nil {
- logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err))
- r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
- return nil, err
- }
- }
- return sub, nil
-}
-
-// getSubscription returns the subscription of trigger 't' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) {
- list := &v1alpha1.SubscriptionList{}
- opts := &runtimeclient.ListOptions{
- Namespace: t.Namespace,
- LabelSelector: labels.SelectorFromSet(resources.SubscriptionLabels(t)),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
- if err != nil {
- return nil, err
- }
- for _, s := range list.Items {
- if metav1.IsControlledBy(&s, t) {
- return &s, nil
- }
- }
-
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
-}
diff --git a/pkg/reconciler/v1alpha1/trigger/trigger_test.go b/pkg/reconciler/v1alpha1/trigger/trigger_test.go
deleted file mode 100644
index cfad0508255..00000000000
--- a/pkg/reconciler/v1alpha1/trigger/trigger_test.go
+++ /dev/null
@@ -1,738 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package trigger
-
-import (
- "context"
- "errors"
- "fmt"
- "net/url"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
- brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources"
- "github.com/knative/eventing/pkg/utils"
- duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-const (
- testNS = "test-namespace"
- triggerName = "test-trigger"
- brokerName = "test-broker"
-
- subscriberAPIVersion = "v1"
- subscriberKind = "Service"
- subscriberName = "subscriberName"
-
- continueToken = "continueToken"
-)
-
-var (
- trueVal = true
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
-
- // Map of events to set test cases' expectations easier.
- events = map[string]corev1.Event{
- triggerReconciled: {Reason: triggerReconciled, Type: corev1.EventTypeNormal},
- triggerUpdateStatusFailed: {Reason: triggerUpdateStatusFailed, Type: corev1.EventTypeWarning},
- triggerReconcileFailed: {Reason: triggerReconcileFailed, Type: corev1.EventTypeWarning},
- subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning},
- subscriptionCreateFailed: {Reason: subscriptionCreateFailed, Type: corev1.EventTypeWarning},
- }
-)
-
-func init() {
- // Add types to scheme
- _ = v1alpha1.AddToScheme(scheme.Scheme)
- _ = istiov1alpha3.AddToScheme(scheme.Scheme)
-}
-
-func TestProvideController(t *testing.T) {
- // TODO(grantr) This needs a mock of manager.Manager. Creating a manager
- // with a fake Config fails because the Manager tries to contact the
- // apiserver.
-
- // cfg := &rest.Config{
- // Host: "http://foo:80",
- // }
- //
- // mgr, err := manager.New(cfg, manager.Options{})
- // if err != nil {
- // t.Fatalf("Error creating manager: %v", err)
- // }
- //
- // _, err = ProvideController(mgr)
- // if err != nil {
- // t.Fatalf("Error in ProvideController: %v", err)
- // }
-}
-
-func TestInjectClient(t *testing.T) {
- r := &reconciler{}
- orig := r.client
- n := fake.NewFakeClient()
- if orig == n {
- t.Errorf("Original and new clients are identical: %v", orig)
- }
- err := r.InjectClient(n)
- if err != nil {
- t.Errorf("Unexpected error injecting the client: %v", err)
- }
- if n != r.client {
- t.Errorf("Unexpected client. Expected: '%v'. Actual: '%v'", n, r.client)
- }
-}
-
-func TestInjectConfig(t *testing.T) {
- r := &reconciler{}
- wantCfg := &rest.Config{
- Host: "http://foo",
- }
-
- err := r.InjectConfig(wantCfg)
- if err != nil {
- t.Fatalf("Unexpected error injecting the config: %v", err)
- }
-
- wantDynClient, err := dynamic.NewForConfig(wantCfg)
- if err != nil {
- t.Fatalf("Unexpected error generating dynamic client: %v", err)
- }
-
- // Since dynamicClient doesn't export any fields, we can only test its type.
- switch r.dynamicClient.(type) {
- case dynamic.Interface:
- // ok
- default:
- t.Errorf("Unexpected dynamicClient type. Expected: %T, Got: %T", wantDynClient, r.dynamicClient)
- }
-}
-
-func TestReconcile(t *testing.T) {
- testCases := []controllertesting.TestCase{
- {
- Name: "Trigger not found",
- },
- {
- Name: "Get Trigger error",
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Trigger); ok {
- return controllertesting.Handled, errors.New("test error getting the Trigger")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting the Trigger",
- },
- {
- Name: "Trigger being deleted",
- InitialState: []runtime.Object{
- makeDeletingTrigger(),
- },
- WantEvent: []corev1.Event{events[triggerReconciled]},
- },
- {
- Name: "Get Broker error",
- InitialState: []runtime.Object{
- makeTrigger(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error getting broker")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting broker",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Get Broker Trigger channel error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- // Only match the Trigger Channel labels.
- ls := labels.FormatLabels(broker.TriggerChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
-
- if _, ok := list.(*v1alpha1.ChannelList); ok && opts.LabelSelector.Matches(l) {
- return controllertesting.Handled, errors.New("test error getting broker's Trigger channel")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting broker's Trigger channel",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Broker Trigger channel not found",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- },
- WantErrMsg: ` "" not found`,
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Get Broker Ingress channel error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(broker.IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
-
- if _, ok := list.(*v1alpha1.ChannelList); ok && opts.LabelSelector.Matches(l) {
- return controllertesting.Handled, errors.New("test error getting broker's Ingress channel")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting broker's Ingress channel",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Broker Ingress channel not found",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- },
- WantErrMsg: ` "" not found`,
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Broker Filter Service not found",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- },
- WantErrMsg: ` "" not found`,
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Get Broker Filter Service error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- if _, ok := list.(*corev1.ServiceList); ok {
- return controllertesting.Handled, errors.New("test error getting Broker's filter Service")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting Broker's filter Service",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Resolve subscriberURI error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- },
- DynamicMocks: controllertesting.DynamicMocks{
- MockGets: []controllertesting.MockDynamicGet{
- func(ctx *controllertesting.MockDynamicContext, name string, options metav1.GetOptions, subresources ...string) (handled controllertesting.MockHandled, i *unstructured.Unstructured, e error) {
- if ctx.Resource.Group == "" && ctx.Resource.Version == "v1" && ctx.Resource.Resource == "services" {
-
- return controllertesting.Handled, nil, errors.New("test error resolving subscriber URI")
- }
- return controllertesting.Unhandled, nil, nil
- },
- },
- },
- WantErrMsg: "test error resolving subscriber URI",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Get Subscription error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, _ *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := list.(*v1alpha1.SubscriptionList); ok {
- return controllertesting.Handled, errors.New("test error listing subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error listing subscription",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Create Subscription error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error creating subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error creating subscription",
- WantEvent: []corev1.Event{events[triggerReconcileFailed]},
- },
- {
- Name: "Delete Subscription error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- makeDifferentSubscription(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockDeletes: []controllertesting.MockDelete{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error deleting subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error deleting subscription",
- WantEvent: []corev1.Event{events[subscriptionDeleteFailed], events[triggerReconcileFailed]},
- },
- {
- Name: "Re-create Subscription error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- makeDifferentSubscription(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error re-creating subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error re-creating subscription",
- WantEvent: []corev1.Event{events[subscriptionCreateFailed], events[triggerReconcileFailed]},
- },
- {
- Name: "Update status error",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- makeSameSubscription(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- Mocks: controllertesting.Mocks{
- MockStatusUpdates: []controllertesting.MockStatusUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Trigger); ok {
- return controllertesting.Handled, errors.New("test error updating trigger status")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error updating trigger status",
- WantEvent: []corev1.Event{events[triggerReconciled], events[triggerUpdateStatusFailed]},
- },
- {
- Name: "Trigger reconciliation success",
- InitialState: []runtime.Object{
- makeTrigger(),
- makeReadyBroker(),
- makeTriggerChannel(),
- makeBrokerFilterService(),
- makeReadySubscription(),
- },
- Objects: []runtime.Object{
- makeSubscriberServiceAsUnstructured(),
- },
- WantEvent: []corev1.Event{events[triggerReconciled]},
- WantPresent: []runtime.Object{
- makeReadyTrigger(),
- },
- },
- }
- for _, tc := range testCases {
- c := tc.GetClient()
- dc := tc.GetDynamicClient()
- recorder := tc.GetEventRecorder()
-
- r := &reconciler{
- client: c,
- dynamicClient: dc,
- recorder: recorder,
- logger: zap.NewNop(),
- }
- tc.ReconcileKey = fmt.Sprintf("%s/%s", testNS, triggerName)
- tc.IgnoreTimes = true
- tc.Scheme = scheme.Scheme
- t.Run(tc.Name, tc.Runner(t, r, c, recorder))
- }
-}
-
-func TestMapBrokerToTriggers(t *testing.T) {
- testCases := map[string]struct {
- initialState []runtime.Object
- mocks controllertesting.Mocks
- expected []reconcile.Request
- }{
- "List error": {
- mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, _ *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errors.New("test induced error")
- },
- },
- },
- expected: []reconcile.Request{},
- },
- "One Trigger": {
- initialState: []runtime.Object{
- makeTrigger(),
- },
- expected: []reconcile.Request{
- {
- NamespacedName: types.NamespacedName{
- Namespace: testNS,
- Name: triggerName,
- },
- },
- },
- },
- "Only from this namespace": {
- initialState: []runtime.Object{
- makeTriggerWithNamespaceAndName(testNS, "one"),
- makeTriggerWithNamespaceAndName("some-other-namespace", "will-be-ignored"),
- makeTriggerWithNamespaceAndName(testNS, "two"),
- },
- expected: []reconcile.Request{
- {
- NamespacedName: types.NamespacedName{
- Namespace: testNS,
- Name: "one",
- },
- },
- {
- NamespacedName: types.NamespacedName{
- Namespace: testNS,
- Name: "two",
- },
- },
- },
- },
- "Follows pagination": {
- initialState: []runtime.Object{
- makeTrigger(),
- },
- mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- // The first request won't have a continue token. Add it and immediately
- // return. The subsequent request will have the token, remove it and send
- // the request to the inner client.
- tl := list.(*v1alpha1.TriggerList)
- if opts.Raw.Continue != continueToken {
- tl.Continue = continueToken
- return controllertesting.Handled, nil
- } else {
- tl.Continue = ""
- return controllertesting.Handled, innerClient.List(ctx, opts, list)
- }
- },
- },
- },
- expected: []reconcile.Request{
- {
- NamespacedName: types.NamespacedName{
- Namespace: testNS,
- Name: triggerName,
- },
- },
- },
- },
- }
-
- for n, tc := range testCases {
- t.Run(n, func(t *testing.T) {
- c := (&controllertesting.TestCase{
- Scheme: scheme.Scheme,
- InitialState: tc.initialState,
- Mocks: tc.mocks,
- }).GetClient()
-
- b := &mapBrokerToTriggers{
- // client and logger are the only fields that are used by the Map function.
- r: &reconciler{
- client: c,
- logger: zap.NewNop(),
- },
- }
- o := handler.MapObject{
- Meta: &metav1.ObjectMeta{
- Namespace: testNS,
- Name: brokerName,
- },
- }
- actual := b.Map(o)
- if diff := cmp.Diff(tc.expected, actual); diff != "" {
- t.Errorf("Unexpected results (-want +got): %s", diff)
- }
- })
- }
-}
-
-func makeTrigger() *v1alpha1.Trigger {
- return &v1alpha1.Trigger{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Trigger",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: triggerName,
- },
- Spec: v1alpha1.TriggerSpec{
- Broker: brokerName,
- Filter: &v1alpha1.TriggerFilter{
- SourceAndType: &v1alpha1.TriggerFilterSourceAndType{
- Source: "Any",
- Type: "Any",
- },
- },
- Subscriber: &v1alpha1.SubscriberSpec{
- Ref: &corev1.ObjectReference{
- Name: subscriberName,
- Kind: subscriberKind,
- APIVersion: subscriberAPIVersion,
- },
- },
- },
- }
-}
-
-func makeReadyTrigger() *v1alpha1.Trigger {
- t := makeTrigger()
- t.Status = *v1alpha1.TestHelper.ReadyTriggerStatus()
- t.Status.SubscriberURI = fmt.Sprintf("http://%s.%s.svc.%s/", subscriberName, testNS, utils.GetClusterDomainName())
- return t
-}
-
-func makeDeletingTrigger() *v1alpha1.Trigger {
- b := makeReadyTrigger()
- b.DeletionTimestamp = &deletionTime
- return b
-}
-
-func makeTriggerWithNamespaceAndName(namespace, name string) *v1alpha1.Trigger {
- t := makeTrigger()
- t.Namespace = namespace
- t.Name = name
- return t
-}
-
-func makeBroker() *v1alpha1.Broker {
- return &v1alpha1.Broker{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Broker",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: brokerName,
- },
- Spec: v1alpha1.BrokerSpec{
- ChannelTemplate: &v1alpha1.ChannelSpec{
- Provisioner: makeChannelProvisioner(),
- },
- },
- }
-}
-
-func makeReadyBroker() *v1alpha1.Broker {
- b := makeBroker()
- b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
- return b
-}
-
-func makeChannelProvisioner() *corev1.ObjectReference {
- return &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "ClusterChannelProvisioner",
- Name: "my-provisioner",
- }
-}
-
-func newChannel(name string) *v1alpha1.Channel {
- return &v1alpha1.Channel{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: name,
- Labels: map[string]string{
- "eventing.knative.dev/broker": brokerName,
- "eventing.knative.dev/brokerEverything": "true",
- },
- OwnerReferences: []metav1.OwnerReference{
- getOwnerReference(),
- },
- },
- Spec: v1alpha1.ChannelSpec{
- Provisioner: makeChannelProvisioner(),
- },
- Status: v1alpha1.ChannelStatus{
- Address: duckv1alpha1.Addressable{
- Hostname: "any-non-empty-string",
- },
- },
- }
-}
-
-func makeTriggerChannel() *v1alpha1.Channel {
- return newChannel(fmt.Sprintf("%s-broker", brokerName))
-}
-
-func makeDifferentChannel() *v1alpha1.Channel {
- return newChannel(fmt.Sprintf("%s-broker-different", brokerName))
-}
-
-func makeSubscriberServiceAsUnstructured() *unstructured.Unstructured {
- return &unstructured.Unstructured{
- Object: map[string]interface{}{
- "apiVersion": "v1",
- "kind": "Service",
- "metadata": map[string]interface{}{
- "namespace": testNS,
- "name": subscriberName,
- },
- },
- }
-}
-
-func makeBrokerFilterService() *corev1.Service {
- return brokerresources.MakeFilterService(makeBroker())
-}
-
-func makeServiceURI() *url.URL {
- return &url.URL{
- Scheme: "http",
- Host: fmt.Sprintf("%s.%s.svc.%s", makeBrokerFilterService().Name, testNS, utils.GetClusterDomainName()),
- Path: fmt.Sprintf("/triggers/%s/%s", testNS, triggerName),
- }
-}
-
-func makeSameSubscription() *v1alpha1.Subscription {
- return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeTriggerChannel(), makeServiceURI())
-}
-
-func makeDifferentSubscription() *v1alpha1.Subscription {
- return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeDifferentChannel(), makeServiceURI())
-}
-
-func makeReadySubscription() *v1alpha1.Subscription {
- s := makeSameSubscription()
- s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
- return s
-}
-
-func getOwnerReference() metav1.OwnerReference {
- return metav1.OwnerReference{
- APIVersion: v1alpha1.SchemeGroupVersion.String(),
- Kind: "Broker",
- Name: brokerName,
- Controller: &trueVal,
- BlockOwnerDeletion: &trueVal,
- }
-}
diff --git a/vendor/github.com/knative/pkg/apis/deprecated.go b/vendor/github.com/knative/pkg/apis/deprecated.go
new file mode 100644
index 00000000000..c73f5be7c99
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/deprecated.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apis
+
+import (
+ "context"
+ "reflect"
+ "strings"
+)
+
+const (
+ deprecatedPrefix = "Deprecated"
+)
+
+// CheckDeprecated checks whether the provided named deprecated fields
+// are set in a context where deprecation is disallowed.
+// This is a shallow check.
+func CheckDeprecated(ctx context.Context, obj interface{}) *FieldError {
+ return CheckDeprecatedUpdate(ctx, obj, nil)
+}
+
+// CheckDeprecated checks whether the provided named deprecated fields
+// are set in a context where deprecation is disallowed.
+// This is a json shallow check. We will recursively check inlined structs.
+func CheckDeprecatedUpdate(ctx context.Context, obj interface{}, original interface{}) *FieldError {
+ if IsDeprecatedAllowed(ctx) {
+ return nil
+ }
+
+ var errs *FieldError
+ objFields, objInlined := getPrefixedNamedFieldValues(deprecatedPrefix, obj)
+
+ if nonZero(reflect.ValueOf(original)) {
+ originalFields, originalInlined := getPrefixedNamedFieldValues(deprecatedPrefix, original)
+
+ // We only have to walk obj Fields because the assumption is that obj
+ // and original are of the same type.
+ for name, value := range objFields {
+ if nonZero(value) {
+ if differ(originalFields[name], value) {
+ // Not allowed to update the value.
+ errs = errs.Also(ErrDisallowedUpdateDeprecatedFields(name))
+ }
+ }
+ }
+ // Look for deprecated inlined updates.
+ if len(objInlined) > 0 {
+ for name, value := range objInlined {
+ errs = errs.Also(CheckDeprecatedUpdate(ctx, value, originalInlined[name]))
+ }
+ }
+ } else {
+ for name, value := range objFields {
+ if nonZero(value) {
+ // Not allowed to set the value.
+ errs = errs.Also(ErrDisallowedFields(name))
+ }
+ }
+ // Look for deprecated inlined creates.
+ if len(objInlined) > 0 {
+ for _, value := range objInlined {
+ errs = errs.Also(CheckDeprecated(ctx, value))
+ }
+ }
+ }
+ return errs
+}
+
+func getPrefixedNamedFieldValues(prefix string, obj interface{}) (map[string]reflect.Value, map[string]interface{}) {
+ fields := make(map[string]reflect.Value, 0)
+ inlined := make(map[string]interface{}, 0)
+
+ objValue := reflect.Indirect(reflect.ValueOf(obj))
+
+ // If res is not valid or a struct, don't even try to use it.
+ if !objValue.IsValid() || objValue.Kind() != reflect.Struct {
+ return fields, inlined
+ }
+
+ for i := 0; i < objValue.NumField(); i++ {
+ tf := objValue.Type().Field(i)
+ if v := objValue.Field(i); v.IsValid() {
+ jTag := tf.Tag.Get("json")
+ if strings.HasPrefix(tf.Name, prefix) {
+ name := strings.Split(jTag, ",")[0]
+ if name == "" {
+ // Default to field name in go struct if no json name.
+ name = tf.Name
+ }
+ fields[name] = v
+ } else if jTag == ",inline" {
+ inlined[tf.Name] = getInterface(v)
+ }
+ }
+ }
+ return fields, inlined
+}
+
+// getInterface returns the interface value of the reflected object.
+func getInterface(a reflect.Value) interface{} {
+ switch a.Kind() {
+ case reflect.Ptr:
+ if a.IsNil() {
+ return nil
+ }
+ return a.Elem().Interface()
+
+ case reflect.Map, reflect.Slice, reflect.Array:
+ return a.Elem().Interface()
+
+ // This is a nil interface{} type.
+ case reflect.Invalid:
+ return nil
+
+ default:
+ return a.Interface()
+ }
+}
+
+// nonZero returns true if a is nil or reflect.Zero.
+func nonZero(a reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Ptr:
+ if a.IsNil() {
+ return false
+ }
+ return nonZero(a.Elem())
+
+ case reflect.Map, reflect.Slice, reflect.Array:
+ if a.IsNil() {
+ return false
+ }
+ return true
+
+ // This is a nil interface{} type.
+ case reflect.Invalid:
+ return false
+
+ default:
+ if reflect.DeepEqual(a.Interface(), reflect.Zero(a.Type()).Interface()) {
+ return false
+ }
+ return true
+ }
+}
+
+// differ returns true if a != b
+func differ(a, b reflect.Value) bool {
+ if a.Kind() != b.Kind() {
+ return true
+ }
+
+ switch a.Kind() {
+ case reflect.Ptr:
+ if a.IsNil() || b.IsNil() {
+ return a.IsNil() != b.IsNil()
+ }
+ return differ(a.Elem(), b.Elem())
+
+ default:
+ if reflect.DeepEqual(a.Interface(), b.Interface()) {
+ return false
+ }
+ return true
+ }
+}
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
index 12b63061699..a31e8ee235f 100644
--- a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/addressable_types.go
@@ -60,9 +60,11 @@ type AddressStatus struct {
Address *Addressable `json:"address,omitempty"`
}
-// Verify AddressableType resources meet duck contracts.
-var _ duck.Populatable = (*AddressableType)(nil)
-var _ apis.Listable = (*AddressableType)(nil)
+var (
+ // Verify AddressableType resources meet duck contracts.
+ _ duck.Populatable = (*AddressableType)(nil)
+ _ apis.Listable = (*AddressableType)(nil)
+)
// GetFullType implements duck.Implementable
func (*Addressable) GetFullType() duck.Populatable {
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
index 1c550ed19ff..72d4bf605d9 100644
--- a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/condition_set.go
@@ -153,7 +153,7 @@ func (r ConditionSet) Manage(status interface{}) ConditionManager {
}
}
- // We tried. This object is not understood by the the condition manager.
+ // We tried. This object is not understood by the condition manager.
//panic(fmt.Sprintf("Error converting %T into a ConditionsAccessor", status))
// TODO: not sure which way. using panic above means passing nil status panics the system.
return conditionsImpl{
diff --git a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
index 0e7da480bb9..0e91aef8ac7 100644
--- a/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
+++ b/vendor/github.com/knative/pkg/apis/duck/v1alpha1/retired_targetable_types.go
@@ -60,11 +60,13 @@ type TargetStatus struct {
Targetable *Targetable `json:"targetable,omitempty"`
}
-// In order for Targetable to be Implementable, Target must be Populatable.
-var _ duck.Populatable = (*Target)(nil)
+var (
+ // In order for Targetable to be Implementable, Target must be Populatable.
+ _ duck.Populatable = (*Target)(nil)
-// Ensure Target satisfies apis.Listable
-var _ apis.Listable = (*Target)(nil)
+ // Ensure Target satisfies apis.Listable
+ _ apis.Listable = (*Target)(nil)
+)
// GetFullType implements duck.Implementable
func (*Targetable) GetFullType() duck.Populatable {
diff --git a/vendor/github.com/knative/pkg/apis/field_error.go b/vendor/github.com/knative/pkg/apis/field_error.go
index da498281b02..8b56be976e8 100644
--- a/vendor/github.com/knative/pkg/apis/field_error.go
+++ b/vendor/github.com/knative/pkg/apis/field_error.go
@@ -20,6 +20,8 @@ import (
"fmt"
"sort"
"strings"
+
+ "github.com/knative/pkg/kmp"
)
// CurrentField is a constant to supply as a fieldPath for when there is
@@ -300,6 +302,15 @@ func ErrDisallowedFields(fieldPaths ...string) *FieldError {
}
}
+// ErrDisallowedUpdateDeprecatedFields is a variadic helper method for
+// constructing a FieldError for updating of deprecated fields.
+func ErrDisallowedUpdateDeprecatedFields(fieldPaths ...string) *FieldError {
+ return &FieldError{
+ Message: "must not update deprecated field(s)",
+ Paths: fieldPaths,
+ }
+}
+
// ErrInvalidArrayValue constructs a FieldError for a repetetive `field`
// at `index` that has received an invalid string value.
func ErrInvalidArrayValue(value interface{}, field string, index int) *FieldError {
@@ -351,3 +362,18 @@ func ErrOutOfBoundsValue(value, lower, upper interface{}, fieldPath string) *Fie
Paths: []string{fieldPath},
}
}
+
+// CheckDisallowedFields compares the request object against a masked request object. Fields
+// that are set in the request object that are unset in the mask are reported back as disallowed fields. If
+// there is an error comparing the two objects FieldError of "Internal Error" is returned.
+func CheckDisallowedFields(request, maskedRequest interface{}) *FieldError {
+ if disallowed, err := kmp.CompareSetFields(request, maskedRequest); err != nil {
+ return &FieldError{
+ Message: fmt.Sprintf("Internal Error"),
+ Paths: []string{CurrentField},
+ }
+ } else if len(disallowed) > 0 {
+ return ErrDisallowedFields(disallowed...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
index 5b2c708b673..8798cfa3775 100644
--- a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
@@ -767,7 +767,7 @@ type CorsPolicy struct {
// access. Serialized into Access-Control-Expose-Headers header.
ExposeHeaders []string `json:"exposeHeaders,omitempty"`
- // Specifies how long the the results of a preflight request can be
+ // Specifies how long the results of a preflight request can be
// cached. Translates to the Access-Control-Max-Age header.
MaxAge string `json:"maxAge,omitempty"`
diff --git a/vendor/github.com/knative/pkg/metrics/config.go b/vendor/github.com/knative/pkg/metrics/config.go
index e57aec821c6..0482286e2cd 100644
--- a/vendor/github.com/knative/pkg/metrics/config.go
+++ b/vendor/github.com/knative/pkg/metrics/config.go
@@ -226,6 +226,8 @@ func UpdateExporter(ops ExporterOptions, logger *zap.SugaredLogger) error {
}
if isNewExporterRequired(newConfig) {
+ logger.Info("Flushing the existing exporter before setting up the new exporter.")
+ FlushExporter()
e, err := newMetricsExporter(newConfig, logger)
if err != nil {
logger.Errorf("Failed to update a new metrics exporter based on metric config %v. error: %v", newConfig, err)
diff --git a/vendor/github.com/knative/pkg/metrics/exporter.go b/vendor/github.com/knative/pkg/metrics/exporter.go
index 238f400f06c..e20637e3a07 100644
--- a/vendor/github.com/knative/pkg/metrics/exporter.go
+++ b/vendor/github.com/knative/pkg/metrics/exporter.go
@@ -27,6 +27,11 @@ var (
metricsMux sync.Mutex
)
+type flushable interface {
+ // Flush waits for metrics to be uploaded.
+ Flush()
+}
+
// newMetricsExporter gets a metrics exporter based on the config.
func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.Exporter, error) {
// If there is a Prometheus Exporter server running, stop it.
@@ -83,3 +88,19 @@ func setCurMetricsConfig(c *metricsConfig) {
}
curMetricsConfig = c
}
+
+// FlushExporter waits for exported data to be uploaded.
+// This should be called before the process shuts down or exporter is replaced.
+// Return value indicates whether the exporter is flushable or not.
+func FlushExporter() bool {
+ e := getCurMetricsExporter()
+ if e == nil {
+ return false
+ }
+
+ if f, ok := e.(flushable); ok {
+ f.Flush()
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/table.go b/vendor/github.com/knative/pkg/reconciler/testing/table.go
index dcd24c66054..0a0a3a9bff0 100644
--- a/vendor/github.com/knative/pkg/reconciler/testing/table.go
+++ b/vendor/github.com/knative/pkg/reconciler/testing/table.go
@@ -147,7 +147,7 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
}
if got, want := len(actions.Creates), len(r.WantCreates); got > want {
for _, extra := range actions.Creates[want:] {
- t.Errorf("Extra create: %#v", extra)
+ t.Errorf("Extra create: %#v", extra.GetObject())
}
}
@@ -181,7 +181,7 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
}
if got, want := len(updates), len(r.WantUpdates); got > want {
for _, extra := range updates[want:] {
- t.Errorf("Extra update: %#v", extra)
+ t.Errorf("Extra update: %#v", extra.GetObject())
}
}
@@ -225,11 +225,11 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
}
if len(statusUpdates)+len(updates) != len(actions.Updates) {
- var unexpected []clientgotesting.UpdateAction
+ var unexpected []runtime.Object
for _, update := range actions.Updates {
if update.GetSubresource() != "status" && update.GetSubresource() != "" {
- unexpected = append(unexpected, update)
+ unexpected = append(unexpected, update.GetObject())
}
}
@@ -251,7 +251,7 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
}
if got, want := len(actions.Deletes), len(r.WantDeletes); got > want {
for _, extra := range actions.Deletes[want:] {
- t.Errorf("Extra delete: %#v", extra)
+ t.Errorf("Extra delete: %s/%s", extra.GetNamespace(), extra.GetName())
}
}
diff --git a/vendor/github.com/knative/pkg/test/spoof/spoof.go b/vendor/github.com/knative/pkg/test/spoof/spoof.go
index f25bb07d6fa..f23d3049e13 100644
--- a/vendor/github.com/knative/pkg/test/spoof/spoof.go
+++ b/vendor/github.com/knative/pkg/test/spoof/spoof.go
@@ -106,7 +106,7 @@ func New(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, domain
if endpointOverride == "" {
var err error
// If the domain that the Route controller is configured to assign to Route.Status.Domain
- // (the domainSuffix) is not resolvable, we need to retrieve the the endpoint and spoof
+ // (the domainSuffix) is not resolvable, we need to retrieve the endpoint and spoof
// the Host in our requests.
e, err = ingress.GetIngressEndpoint(kubeClientset)
if err != nil {
diff --git a/vendor/github.com/knative/pkg/webhook/webhook.go b/vendor/github.com/knative/pkg/webhook/webhook.go
index cd57f176440..3fc5342c74f 100644
--- a/vendor/github.com/knative/pkg/webhook/webhook.go
+++ b/vendor/github.com/knative/pkg/webhook/webhook.go
@@ -227,6 +227,7 @@ func validate(ctx context.Context, new GenericCRD) error {
if err := new.Validate(ctx); err != nil {
return err
}
+
return nil
}
@@ -573,6 +574,7 @@ func (ac *AdmissionController) mutate(ctx context.Context, req *admissionv1beta1
// discretion over (our portion of) the message that the user sees.
return nil, err
}
+
return json.Marshal(patches)
}
From 6e45dd78aa0876a74bdf2a59dab8264767009b37 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Tue, 23 Apr 2019 15:41:29 -0700
Subject: [PATCH 46/76] Move channel pkg/controller (#1083)
* test working.
* Adding channel to main.
* clean imports.
* not a todo. removing.
* update deps.
* fix imports from merge.
---
Gopkg.lock | 1 -
cmd/controller/main.go | 11 +-
pkg/reconciler/channel/channel.go | 164 +++++++++++
pkg/reconciler/channel/channel_test.go | 178 ++++++++++++
pkg/reconciler/testing/channel.go | 31 ++-
pkg/reconciler/v1alpha1/channel/channel.go | 144 ----------
.../v1alpha1/channel/channel_test.go | 257 ------------------
7 files changed, 380 insertions(+), 406 deletions(-)
create mode 100644 pkg/reconciler/channel/channel.go
create mode 100644 pkg/reconciler/channel/channel_test.go
delete mode 100644 pkg/reconciler/v1alpha1/channel/channel.go
delete mode 100644 pkg/reconciler/v1alpha1/channel/channel_test.go
diff --git a/Gopkg.lock b/Gopkg.lock
index a23c284149b..a2cfaea7964 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1452,7 +1452,6 @@
"sigs.k8s.io/controller-runtime/pkg/event",
"sigs.k8s.io/controller-runtime/pkg/handler",
"sigs.k8s.io/controller-runtime/pkg/manager",
- "sigs.k8s.io/controller-runtime/pkg/predicate",
"sigs.k8s.io/controller-runtime/pkg/reconcile",
"sigs.k8s.io/controller-runtime/pkg/runtime/inject",
"sigs.k8s.io/controller-runtime/pkg/runtime/log",
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 8aa7523e323..df7cdf7b2f5 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -28,6 +28,7 @@ import (
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
+
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
@@ -36,10 +37,10 @@ import (
"github.com/knative/eventing/pkg/logconfig"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/channel"
"github.com/knative/eventing/pkg/reconciler/subscription"
"github.com/knative/eventing/pkg/reconciler/trigger"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/channel"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
@@ -95,7 +96,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 2
+ const numControllers = 3
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -122,6 +123,10 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
opt,
subscriptionInformer,
),
+ channel.NewController(
+ opt,
+ channelInformer,
+ ),
trigger.NewController(
opt,
triggerInformer,
@@ -148,6 +153,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
if err := kncontroller.StartInformers(
stopCh,
subscriptionInformer.Informer(),
+ channelInformer.Informer(),
configMapInformer.Informer(),
triggerInformer.Informer(),
channelInformer.Informer(),
@@ -199,7 +205,6 @@ func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *za
// Add each controller's ProvideController func to this list to have the
// manager run it.
providers := []ProvideFunc{
- channel.ProvideController,
broker.ProvideController(
broker.ReconcilerArgs{
IngressImage: getRequiredEnv("BROKER_INGRESS_IMAGE"),
diff --git a/pkg/reconciler/channel/channel.go b/pkg/reconciler/channel/channel.go
new file mode 100644
index 00000000000..9ea395ee854
--- /dev/null
+++ b/pkg/reconciler/channel/channel.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package channel
+
+import (
+ "context"
+ "reflect"
+
+ corev1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/controller"
+ "go.uber.org/zap"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "Channels"
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "channel-default-controller"
+ channelReconciled = "ChannelReconciled"
+ channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
+)
+
+type Reconciler struct {
+ *reconciler.Base
+
+ // listers index properties about resources
+ channelLister listers.ChannelLister
+}
+
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ channelInformer eventinginformers.ChannelInformer,
+) *controller.Impl {
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ channelLister: channelInformer.Lister(),
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+ channelInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ return impl
+}
+
+// Reconcile will check if the channel is being watched by provisioner's channel controller
+// This will improve UX. See https://github.com/knative/eventing/issues/779
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the Channel resource with this namespace/name
+ original, err := r.channelLister.Channels(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("channel key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Delete is a no-op.
+ if original.DeletionTimestamp != nil {
+ return nil
+ }
+
+ // Don't modify the informers copy
+ channel := original.DeepCopy()
+
+ // Reconcile this copy of the Channel and then write back any status
+ // updates regardless of whether the reconcile errored out.
+ err = r.reconcile(ctx, channel)
+ if err != nil {
+ logging.FromContext(ctx).Warn("Error reconciling Channel", zap.Error(err))
+ } else {
+ logging.FromContext(ctx).Debug("Successfully reconciled Channel", zap.Any("key", key))
+ r.Recorder.Eventf(channel, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %s", key)
+ }
+
+ if _, updateStatusErr := r.updateStatus(ctx, channel.DeepCopy()); updateStatusErr != nil {
+ logging.FromContext(ctx).Warn("Error updating Channel status", zap.Any("key", key), zap.Error(updateStatusErr))
+ r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update channel status: %s", key)
+ return updateStatusErr
+ }
+
+ // Requeue if the resource is not ready:
+ return err
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, ch *v1alpha1.Channel) error {
+ // Do not Initialize() Status in channel-default-controller. It will set ChannelConditionProvisionerInstalled=True
+ // Directly call GetCondition(). If the Status was never initialized then GetCondition() will return nil and
+ // IsUnknown() will return true
+ c := ch.Status.GetCondition(v1alpha1.ChannelConditionProvisionerInstalled)
+
+ if c == nil || c.IsUnknown() {
+
+ var proName string
+ var proKind string
+ if ch.Spec.Provisioner != nil {
+ proName = ch.Spec.Provisioner.Name
+ proKind = ch.Spec.Provisioner.Kind
+ }
+
+ ch.Status.MarkProvisionerNotInstalled(
+ "Provisioner not found.",
+ "Specified provisioner [Name:%s Kind:%s] is not installed or not controlling the channel.",
+ proName,
+ proKind,
+ )
+ }
+ return nil
+}
+
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.Channel) (*v1alpha1.Channel, error) {
+ channel, err := r.channelLister.Channels(desired.Namespace).Get(desired.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there's nothing to update, just return.
+ if reflect.DeepEqual(channel.Status, desired.Status) {
+ return channel, nil
+ }
+
+ // Don't modify the informers copy.
+ existing := channel.DeepCopy()
+ existing.Status = desired.Status
+
+ return r.EventingClientSet.EventingV1alpha1().Channels(desired.Namespace).UpdateStatus(existing)
+}
diff --git a/pkg/reconciler/channel/channel_test.go b/pkg/reconciler/channel/channel_test.go
new file mode 100644
index 00000000000..ed83c40bc8c
--- /dev/null
+++ b/pkg/reconciler/channel/channel_test.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package channel
+
+import (
+ "testing"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+)
+
+const (
+ testNS = "testnamespace"
+ channeName = "testchannel"
+ provisionerName = "testprovisioner"
+)
+
+var (
+ provisionerGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "ClusterChannelProvisioner",
+ }
+)
+
+func init() {
+ // Add types to scheme
+ _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestAllCases(t *testing.T) {
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ },
+ //{ // TODO: there is a bug in the controller; it reconciles even for an empty provisioner.
+ // Name: "incomplete channel",
+ // Objects: []runtime.Object{
+ // NewChannel(channeName, testNS),
+ // },
+ // Key: testNS + "/" + channeName,
+ // WantErr: true,
+ // WantEvents: []string{
+ // Eventf(corev1.EventTypeWarning, "TODO", "")},
+ //},
+ {
+ Name: "unclaimed channel, empty provisioner",
+ Objects: []runtime.Object{
+ NewChannel(channeName, testNS),
+ },
+ Key: testNS + "/" + channeName,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewChannel(channeName, testNS,
+ WithChannelProvisionerNotFound("", ""), // TODO: THIS IS A BUG, there is no validation checking.
+ ),
+ }},
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "ChannelReconciled", "Channel reconciled: %s/%s", testNS, channeName),
+ Eventf(corev1.EventTypeWarning, "ChannelUpdateStatusFailed", "Failed to update channel status: %s/%s", testNS, channeName),
+ },
+ }, {
+ Name: "unclaimed channel",
+ Objects: []runtime.Object{
+ NewChannel(channeName, testNS,
+ WithChannelProvisioner(provisionerGVK, provisionerName),
+ ),
+ },
+ Key: testNS + "/" + channeName,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewChannel(channeName, testNS,
+ WithChannelProvisioner(provisionerGVK, provisionerName),
+ // Status Update:
+ WithChannelProvisionerNotFound(provisionerName, provisionerGVK.Kind),
+ ),
+ }},
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "ChannelReconciled", "Channel reconciled: %s/%s", testNS, channeName),
+ },
+ }, {
+ Name: "controller defaulted channel",
+ Objects: []runtime.Object{
+ NewChannel(channeName, testNS,
+ WithInitChannelConditions,
+ ),
+ },
+ Key: testNS + "/" + channeName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "ChannelReconciled", "Channel reconciled: %s/%s", testNS, channeName),
+ },
+ }, {
+ Name: "valid claimed channel",
+ Objects: []runtime.Object{
+ NewChannel(channeName, testNS,
+ WithChannelProvisioner(provisionerGVK, provisionerName),
+ WithInitChannelConditions,
+ ),
+ },
+ Key: testNS + "/" + channeName,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "ChannelReconciled", "Channel reconciled: %s/%s", testNS, channeName),
+ },
+ }, {
+ Name: "channel deleted is no-op",
+ Objects: []runtime.Object{
+ NewChannel(channeName, testNS,
+ WithInitChannelConditions,
+ WithChannelDeleted,
+ ),
+ },
+ Key: testNS + "/" + channeName,
+ },
+ }
+
+ defer logtesting.ClearAll()
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ channelLister: listers.GetChannelLister(),
+ }
+ }))
+
+}
+
+func TestNew(t *testing.T) {
+ defer logtesting.ClearAll()
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+ eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0)
+
+ channelInformer := eventingInformer.Eventing().V1alpha1().Channels()
+ c := NewController(reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ }, channelInformer)
+
+ if c == nil {
+ t.Fatal("Expected NewController to return a non-nil value")
+ }
+}
diff --git a/pkg/reconciler/testing/channel.go b/pkg/reconciler/testing/channel.go
index 3d705befdc9..bef61f609c7 100644
--- a/pkg/reconciler/testing/channel.go
+++ b/pkg/reconciler/testing/channel.go
@@ -18,10 +18,13 @@ package testing
import (
"context"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
duckv1alpha1 "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ChannelOption enables further configuration of a Channel.
@@ -61,6 +64,32 @@ func WithInitChannelConditions(s *v1alpha1.Channel) {
s.Status.InitializeConditions()
}
+func WithChannelDeleted(c *v1alpha1.Channel) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ c.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+func WithChannelProvisionerNotFound(name, kind string) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.Status.MarkProvisionerNotInstalled(
+ "Provisioner not found.",
+ "Specified provisioner [Name:%s Kind:%s] is not installed or not controlling the channel.",
+ name,
+ kind,
+ )
+ }
+}
+
+func WithChannelProvisioner(gvk metav1.GroupVersionKind, name string) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.Spec.Provisioner = &corev1.ObjectReference{
+ APIVersion: apiVersion(gvk),
+ Kind: gvk.Kind,
+ Name: name,
+ }
+ }
+}
+
func WithChannelAddress(hostname string) ChannelOption {
return func(c *v1alpha1.Channel) {
c.Status.Address.Hostname = hostname
diff --git a/pkg/reconciler/v1alpha1/channel/channel.go b/pkg/reconciler/v1alpha1/channel/channel.go
deleted file mode 100644
index 0304d16e0d6..00000000000
--- a/pkg/reconciler/v1alpha1/channel/channel.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package channel
-
-import (
- "context"
-
- "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/logging"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/record"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/event"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/predicate"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-const (
- // controllerAgentName is the string used by this controller to identify
- // itself when creating events.
- controllerAgentName = "channel-default-controller"
- channelReconciled = "ChannelReconciled"
- channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
-)
-
-type reconciler struct {
- client client.Client
- restConfig *rest.Config
- dynamicClient dynamic.Interface
- recorder record.EventRecorder
- logger *zap.Logger
-}
-
-// Verify the struct implements reconcile.Reconciler
-var _ reconcile.Reconciler = &reconciler{}
-
-// ProvideController returns a Channel controller.
-// This Channel controller is a default controller for channels of all provisioner kinds
-func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
- // Setup a new controller to Reconcile channel
- c, err := controller.New(controllerAgentName, mgr, controller.Options{
- Reconciler: &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
- },
- })
- if err != nil {
- return nil, err
- }
-
- // Watch channel events
- // This controller is no-op when Channels are deleted
- if err = c.Watch(
- &source.Kind{Type: &v1alpha1.Channel{}},
- &handler.EnqueueRequestForObject{},
- predicate.Funcs{
- DeleteFunc: func(event.DeleteEvent) bool {
- return false
- },
- }); err != nil {
- return nil, err
- }
-
- return c, nil
-}
-
-// Reconcile will check if the channel is being watched by provisioner's channel controller
-// This will improve UX. See https://github.com/knative/eventing/issues/779
-func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", request)))
- logging.FromContext(ctx).Debug("Reconciling Channel")
- ch := &v1alpha1.Channel{}
-
- // Controller-runtime client Get() always deep copies the object. Hence no need to again deep copy it
- err := r.client.Get(ctx, request.NamespacedName, ch)
-
- if errors.IsNotFound(err) {
- logging.FromContext(ctx).Info("Channel not found")
- return reconcile.Result{}, nil
- }
-
- if err != nil {
- logging.FromContext(ctx).Error("Error getting Channel", zap.Error(err))
- return reconcile.Result{}, err
- }
-
- err = r.reconcile(ctx, ch)
-
- if err != nil {
- logging.FromContext(ctx).Warn("Error reconciling Channel", zap.Error(err))
- r.recorder.Eventf(ch, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update channel status: %s", request)
- return reconcile.Result{Requeue: true}, err
- }
- logging.FromContext(ctx).Debug("Successfully reconciled Channel")
- r.recorder.Eventf(ch, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %s", request)
- return reconcile.Result{Requeue: false}, nil
-}
-
-func (r *reconciler) reconcile(ctx context.Context, ch *v1alpha1.Channel) error {
- // Do not Initialize() Status in channel-default-controller. It will set ChannelConditionProvisionerInstalled=True
- // Directly call GetCondition(). If the Status was never initialized then GetCondition() will return nil and
- // IsUnknown() will return true
- c := ch.Status.GetCondition(v1alpha1.ChannelConditionProvisionerInstalled)
-
- if c.IsUnknown() {
- ch.Status.MarkProvisionerNotInstalled(
- "Provisioner not found.",
- "Specified provisioner [Name:%s Kind:%s] is not installed or not controlling the channel.",
- ch.Spec.Provisioner.Name,
- ch.Spec.Provisioner.Kind,
- )
- err := r.client.Status().Update(ctx, ch)
- return err
- }
- return nil
-}
-
-// InjectClient implements controller runtime's inject.Client.
-func (r *reconciler) InjectClient(c client.Client) error {
- r.client = c
- return nil
-}
diff --git a/pkg/reconciler/v1alpha1/channel/channel_test.go b/pkg/reconciler/v1alpha1/channel/channel_test.go
deleted file mode 100644
index 512ce0aeaf6..00000000000
--- a/pkg/reconciler/v1alpha1/channel/channel_test.go
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package channel
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
- duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-const (
- testNamespace = "testnamespace"
- testAPIVersion = "eventing.knative.dev/v1alpha1"
- testCCPName = "TestProvisioner"
- testCCPKind = "ClusterChannelProvisioner"
-)
-
-var (
- events = map[string]corev1.Event{
- channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal},
- channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning},
- }
-)
-
-func init() {
- // Add types to scheme
- _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
- _ = duckv1alpha1.AddToScheme(scheme.Scheme)
-}
-
-func TestInjectClient(t *testing.T) {
- r := &reconciler{}
- orig := r.client
- n := fake.NewFakeClient()
- if orig == n {
- t.Errorf("Original and new clients are identical: %v", orig)
- }
- err := r.InjectClient(n)
- if err != nil {
- t.Errorf("Unexpected error injecting the client: %v", err)
- }
- if n != r.client {
- t.Errorf("Unexpected client. Expected: '%v'. Actual: '%v'", n, r.client)
- }
-}
-
-func TestAllCases(t *testing.T) {
- testCases := []controllertesting.TestCase{
- {
- Name: "No channels exist",
- WantResult: reconcile.Result{},
- ReconcileKey: fmt.Sprintf("%v/%v", "chan-1", testNamespace),
- }, {
- Name: "Cannot get Channel",
- WantResult: reconcile.Result{},
- ReconcileKey: fmt.Sprintf("%v/%v", "chan-1", testNamespace),
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{accessDenied},
- },
- WantErrMsg: "access denied",
- }, {
- Name: "Orphaned channel",
- WantResult: reconcile.Result{},
- ReconcileKey: fmt.Sprintf("%v/%v", testNamespace, "chan-1"),
- InitialState: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantPresent: []runtime.Object{
- Channel("chan-1", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- }, {
- Name: "Non-orphaned channel test 1",
- WantResult: reconcile.Result{},
- ReconcileKey: fmt.Sprintf("%v/%v", testNamespace, "chan-2"),
- InitialState: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantPresent: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- }, {
- Name: "Non-orphaned channel test 2",
- WantResult: reconcile.Result{},
- ReconcileKey: fmt.Sprintf("%v/%v", testNamespace, "chan-3"),
- InitialState: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantPresent: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- }, {
- Name: "Fail orphaned channel status update",
- WantErrMsg: "update failed",
- WantResult: reconcile.Result{Requeue: true},
- ReconcileKey: fmt.Sprintf("%v/%v", testNamespace, "chan-1"),
- InitialState: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantPresent: []runtime.Object{
- Channel("chan-1", testNamespace),
- Channel("chan-2", testNamespace).WithProvInstalledStatus(corev1.ConditionTrue),
- Channel("chan-3", testNamespace).WithProvInstalledStatus(corev1.ConditionFalse),
- },
- WantEvent: []corev1.Event{
- events[channelUpdateStatusFailed],
- },
- Mocks: controllertesting.Mocks{
- MockStatusUpdates: []controllertesting.MockStatusUpdate{failUpdate},
- },
- },
- }
- for _, tc := range testCases {
- c := tc.GetClient()
- dc := tc.GetDynamicClient()
- recorder := tc.GetEventRecorder()
-
- r := &reconciler{
- client: c,
- dynamicClient: dc,
- restConfig: &rest.Config{},
- recorder: recorder,
- logger: zap.NewNop(),
- }
- tc.IgnoreTimes = true
- t.Run(tc.Name, tc.Runner(t, r, c, recorder))
- }
-}
-
-func failUpdate(_ client.Client, _ context.Context, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errors.New("update failed")
-}
-
-func accessDenied(_ client.Client, _ context.Context, _ client.ObjectKey, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errors.New("access denied")
-}
-
-type ChannelBuilder struct {
- *eventingv1alpha1.Channel
-}
-
-// Verify the Builder implements Buildable
-var _ controllertesting.Buildable = &ChannelBuilder{}
-
-func (cb *ChannelBuilder) Build() runtime.Object {
- return cb.Channel
-}
-
-func Channel(name string, namespace string) *ChannelBuilder {
- channel := getTestChannelWithoutStatus(name, namespace)
- return &ChannelBuilder{
- Channel: channel,
- }
-}
-
-func (cb *ChannelBuilder) WithProvInstalledStatus(provInstalledStatus corev1.ConditionStatus) *ChannelBuilder {
- cb.Status = eventingv1alpha1.ChannelStatus{}
-
- switch provInstalledStatus {
- case corev1.ConditionTrue:
- cb.Status.MarkProvisionerInstalled()
- break
- case corev1.ConditionFalse:
- cb.Status.MarkProvisionerNotInstalled(
- "Provisioner not found.",
- "Specified provisioner [Name:%v Kind:%v] is not installed or not controlling the channel.",
- testCCPName,
- testCCPKind,
- )
- break
- }
- return cb
-}
-
-func getTestChannelWithoutStatus(name string, namespace string) *eventingv1alpha1.Channel {
- ch := &eventingv1alpha1.Channel{}
- ch.APIVersion = testAPIVersion
- ch.Namespace = testNamespace
- ch.Kind = "Channel"
- ch.Name = name
- ch.Namespace = namespace
- ch.Spec = getTestChannelSpec()
- return ch
-}
-
-func getTestChannelSpec() eventingv1alpha1.ChannelSpec {
- chSpec := eventingv1alpha1.ChannelSpec{
- Provisioner: &corev1.ObjectReference{},
- Subscribable: &eventingduck.Subscribable{
- Subscribers: []eventingduck.ChannelSubscriberSpec{
- getTestSubscriberSpec(),
- getTestSubscriberSpec(),
- },
- },
- }
- chSpec.Provisioner.Name = testCCPName
- chSpec.Provisioner.APIVersion = testAPIVersion
- chSpec.Provisioner.Kind = testCCPKind
- return chSpec
-}
-
-func getTestSubscriberSpec() eventingduck.ChannelSubscriberSpec {
- return eventingduck.ChannelSubscriberSpec{
- SubscriberURI: "TestSubscriberURI",
- ReplyURI: "TestReplyURI",
- }
-}
From 4c67561e91096ea81f1146e19cf463ed1d1fb8d3 Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Tue, 23 Apr 2019 16:10:28 -0700
Subject: [PATCH 47/76] disable gcp pubsub e2e tests until we fix the flaky
tests (#1094)
---
test/e2e-tests.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 6536d7b511a..c41bce57f4c 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -145,6 +145,6 @@ function dump_extra_cluster_state() {
initialize $@
-go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,in-memory,gcp-pubsub || fail_test
+go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,in-memory || fail_test
success
From c1390e5cc32b28b14c080d4f548628309083450f Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 23 Apr 2019 16:30:28 -0700
Subject: [PATCH 48/76] Remove a few obsolete overrides (#1090)
* Remove cobra and pflag overrides
These overrides are very old, and cobra isn't imported anymore. This
cleanup has already happened in serving.
* Update pflag to 1.0.3
Tracking the serving bump in #2124.
* Remove very old GCP PubSub-related overrides
These were originally added for the PubSub receive adapter in #57, but
it's been a while and we probably don't need them anymore.
---
Gopkg.lock | 5 +-
Gopkg.toml | 29 ----
vendor/github.com/spf13/pflag/bytes.go | 104 ++++++++++++
vendor/github.com/spf13/pflag/flag.go | 10 +-
.../github.com/spf13/pflag/string_to_int.go | 149 ++++++++++++++++
.../spf13/pflag/string_to_string.go | 160 ++++++++++++++++++
6 files changed, 423 insertions(+), 34 deletions(-)
create mode 100644 vendor/github.com/spf13/pflag/string_to_int.go
create mode 100644 vendor/github.com/spf13/pflag/string_to_string.go
diff --git a/Gopkg.lock b/Gopkg.lock
index a2cfaea7964..81597fe9ee3 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -685,11 +685,12 @@
version = "v1.0.0"
[[projects]]
- digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7"
+ digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "NUT"
- revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+ revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
+ version = "v1.0.3"
[[projects]]
digest = "1:bce7c290509e40fd1c73d700305c1961004d08c9a1812e47533416a8742893a7"
diff --git a/Gopkg.toml b/Gopkg.toml
index 155bd1cfa10..0a1e6fde97d 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -26,29 +26,10 @@ required = [
name = "github.com/knative/test-infra"
non-go = false
-# Use HEAD (2018-04-21) to pick up:
-# https://github.com/spf13/cobra/pull/662
-# TODO this can be changed to a version constraint on 0.0.3 which has this commit
-# [[constraint]]
-# name = "github.com/spf13/cobra"
-# version = "0.0.3"
-[[override]]
- name = "github.com/spf13/cobra"
- revision = "615425954c3b0d9485a7027d4d451fdcdfdee84e"
-
-# TODO this can be changed to a version constraint on 1.0.3 which has this commit
-# [[constraint]]
-# name = "github.com/spf13/pflag"
-# version = "1.0.3"
-[[override]]
- name = "github.com/spf13/pflag"
- revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
-
# TODO why is this overridden?
[[override]]
name = "gopkg.in/yaml.v2"
version = "v2.2.1"
-
# TODO(https://github.com/knative/eventing/issues/1065): if we get to update k8s.io to > 1.13, we can remove
# k8s-dynamic-fake-simple.patch in ./hack/update-deps.sh
#
@@ -88,16 +69,6 @@ required = [
name = "github.com/json-iterator/go"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
-# TODO why is this overridden?
-[[override]]
- name = "github.com/golang/protobuf"
- version = "v1.1.0"
-
-# TODO why is this overridden?
-[[override]]
- name = "golang.org/x/oauth2"
- revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
-
# Constrain the version of knative/pkg we would like to import.
# This controls when we upgrade apis independently of Serving.
[[override]]
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
index 12c58db9fe3..67d53045708 100644
--- a/vendor/github.com/spf13/pflag/bytes.go
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -1,6 +1,7 @@
package pflag
import (
+ "encoding/base64"
"encoding/hex"
"fmt"
"strings"
@@ -9,10 +10,12 @@ import (
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
type bytesHexValue []byte
+// String implements pflag.Value.String.
func (bytesHex bytesHexValue) String() string {
return fmt.Sprintf("%X", []byte(bytesHex))
}
+// Set implements pflag.Value.Set.
func (bytesHex *bytesHexValue) Set(value string) error {
bin, err := hex.DecodeString(strings.TrimSpace(value))
@@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
return nil
}
+// Type implements pflag.Value.Type.
func (*bytesHexValue) Type() string {
return "bytesHex"
}
@@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, shorthand, value, usage)
}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 return the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 5eadc84e3c4..9beeda8ecca 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string {
}
first := args[0]
- if first[0] == '-' {
+ if len(first) > 0 && first[0] == '-' {
//--unknown --next-flag ...
return args
}
//--unknown arg ... (args will be arg ...)
- return args[1:]
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
}
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
@@ -990,11 +993,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
if strings.HasPrefix(shorthands, "test.") {
return
}
- outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 00000000000..5ceda3965df
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt return the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 00000000000..890a01afc03
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString return the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The value of each argument will not try to be separated by comma
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
From 2ff585c6486c3cdd01f06b6cdf7dd073cba37c06 Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Wed, 24 Apr 2019 16:14:29 +0200
Subject: [PATCH 49/76] keeping the original deployments as they were, just
more on finalizers (#1089)
* keeping the original deployments as they were, just more on finalizers
* use only update
* use only update
---
config/200-webhook-clusterrole.yaml | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/config/200-webhook-clusterrole.yaml b/config/200-webhook-clusterrole.yaml
index 739eb63aa65..89823ca6cb1 100644
--- a/config/200-webhook-clusterrole.yaml
+++ b/config/200-webhook-clusterrole.yaml
@@ -41,23 +41,29 @@ rules:
- "apps"
resources:
- "deployments"
+ verbs:
+ - "get"
+
+ - apiGroups:
+ - "apps"
+ resources:
- "deployments/finalizers"
- verbs: &everything
- - get
- - list
- - watch
- - create
+ verbs:
- update
- - patch
- - delete
-
# For actually registering our webhook.
- apiGroups:
- "admissionregistration.k8s.io"
resources:
- "mutatingwebhookconfigurations"
- verbs: *everything
+ verbs:
+ - "get"
+ - "list"
+ - "create"
+ - "update"
+ - "delete"
+ - "patch"
+ - "watch"
# Our own resources and statuses we care about.
- apiGroups:
From f907a64d422c88400a8f67c49ae95a0e562a6218 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Wed, 24 Apr 2019 11:09:28 -0700
Subject: [PATCH 50/76] Move namespace controller to pkg/controller style.
(#1069)
* Start of move namespace controller to pkg/controller style.
* no need for get methods.
* add the namespace controller to the main function.
* working on tests.
* migrate namespace controller to pkg/controller.
* using core informers.
---
cmd/controller/main.go | 16 +-
pkg/reconciler/namespace/namespace.go | 211 +++++++++++
pkg/reconciler/namespace/namespace_test.go | 244 ++++++++++++
pkg/reconciler/namespace/resources/broker.go | 33 ++
pkg/reconciler/namespace/resources/labels.go | 43 +++
pkg/reconciler/namespace/resources/names.go | 24 ++
.../namespace/resources/role_binding.go | 47 +++
.../namespace/resources/service_account.go | 33 ++
pkg/reconciler/testing/listers.go | 4 +
pkg/reconciler/testing/namespace.go | 50 +++
.../v1alpha1/namespace/namespace.go | 356 ------------------
.../v1alpha1/namespace/namespace_test.go | 293 --------------
12 files changed, 697 insertions(+), 657 deletions(-)
create mode 100644 pkg/reconciler/namespace/namespace.go
create mode 100644 pkg/reconciler/namespace/namespace_test.go
create mode 100644 pkg/reconciler/namespace/resources/broker.go
create mode 100644 pkg/reconciler/namespace/resources/labels.go
create mode 100644 pkg/reconciler/namespace/resources/names.go
create mode 100644 pkg/reconciler/namespace/resources/role_binding.go
create mode 100644 pkg/reconciler/namespace/resources/service_account.go
create mode 100644 pkg/reconciler/testing/namespace.go
delete mode 100644 pkg/reconciler/v1alpha1/namespace/namespace.go
delete mode 100644 pkg/reconciler/v1alpha1/namespace/namespace_test.go
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index df7cdf7b2f5..f1993c336a0 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -38,10 +38,10 @@ import (
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
"github.com/knative/eventing/pkg/reconciler/channel"
+ "github.com/knative/eventing/pkg/reconciler/namespace"
"github.com/knative/eventing/pkg/reconciler/subscription"
"github.com/knative/eventing/pkg/reconciler/trigger"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
- "github.com/knative/eventing/pkg/reconciler/v1alpha1/namespace"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
kncontroller "github.com/knative/pkg/controller"
@@ -96,7 +96,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 3
+ const numControllers = 4
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -109,10 +109,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
brokerInformer := eventingInformerFactory.Eventing().V1alpha1().Brokers()
coreServiceInformer := kubeInformerFactory.Core().V1().Services()
-
- // TODO: remove unused after done integrating all controllers.
- //deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
- //coreServiceInformer := kubeInformerFactory.Core().V1().Services()
+ coreNamespaceInformer := kubeInformerFactory.Core().V1().Namespaces()
configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps()
// Build all of our controllers, with the clients constructed above.
@@ -123,6 +120,10 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
opt,
subscriptionInformer,
),
+ namespace.NewController(
+ opt,
+ coreNamespaceInformer,
+ ),
channel.NewController(
opt,
channelInformer,
@@ -153,8 +154,8 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
if err := kncontroller.StartInformers(
stopCh,
subscriptionInformer.Informer(),
- channelInformer.Informer(),
configMapInformer.Informer(),
+ coreNamespaceInformer.Informer(),
triggerInformer.Informer(),
channelInformer.Informer(),
brokerInformer.Informer(),
@@ -212,7 +213,6 @@ func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *za
FilterImage: getRequiredEnv("BROKER_FILTER_IMAGE"),
FilterServiceAccountName: getRequiredEnv("BROKER_FILTER_SERVICE_ACCOUNT"),
}),
- namespace.ProvideController,
}
for _, provider := range providers {
if _, err = provider(mgr, logger.Desugar()); err != nil {
diff --git a/pkg/reconciler/namespace/namespace.go b/pkg/reconciler/namespace/namespace.go
new file mode 100644
index 00000000000..6840273ec67
--- /dev/null
+++ b/pkg/reconciler/namespace/namespace.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package namespace
+
+import (
+ "context"
+ "fmt"
+ "github.com/knative/eventing/pkg/reconciler/namespace/resources"
+ "k8s.io/client-go/tools/cache"
+
+ corev1informers "k8s.io/client-go/informers/core/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/controller"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "Namespace" // TODO: Namespace is not a very good name for this controller.
+
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "knative-eventing-namespace-controller"
+
+ // Name of the corev1.Events emitted from the reconciliation process.
+ brokerCreated = "BrokerCreated"
+ serviceAccountCreated = "BrokerFilterServiceAccountCreated"
+ serviceAccountRBACCreated = "BrokerFilterServiceAccountRBACCreated"
+)
+
+type Reconciler struct {
+ *reconciler.Base
+
+ // listers index properties about resources
+ namespaceLister corev1listers.NamespaceLister
+}
+
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ namespaceInformer corev1informers.NamespaceInformer,
+) *controller.Impl {
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ namespaceLister: namespaceInformer.Lister(),
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ // TODO: filter label selector: on InjectionEnabledLabels()
+ // TODO: we need to also watch for changes to service accounts, RoleBindings, and Brokers to heal on bad changes.
+
+ r.Logger.Info("Setting up event handlers")
+ namespaceInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the Namespace resource
+// with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ _, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the namespace resource with this namespace/name
+ original, err := r.namespaceLister.Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("namespace key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ if original.Labels[resources.InjectionLabelKey] != resources.InjectionEnabledLabelValue {
+ logging.FromContext(ctx).Debug("Not reconciling Namespace")
+ // TODO: this does not handle cleanup of unwanted brokers in namespace.
+ return nil
+ }
+
+ // Don't modify the informers copy
+ ns := original.DeepCopy()
+
+ // Reconcile this copy of the Namespace and then write back any status updates regardless of
+ // whether the reconcile errors out.
+ err = r.reconcile(ctx, ns)
+ if err != nil {
+ logging.FromContext(ctx).Error("Error reconciling Namespace", zap.Error(err), zap.Any("key", key))
+ } else {
+ logging.FromContext(ctx).Debug("Namespace reconciled", zap.Any("key", key))
+ }
+
+ // Requeue if the resource is not ready:
+ return err
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, ns *corev1.Namespace) error {
+ if ns.DeletionTimestamp != nil {
+ return nil
+ }
+ sa, err := r.reconcileBrokerFilterServiceAccount(ctx, ns)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account for the namespace", zap.Error(err))
+ return err
+ }
+ _, err = r.reconcileBrokerFilterRBAC(ctx, ns, sa)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account RBAC for the namespace", zap.Error(err))
+ return err
+ }
+ _, err = r.reconcileBroker(ctx, ns)
+ if err != nil {
+ logging.FromContext(ctx).Error("Unable to reconcile Broker for the namespace", zap.Error(err))
+ return err
+ }
+ return nil
+}
+
+// reconcileBrokerFilterServiceAccount reconciles the Broker's filter service account for Namespace 'ns'.
+func (r *Reconciler) reconcileBrokerFilterServiceAccount(ctx context.Context, ns *corev1.Namespace) (*corev1.ServiceAccount, error) {
+ current, err := r.KubeClientSet.CoreV1().ServiceAccounts(ns.Name).Get(resources.ServiceAccountName, metav1.GetOptions{})
+
+ // If the resource doesn't exist, we'll create it.
+ if k8serrors.IsNotFound(err) {
+ sa := resources.MakeServiceAccount(ns.Name)
+ sa, err := r.KubeClientSet.CoreV1().ServiceAccounts(ns.Name).Create(sa)
+ if err != nil {
+ return nil, err
+ }
+ r.Recorder.Event(ns, corev1.EventTypeNormal, serviceAccountCreated,
+ fmt.Sprintf("Service account created for the Broker '%s'", sa.Name))
+ return sa, nil
+ } else if err != nil {
+ return nil, err
+ }
+ // Don't update anything that is already present.
+ return current, nil
+}
+
+// reconcileBrokerFilterRBAC reconciles the Broker's filter service account RBAC for the Namespace 'ns'.
+func (r *Reconciler) reconcileBrokerFilterRBAC(ctx context.Context, ns *corev1.Namespace, sa *corev1.ServiceAccount) (*rbacv1.RoleBinding, error) {
+ current, err := r.KubeClientSet.RbacV1().RoleBindings(ns.Name).Get(resources.RoleBindingName, metav1.GetOptions{})
+
+ // If the resource doesn't exist, we'll create it.
+ if k8serrors.IsNotFound(err) {
+ rb := resources.MakeRoleBinding(sa)
+ rb, err := r.KubeClientSet.RbacV1().RoleBindings(ns.Name).Create(rb)
+ if err != nil {
+ return nil, err
+ }
+ r.Recorder.Event(ns, corev1.EventTypeNormal, serviceAccountRBACCreated,
+ fmt.Sprintf("Service account RBAC created for the Broker Filter '%s'", rb.Name))
+ return rb, nil
+ } else if err != nil {
+ return nil, err
+ }
+ // Don't update anything that is already present.
+ return current, nil
+}
+
+// reconcileBroker reconciles the default Broker for the Namespace 'ns'.
+func (r *Reconciler) reconcileBroker(ctx context.Context, ns *corev1.Namespace) (*v1alpha1.Broker, error) {
+ current, err := r.EventingClientSet.EventingV1alpha1().Brokers(ns.Name).Get(resources.DefaultBrokerName, metav1.GetOptions{})
+
+ // If the resource doesn't exist, we'll create it.
+ if k8serrors.IsNotFound(err) {
+ b := resources.MakeBroker(ns.Name)
+ b, err = r.EventingClientSet.EventingV1alpha1().Brokers(ns.Name).Create(b)
+ if err != nil {
+ return nil, err
+ }
+ r.Recorder.Event(ns, corev1.EventTypeNormal, brokerCreated,
+ "Default eventing.knative.dev Broker created.")
+ return b, nil
+ } else if err != nil {
+ return nil, err
+ }
+ // Don't update anything that is already present.
+ return current, nil
+}
diff --git a/pkg/reconciler/namespace/namespace_test.go b/pkg/reconciler/namespace/namespace_test.go
new file mode 100644
index 00000000000..5b76fe2157d
--- /dev/null
+++ b/pkg/reconciler/namespace/namespace_test.go
@@ -0,0 +1,244 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package namespace
+
+import (
+ "github.com/knative/eventing/pkg/reconciler/namespace/resources"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "testing"
+
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ "github.com/knative/eventing/pkg/reconciler"
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+const (
+ testNS = "test-namespace"
+)
+
+var (
+ brokerGVR = schema.GroupVersionResource{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Resource: "brokers",
+ }
+
+ roleBindingGVR = schema.GroupVersionResource{
+ Group: "rbac.authorization.k8s.io",
+ Version: "v1",
+ Resource: "rolebindings",
+ }
+
+ serviceAccountGVR = schema.GroupVersionResource{
+ Version: "v1",
+ Resource: "serviceaccounts",
+ }
+)
+
+func init() {
+ // Add types to scheme
+ _ = eventingv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestNew(t *testing.T) {
+ defer logtesting.ClearAll()
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+ kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+
+ namespaceInformer := kubeInformer.Core().V1().Namespaces()
+
+ c := NewController(reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ }, namespaceInformer)
+
+ if c == nil {
+ t.Fatal("Expected NewController to return a non-nil value")
+ }
+}
+
+func TestAllCases(t *testing.T) {
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ }, {
+ Name: "Namespace is not labeled",
+ Objects: []runtime.Object{
+ NewNamespace(testNS),
+ },
+ Key: testNS,
+ }, {
+ Name: "Namespace is labeled disabled",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionDisabledLabels())),
+ },
+ Key: testNS,
+ }, {
+ Name: "Namespace is deleted, no resources",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionEnabledLabels()),
+ WithNamespaceDeleted,
+ ),
+ },
+ Key: testNS,
+ }, {
+ Name: "Namespace enabled",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionEnabledLabels()),
+ ),
+ },
+ Key: testNS,
+ SkipNamespaceValidation: true,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountCreated", "Service account created for the Broker 'eventing-broker-filter'"),
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountRBACCreated", "Service account RBAC created for the Broker Filter 'eventing-broker-filter'"),
+ Eventf(corev1.EventTypeNormal, "BrokerCreated", "Default eventing.knative.dev Broker created."),
+ },
+ WantCreates: []metav1.Object{
+ resources.MakeBroker(testNS),
+ resources.MakeServiceAccount(testNS),
+ resources.MakeRoleBinding(resources.MakeServiceAccount(testNS)),
+ },
+ }, {
+ Name: "Namespace enabled, broker exists",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionEnabledLabels()),
+ ),
+ resources.MakeBroker(testNS),
+ },
+ Key: testNS,
+ SkipNamespaceValidation: true,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountCreated", "Service account created for the Broker 'eventing-broker-filter'"),
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountRBACCreated", "Service account RBAC created for the Broker Filter 'eventing-broker-filter'"),
+ },
+ WantCreates: []metav1.Object{
+ resources.MakeServiceAccount(testNS),
+ resources.MakeRoleBinding(resources.MakeServiceAccount(testNS)),
+ },
+ },
+ {
+ Name: "Namespace enabled, service account exists",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionEnabledLabels()),
+ ),
+ resources.MakeServiceAccount(testNS),
+ },
+ Key: testNS,
+ SkipNamespaceValidation: true,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountRBACCreated", "Service account RBAC created for the Broker Filter 'eventing-broker-filter'"),
+ Eventf(corev1.EventTypeNormal, "BrokerCreated", "Default eventing.knative.dev Broker created."),
+ },
+ WantCreates: []metav1.Object{
+ resources.MakeBroker(testNS),
+ resources.MakeRoleBinding(resources.MakeServiceAccount(testNS)),
+ },
+ },
+ {
+ Name: "Namespace enabled, role binding exists",
+ Objects: []runtime.Object{
+ NewNamespace(testNS,
+ WithNamespaceLabeled(resources.InjectionEnabledLabels()),
+ ),
+ resources.MakeRoleBinding(resources.MakeServiceAccount(testNS)),
+ },
+ Key: testNS,
+ SkipNamespaceValidation: true,
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "BrokerFilterServiceAccountCreated", "Service account created for the Broker 'eventing-broker-filter'"),
+ Eventf(corev1.EventTypeNormal, "BrokerCreated", "Default eventing.knative.dev Broker created."),
+ },
+ WantCreates: []metav1.Object{
+ resources.MakeBroker(testNS),
+ resources.MakeServiceAccount(testNS),
+ },
+ },
+ //{ TODO: this test should work but there is no clean-up in the controller.
+ // Name: "Namespace disabled, cleanup",
+ // Objects: []runtime.Object{
+ // NewNamespace(testNS,
+ // WithNamespaceLabeled(resources.InjectionDisabledLabels()),
+ // ),
+ // resources.MakeBroker(testNS),
+ // resources.MakeServiceAccount(testNS),
+ // resources.MakeRoleBinding(resources.MakeServiceAccount(testNS)),
+ // },
+ // Key: testNS,
+ // SkipNamespaceValidation: true,
+ // WantErr: false,
+ // WantDeletes: []clientgotesting.DeleteActionImpl{{
+ // ActionImpl: clientgotesting.ActionImpl{
+ // Namespace: testNS,
+ // Verb: "delete",
+ // Resource: brokerGVR,
+ // },
+ // Name: resources.DefaultBrokerName,
+ // }, {
+ // ActionImpl: clientgotesting.ActionImpl{
+ // Namespace: testNS,
+ // Verb: "delete",
+ // Resource: roleBindingGVR,
+ // },
+ // Name: resources.RoleBindingName,
+ // }, {
+ // ActionImpl: clientgotesting.ActionImpl{
+ // Namespace: testNS,
+ // Verb: "delete",
+ // Resource: serviceAccountGVR,
+ // },
+ // Name: resources.DefaultBrokerName,
+ // }},
+ //},
+ // TODO: we need an existing default un-owned test.
+ }
+
+ defer logtesting.ClearAll()
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ namespaceLister: listers.GetNamespaceLister(),
+ }
+ }))
+}
diff --git a/pkg/reconciler/namespace/resources/broker.go b/pkg/reconciler/namespace/resources/broker.go
new file mode 100644
index 00000000000..10147cb68e6
--- /dev/null
+++ b/pkg/reconciler/namespace/resources/broker.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MakeBroker creates a default Broker object for the Namespace 'namespace'.
+func MakeBroker(namespace string) *v1alpha1.Broker {
+ return &v1alpha1.Broker{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: DefaultBrokerName,
+ Labels: OwnedLabels(),
+ },
+ }
+}
diff --git a/pkg/reconciler/namespace/resources/labels.go b/pkg/reconciler/namespace/resources/labels.go
new file mode 100644
index 00000000000..79fa71c82db
--- /dev/null
+++ b/pkg/reconciler/namespace/resources/labels.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+const (
+ // InjectionLabelKey is the namespace label that enables or disables knative-eventing injection.
+ InjectionLabelKey = "knative-eventing-injection"
+ InjectionEnabledLabelValue = "enabled"
+ InjectionDisabledLabelValue = "disabled"
+)
+
+// OwnedLabels generates the labels present on injected broker resources.
+func OwnedLabels() map[string]string {
+ return map[string]string{
+ "eventing.knative.dev/namespaceInjected": "true",
+ }
+}
+
+func InjectionEnabledLabels() map[string]string {
+ return map[string]string{
+ InjectionLabelKey: InjectionEnabledLabelValue,
+ }
+}
+
+func InjectionDisabledLabels() map[string]string {
+ return map[string]string{
+ InjectionLabelKey: InjectionDisabledLabelValue,
+ }
+}
diff --git a/pkg/reconciler/namespace/resources/names.go b/pkg/reconciler/namespace/resources/names.go
new file mode 100644
index 00000000000..d33da008ed0
--- /dev/null
+++ b/pkg/reconciler/namespace/resources/names.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+const (
+ DefaultBrokerName = "default"
+ ServiceAccountName = "eventing-broker-filter"
+ RoleBindingName = "eventing-broker-filter"
+ ClusterRoleName = "eventing-broker-filter"
+)
diff --git a/pkg/reconciler/namespace/resources/role_binding.go b/pkg/reconciler/namespace/resources/role_binding.go
new file mode 100644
index 00000000000..4ff07e27617
--- /dev/null
+++ b/pkg/reconciler/namespace/resources/role_binding.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MakeRoleBinding creates a RoleBinding object for the Broker's filter
+// service account 'sa', in the service account's namespace.
+func MakeRoleBinding(sa *corev1.ServiceAccount) *rbacv1.RoleBinding {
+ return &rbacv1.RoleBinding{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: sa.Namespace,
+ Name: RoleBindingName,
+ Labels: OwnedLabels(),
+ },
+ RoleRef: rbacv1.RoleRef{
+ APIGroup: "rbac.authorization.k8s.io",
+ Kind: "ClusterRole",
+ Name: ClusterRoleName,
+ },
+ Subjects: []rbacv1.Subject{
+ {
+ Kind: "ServiceAccount",
+ Namespace: sa.Namespace,
+ Name: sa.Name,
+ },
+ },
+ }
+}
diff --git a/pkg/reconciler/namespace/resources/service_account.go b/pkg/reconciler/namespace/resources/service_account.go
new file mode 100644
index 00000000000..5e06a690f41
--- /dev/null
+++ b/pkg/reconciler/namespace/resources/service_account.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MakeServiceAccount creates a ServiceAccount object for the Namespace 'namespace'.
+func MakeServiceAccount(namespace string) *corev1.ServiceAccount {
+ return &corev1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: ServiceAccountName,
+ Labels: OwnedLabels(),
+ },
+ }
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 310ccb568c3..7ecff7d57e0 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -127,6 +127,10 @@ func (l *Listers) GetK8sServiceLister() corev1listers.ServiceLister {
return corev1listers.NewServiceLister(l.indexerFor(&corev1.Service{}))
}
+func (l *Listers) GetNamespaceLister() corev1listers.NamespaceLister {
+ return corev1listers.NewNamespaceLister(l.indexerFor(&corev1.Namespace{}))
+}
+
func (l *Listers) GetEndpointsLister() corev1listers.EndpointsLister {
return corev1listers.NewEndpointsLister(l.indexerFor(&corev1.Endpoints{}))
}
diff --git a/pkg/reconciler/testing/namespace.go b/pkg/reconciler/testing/namespace.go
new file mode 100644
index 00000000000..3f294c0e889
--- /dev/null
+++ b/pkg/reconciler/testing/namespace.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "time"
+)
+
+// NamespaceOption enables further configuration of a Namespace.
+type NamespaceOption func(*corev1.Namespace)
+
+// NewNamespace creates a Namespace, applying the given NamespaceOptions.
+func NewNamespace(name string, o ...NamespaceOption) *corev1.Namespace {
+ s := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+ for _, opt := range o {
+ opt(s)
+ }
+ return s
+}
+
+func WithNamespaceDeleted(n *corev1.Namespace) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ n.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+func WithNamespaceLabeled(labels map[string]string) NamespaceOption {
+ return func(n *corev1.Namespace) {
+ n.Labels = labels
+ }
+}
diff --git a/pkg/reconciler/v1alpha1/namespace/namespace.go b/pkg/reconciler/v1alpha1/namespace/namespace.go
deleted file mode 100644
index ccf83115530..00000000000
--- a/pkg/reconciler/v1alpha1/namespace/namespace.go
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package namespace
-
-import (
- "context"
- "fmt"
-
- "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "github.com/knative/eventing/pkg/logging"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- v1 "k8s.io/api/core/v1"
- rbacv1 "k8s.io/api/rbac/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- k8serrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/tools/record"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
-)
-
-const (
- // controllerAgentName is the string used by this controller to identify
- // itself when creating events.
- controllerAgentName = "knative-eventing-namespace-controller"
-
- // Label to enable knative-eventing in a namespace.
- knativeEventingLabelKey = "knative-eventing-injection"
- knativeEventingLabelValue = "enabled"
-
- defaultBroker = "default"
- brokerFilterSA = "eventing-broker-filter"
- brokerFilterRB = "eventing-broker-filter"
- brokerFilterClusterRole = "eventing-broker-filter"
-
- // Name of the corev1.Events emitted from the reconciliation process.
- brokerCreated = "BrokerCreated"
- serviceAccountCreated = "BrokerFilterServiceAccountCreated"
- serviceAccountRBACCreated = "BrokerFilterServiceAccountRBACCreated"
-)
-
-type reconciler struct {
- client client.Client
- recorder record.EventRecorder
-
- logger *zap.Logger
-}
-
-// Verify the struct implements reconcile.Reconciler
-var _ reconcile.Reconciler = &reconciler{}
-
-// ProvideController returns a function that returns a Namespace controller.
-func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
- // Setup a new controller to Reconcile Namespaces.
- r := &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
- }
- c, err := controller.New(controllerAgentName, mgr, controller.Options{
- Reconciler: r,
- })
- if err != nil {
- return nil, err
- }
-
- // Watch Namespaces.
- if err = c.Watch(&source.Kind{Type: &v1.Namespace{}}, &handler.EnqueueRequestForObject{}); err != nil {
- return nil, err
- }
-
- // Watch all the resources that this reconciler reconciles. This is a map from resource type to
- // the name of the resource of that type we care about (i.e. only if the resource of the given
- // type and with the given name changes, do we reconcile the Namespace).
- resources := map[runtime.Object]string{
- &corev1.ServiceAccount{}: brokerFilterSA,
- &rbacv1.RoleBinding{}: brokerFilterRB,
- &v1alpha1.Broker{}: defaultBroker,
- }
- for t, n := range resources {
- nm := &namespaceMapper{
- name: n,
- }
- err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestsFromMapFunc{ToRequests: nm})
- if err != nil {
- return nil, err
- }
- }
-
- return c, nil
-}
-
-type namespaceMapper struct {
- name string
-}
-
-var _ handler.Mapper = &namespaceMapper{}
-
-// Map implements handler.Mapper.Map.
-func (m *namespaceMapper) Map(o handler.MapObject) []reconcile.Request {
- if o.Meta.GetName() == m.name {
- return []reconcile.Request{
- {
- NamespacedName: types.NamespacedName{
- Namespace: "",
- Name: o.Meta.GetNamespace(),
- },
- },
- }
- }
- return []reconcile.Request{}
-}
-
-// InjectClient implements controller runtime's inject.Client.
-func (r *reconciler) InjectClient(c client.Client) error {
- r.client = c
- return nil
-}
-
-// Reconcile compares the actual state with the desired, and attempts to
-// converge the two. It then updates the Status block of the Namespace resource
-// with the current status of the resource.
-func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ctx := context.TODO()
- ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
-
- ns := &corev1.Namespace{}
- err := r.client.Get(ctx, request.NamespacedName, ns)
-
- if errors.IsNotFound(err) {
- logging.FromContext(ctx).Info("Could not find Namespace")
- return reconcile.Result{}, nil
- }
-
- if err != nil {
- logging.FromContext(ctx).Error("Could not Get Namespace", zap.Error(err))
- return reconcile.Result{}, err
- }
-
- if ns.Labels[knativeEventingLabelKey] != knativeEventingLabelValue {
- logging.FromContext(ctx).Debug("Not reconciling Namespace")
- return reconcile.Result{}, nil
- }
-
- // Reconcile this copy of the Namespace and then write back any status updates regardless of
- // whether the reconcile error out.
- reconcileErr := r.reconcile(ctx, ns)
- if reconcileErr != nil {
- logging.FromContext(ctx).Error("Error reconciling Namespace", zap.Error(reconcileErr))
- } else {
- logging.FromContext(ctx).Debug("Namespace reconciled")
- }
-
- // Requeue if the resource is not ready:
- return reconcile.Result{}, reconcileErr
-}
-
-func (r *reconciler) reconcile(ctx context.Context, ns *corev1.Namespace) error {
- // No need for a finalizer, because everything reconciled is created inside the Namespace. If
- // the Namespace is being deleted, then all the reconciled objects will be too.
-
- if ns.DeletionTimestamp != nil {
- return nil
- }
-
- sa, err := r.reconcileBrokerFilterServiceAccount(ctx, ns)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account for the namespace", zap.Error(err))
- return err
- }
- _, err = r.reconcileBrokerFilterRBAC(ctx, ns, sa)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account RBAC for the namespace", zap.Error(err))
- return err
- }
- _, err = r.reconcileBroker(ctx, ns)
- if err != nil {
- logging.FromContext(ctx).Error("Unable to reconcile Broker for the namespace", zap.Error(err))
- return err
- }
-
- return nil
-}
-
-// reconcileBrokerFilterServiceAccount reconciles the Broker's filter service account for Namespace 'ns'.
-func (r *reconciler) reconcileBrokerFilterServiceAccount(ctx context.Context, ns *corev1.Namespace) (*corev1.ServiceAccount, error) {
- current, err := r.getBrokerFilterServiceAccount(ctx, ns)
-
- // If the resource doesn't exist, we'll create it.
- if k8serrors.IsNotFound(err) {
- sa := newBrokerFilterServiceAccount(ns)
- err = r.client.Create(ctx, sa)
- if err != nil {
- return nil, err
- }
- r.recorder.Event(ns,
- corev1.EventTypeNormal,
- serviceAccountCreated,
- fmt.Sprintf("Service account created for the Broker '%s'", sa.Name))
- return sa, nil
- } else if err != nil {
- return nil, err
- }
- // Don't update anything that is already present.
- return current, nil
-}
-
-// getBrokerFilterServiceAccount returns the Broker's filter service account for Namespace 'ns' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getBrokerFilterServiceAccount(ctx context.Context, ns *corev1.Namespace) (*corev1.ServiceAccount, error) {
- sa := &corev1.ServiceAccount{}
- name := types.NamespacedName{
- Namespace: ns.Name,
- Name: brokerFilterSA,
- }
- err := r.client.Get(ctx, name, sa)
- return sa, err
-}
-
-// newBrokerFilterServiceAccount creates a ServiceAccount object for the Namespace 'ns'.
-func newBrokerFilterServiceAccount(ns *corev1.Namespace) *corev1.ServiceAccount {
- return &corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ns.Name,
- Name: brokerFilterSA,
- Labels: injectedLabels(),
- },
- }
-}
-
-func injectedLabels() map[string]string {
- return map[string]string{
- "eventing.knative.dev/namespaceInjected": "true",
- }
-}
-
-// reconcileBrokerFilterRBAC reconciles the Broker's filter service account RBAC for the Namespace 'ns'.
-func (r *reconciler) reconcileBrokerFilterRBAC(ctx context.Context, ns *corev1.Namespace, sa *corev1.ServiceAccount) (*rbacv1.RoleBinding, error) {
- current, err := r.getBrokerFilterRBAC(ctx, ns)
-
- // If the resource doesn't exist, we'll create it.
- if k8serrors.IsNotFound(err) {
- rbac := newBrokerFilterRBAC(ns, sa)
- err = r.client.Create(ctx, rbac)
- if err != nil {
- return nil, err
- }
- r.recorder.Event(ns,
- corev1.EventTypeNormal,
- serviceAccountRBACCreated,
- fmt.Sprintf("Service account RBAC created for the Broker Filter '%s'", rbac.Name))
- return rbac, nil
- } else if err != nil {
- return nil, err
- }
- // Don't update anything that is already present.
- return current, nil
-}
-
-// getBrokerFilterRBAC returns the Broker's filter role binding for Namespace 'ns' if exists,
-// otherwise it returns an error.
-func (r *reconciler) getBrokerFilterRBAC(ctx context.Context, ns *corev1.Namespace) (*rbacv1.RoleBinding, error) {
- rb := &rbacv1.RoleBinding{}
- name := types.NamespacedName{
- Namespace: ns.Name,
- Name: brokerFilterRB,
- }
- err := r.client.Get(ctx, name, rb)
- return rb, err
-}
-
-// newBrokerFilterRBAC creates a RpleBinding object for the Broker's filter service account 'sa' in the Namespace 'ns'.
-func newBrokerFilterRBAC(ns *corev1.Namespace, sa *corev1.ServiceAccount) *rbacv1.RoleBinding {
- return &rbacv1.RoleBinding{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ns.Name,
- Name: brokerFilterRB,
- Labels: injectedLabels(),
- },
- RoleRef: rbacv1.RoleRef{
- APIGroup: "rbac.authorization.k8s.io",
- Kind: "ClusterRole",
- Name: brokerFilterClusterRole,
- },
- Subjects: []rbacv1.Subject{
- {
- Kind: "ServiceAccount",
- Namespace: ns.Name,
- Name: sa.Name,
- },
- },
- }
-}
-
-// getBroker returns the default broker for Namespace 'ns' if it exists, otherwise it returns an
-// error.
-func (r *reconciler) getBroker(ctx context.Context, ns *corev1.Namespace) (*v1alpha1.Broker, error) {
- b := &v1alpha1.Broker{}
- name := types.NamespacedName{
- Namespace: ns.Name,
- Name: defaultBroker,
- }
- err := r.client.Get(ctx, name, b)
- return b, err
-}
-
-// reconcileBroker reconciles the default Broker for the Namespace 'ns'.
-func (r *reconciler) reconcileBroker(ctx context.Context, ns *corev1.Namespace) (*v1alpha1.Broker, error) {
- current, err := r.getBroker(ctx, ns)
-
- // If the resource doesn't exist, we'll create it.
- if k8serrors.IsNotFound(err) {
- b := newBroker(ns)
- err = r.client.Create(ctx, b)
- if err != nil {
- return nil, err
- }
- r.recorder.Event(ns, corev1.EventTypeNormal, brokerCreated, "Default eventing.knative.dev Broker created.")
- return b, nil
- } else if err != nil {
- return nil, err
- }
- // Don't update anything that is already present.
- return current, nil
-}
-
-// newBroker creates a placeholder default Broker object for Namespace 'ns'.
-func newBroker(ns *corev1.Namespace) *v1alpha1.Broker {
- return &v1alpha1.Broker{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: ns.Name,
- Name: defaultBroker,
- Labels: injectedLabels(),
- },
- }
-}
diff --git a/pkg/reconciler/v1alpha1/namespace/namespace_test.go b/pkg/reconciler/v1alpha1/namespace/namespace_test.go
deleted file mode 100644
index be60e4a9828..00000000000
--- a/pkg/reconciler/v1alpha1/namespace/namespace_test.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package namespace
-
-import (
- "context"
- "errors"
- "fmt"
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
- "go.uber.org/zap"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/kubernetes/scheme"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-const (
- testNS = "test-namespace"
- brokerName = "default"
-)
-
-var (
- disabled = "disabled"
- enabled = "enabled"
-
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
-
- // map of events to set test cases' expectations easier
- events = map[string]corev1.Event{
- brokerCreated: {Reason: brokerCreated, Type: corev1.EventTypeNormal},
- serviceAccountCreated: {Reason: serviceAccountCreated, Type: corev1.EventTypeNormal},
- serviceAccountRBACCreated: {Reason: serviceAccountRBACCreated, Type: corev1.EventTypeNormal},
- }
-)
-
-func init() {
- // Add types to scheme
- _ = v1alpha1.AddToScheme(scheme.Scheme)
-}
-
-func TestProvideController(t *testing.T) {
- // TODO(grantr) This needs a mock of manager.Manager. Creating a manager
- // with a fake Config fails because the Manager tries to contact the
- // apiserver.
-
- // cfg := &rest.Config{
- // Host: "http://foo:80",
- // }
- //
- // mgr, err := manager.New(cfg, manager.Options{})
- // if err != nil {
- // t.Fatalf("Error creating manager: %v", err)
- // }
- //
- // _, err = ProvideController(mgr)
- // if err != nil {
- // t.Fatalf("Error in ProvideController: %v", err)
- // }
-}
-
-func TestInjectClient(t *testing.T) {
- r := &reconciler{}
- orig := r.client
- n := fake.NewFakeClient()
- if orig == n {
- t.Errorf("Original and new clients are identical: %v", orig)
- }
- err := r.InjectClient(n)
- if err != nil {
- t.Errorf("Unexpected error injecting the client: %v", err)
- }
- if n != r.client {
- t.Errorf("Unexpected client. Expected: '%v'. Actual: '%v'", n, r.client)
- }
-}
-
-func TestNamespaceMapper_Map(t *testing.T) {
- m := &namespaceMapper{
- name: makeBroker().Name,
- }
-
- req := handler.MapObject{
- Meta: makeBroker().GetObjectMeta(),
- Object: makeBroker(),
- }
- actual := m.Map(req)
- expected := []reconcile.Request{
- {
- NamespacedName: types.NamespacedName{
- Namespace: "",
- Name: testNS,
- },
- },
- }
- if diff := cmp.Diff(expected, actual); diff != "" {
- t.Errorf("Unexpected reconcile requests (-want +got): %v", diff)
- }
-}
-
-func TestReconcile(t *testing.T) {
- testCases := []controllertesting.TestCase{
- {
- Name: "Namespace not found",
- },
- {
- Name: "Namespace.Get fails",
- Scheme: scheme.Scheme,
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.Namespace); ok {
- return controllertesting.Handled, errors.New("test error getting the NS")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting the NS",
- },
- {
- Name: "Namespace is not labeled",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(nil),
- },
- WantAbsent: []runtime.Object{
- makeBroker(),
- },
- },
- {
- Name: "Namespace is labeled disabled",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(&disabled),
- },
- WantAbsent: []runtime.Object{
- makeBroker(),
- },
- },
- {
- Name: "Namespace is being deleted",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeDeletingNamespace(),
- },
- WantAbsent: []runtime.Object{
- makeBroker(),
- },
- },
- {
- Name: "Broker.Get fails",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(&enabled),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error getting the Broker")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting the Broker",
- WantAbsent: []runtime.Object{
- makeBroker(),
- },
- WantEvent: []corev1.Event{events[serviceAccountCreated], events[serviceAccountRBACCreated]},
- },
- {
- Name: "Broker Found",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(&enabled),
- makeBroker(),
- },
- WantEvent: []corev1.Event{events[serviceAccountCreated], events[serviceAccountRBACCreated]},
- },
- {
- Name: "Broker.Create fails",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(&enabled),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error creating the Broker")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error creating the Broker",
- WantEvent: []corev1.Event{events[serviceAccountCreated], events[serviceAccountRBACCreated]},
- },
- {
- Name: "Broker created",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeNamespace(&enabled),
- },
- WantPresent: []runtime.Object{
- makeBroker(),
- },
- WantEvent: []corev1.Event{
- events[serviceAccountCreated],
- events[serviceAccountRBACCreated],
- events[brokerCreated]},
- },
- }
- for _, tc := range testCases {
- c := tc.GetClient()
- recorder := tc.GetEventRecorder()
-
- r := &reconciler{
- client: c,
- recorder: recorder,
- logger: zap.NewNop(),
- }
- tc.ReconcileKey = fmt.Sprintf("%s/%s", "", testNS)
- tc.IgnoreTimes = true
- t.Run(tc.Name, tc.Runner(t, r, c, recorder))
- }
-}
-
-func makeNamespace(labelValue *string) *corev1.Namespace {
- labels := map[string]string{}
- if labelValue != nil {
- labels["knative-eventing-injection"] = *labelValue
- }
-
- return &corev1.Namespace{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "v1",
- Kind: "Namespace",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: testNS,
- Labels: labels,
- },
- }
-}
-
-func makeDeletingNamespace() *corev1.Namespace {
- ns := makeNamespace(&enabled)
- ns.DeletionTimestamp = &deletionTime
- return ns
-}
-
-func makeBroker() *v1alpha1.Broker {
- return &v1alpha1.Broker{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Broker",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: brokerName,
- Labels: map[string]string{
- "eventing.knative.dev/namespaceInjected": "true",
- },
- },
- }
-}
From ff2c461a46ea1f749bf026434c84d05824246ca7 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Tue, 23 Apr 2019 10:51:01 -0700
Subject: [PATCH 51/76] Remove protobuf constraint
No longer used for CEL type declarations.
---
Gopkg.lock | 7 +-
Gopkg.toml | 4 -
.../golang/protobuf/jsonpb/jsonpb.go | 1271 -----------------
3 files changed, 2 insertions(+), 1280 deletions(-)
delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 33be1a8025c..1feab64d598 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -196,11 +196,10 @@
revision = "24b0969c4cb722950103eed87108c8d291a8df00"
[[projects]]
- digest = "1:771d03745d3f566fb910a47ab1fa959379398df1d1768e44b8c5bc3865d1407a"
+ digest = "1:ac06172e8420ee3192527e84a3f373ada56043e6b0e27c2e765b4dd8408f2ec9"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
- "jsonpb",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
@@ -1286,15 +1285,13 @@
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
"github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
- "github.com/golang/protobuf/jsonpb",
- "github.com/golang/protobuf/proto",
- "github.com/golang/protobuf/ptypes/struct",
"github.com/google/cel-go/cel",
"github.com/google/cel-go/checker/decls",
"github.com/google/cel-go/common/types",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/uuid",
+ "github.com/hashicorp/golang-lru",
"github.com/knative/build/pkg/apis/build/v1alpha1",
"github.com/knative/pkg/apis",
"github.com/knative/pkg/apis/duck",
diff --git a/Gopkg.toml b/Gopkg.toml
index fe4c527d801..4d075f192e4 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -135,7 +135,3 @@ required = [
[[constraint]]
name = "github.com/google/cel-go"
version = "=0.2.0"
-
-[[constraint]]
- name = "github.com/golang/protobuf"
- version = "=1.3.0"
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
deleted file mode 100644
index ada2b78e89d..00000000000
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ /dev/null
@@ -1,1271 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2015 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
-It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
-
-This package produces a different output than the standard "encoding/json" package,
-which does not operate correctly on protocol buffers.
-*/
-package jsonpb
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
-
- stpb "github.com/golang/protobuf/ptypes/struct"
-)
-
-const secondInNanos = int64(time.Second / time.Nanosecond)
-
-// Marshaler is a configurable object for converting between
-// protocol buffer objects and a JSON representation for them.
-type Marshaler struct {
- // Whether to render enum values as integers, as opposed to string values.
- EnumsAsInts bool
-
- // Whether to render fields with zero values.
- EmitDefaults bool
-
- // A string to indent each level by. The presence of this field will
- // also cause a space to appear between the field separator and
- // value, and for newlines to be appear between fields and array
- // elements.
- Indent string
-
- // Whether to use the original (.proto) name for fields.
- OrigName bool
-
- // A custom URL resolver to use when marshaling Any messages to JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// AnyResolver takes a type URL, present in an Any message, and resolves it into
-// an instance of the associated message.
-type AnyResolver interface {
- Resolve(typeUrl string) (proto.Message, error)
-}
-
-func defaultResolveAny(typeUrl string) (proto.Message, error) {
- // Only the part of typeUrl after the last slash is relevant.
- mname := typeUrl
- if slash := strings.LastIndex(mname, "/"); slash >= 0 {
- mname = mname[slash+1:]
- }
- mt := proto.MessageType(mname)
- if mt == nil {
- return nil, fmt.Errorf("unknown message type %q", mname)
- }
- return reflect.New(mt.Elem()).Interface().(proto.Message), nil
-}
-
-// JSONPBMarshaler is implemented by protobuf messages that customize the
-// way they are marshaled to JSON. Messages that implement this should
-// also implement JSONPBUnmarshaler so that the custom format can be
-// parsed.
-//
-// The JSON marshaling must follow the proto to JSON specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBMarshaler interface {
- MarshalJSONPB(*Marshaler) ([]byte, error)
-}
-
-// JSONPBUnmarshaler is implemented by protobuf messages that customize
-// the way they are unmarshaled from JSON. Messages that implement this
-// should also implement JSONPBMarshaler so that the custom format can be
-// produced.
-//
-// The JSON unmarshaling must follow the JSON to proto specification:
-// https://developers.google.com/protocol-buffers/docs/proto3#json
-type JSONPBUnmarshaler interface {
- UnmarshalJSONPB(*Unmarshaler, []byte) error
-}
-
-// Marshal marshals a protocol buffer into JSON.
-func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
- v := reflect.ValueOf(pb)
- if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return errors.New("Marshal called with nil")
- }
- // Check for unset required fields first.
- if err := checkRequiredFields(pb); err != nil {
- return err
- }
- writer := &errWriter{writer: out}
- return m.marshalObject(writer, pb, "", "")
-}
-
-// MarshalToString converts a protocol buffer object to JSON string.
-func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
- var buf bytes.Buffer
- if err := m.Marshal(&buf, pb); err != nil {
- return "", err
- }
- return buf.String(), nil
-}
-
-type int32Slice []int32
-
-var nonFinite = map[string]float64{
- `"NaN"`: math.NaN(),
- `"Infinity"`: math.Inf(1),
- `"-Infinity"`: math.Inf(-1),
-}
-
-// For sorting extensions ids to ensure stable output.
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type wkt interface {
- XXX_WellKnownType() string
-}
-
-// marshalObject writes a struct to the Writer.
-func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
- if jsm, ok := v.(JSONPBMarshaler); ok {
- b, err := jsm.MarshalJSONPB(m)
- if err != nil {
- return err
- }
- if typeURL != "" {
- // we are marshaling this object to an Any type
- var js map[string]*json.RawMessage
- if err = json.Unmarshal(b, &js); err != nil {
- return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
- }
- turl, err := json.Marshal(typeURL)
- if err != nil {
- return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
- }
- js["@type"] = (*json.RawMessage)(&turl)
- if b, err = json.Marshal(js); err != nil {
- return err
- }
- }
-
- out.write(string(b))
- return out.err
- }
-
- s := reflect.ValueOf(v).Elem()
-
- // Handle well-known types.
- if wkt, ok := v.(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- // "Wrappers use the same representation in JSON
- // as the wrapped primitive type, ..."
- sprop := proto.GetProperties(s.Type())
- return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
- case "Any":
- // Any is a bit more involved.
- return m.marshalAny(out, v, indent)
- case "Duration":
- // "Generated output always contains 0, 3, 6, or 9 fractional digits,
- // depending on required precision."
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if ns <= -secondInNanos || ns >= secondInNanos {
- return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
- }
- if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
- return errors.New("signs of seconds and nanos do not match")
- }
- if s < 0 {
- ns = -ns
- }
- x := fmt.Sprintf("%d.%09d", s, ns)
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`s"`)
- return out.err
- case "Struct", "ListValue":
- // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
- case "Timestamp":
- // "RFC 3339, where generated output will always be Z-normalized
- // and uses 0, 3, 6 or 9 fractional digits."
- s, ns := s.Field(0).Int(), s.Field(1).Int()
- if ns < 0 || ns >= secondInNanos {
- return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
- }
- t := time.Unix(s, ns).UTC()
- // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
- x := t.Format("2006-01-02T15:04:05.000000000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, "000")
- x = strings.TrimSuffix(x, ".000")
- out.write(`"`)
- out.write(x)
- out.write(`Z"`)
- return out.err
- case "Value":
- // Value has a single oneof.
- kind := s.Field(0)
- if kind.IsNil() {
- // "absence of any variant indicates an error"
- return errors.New("nil Value")
- }
- // oneof -> *T -> T -> T.F
- x := kind.Elem().Elem().Field(0)
- // TODO: pass the correct Properties if needed.
- return m.marshalValue(out, &proto.Properties{}, x, indent)
- }
- }
-
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
-
- firstField := true
-
- if typeURL != "" {
- if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
- return err
- }
- firstField = false
- }
-
- for i := 0; i < s.NumField(); i++ {
- value := s.Field(i)
- valueField := s.Type().Field(i)
- if strings.HasPrefix(valueField.Name, "XXX_") {
- continue
- }
-
- // IsNil will panic on most value kinds.
- switch value.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface:
- if value.IsNil() {
- continue
- }
- }
-
- if !m.EmitDefaults {
- switch value.Kind() {
- case reflect.Bool:
- if !value.Bool() {
- continue
- }
- case reflect.Int32, reflect.Int64:
- if value.Int() == 0 {
- continue
- }
- case reflect.Uint32, reflect.Uint64:
- if value.Uint() == 0 {
- continue
- }
- case reflect.Float32, reflect.Float64:
- if value.Float() == 0 {
- continue
- }
- case reflect.String:
- if value.Len() == 0 {
- continue
- }
- case reflect.Map, reflect.Ptr, reflect.Slice:
- if value.IsNil() {
- continue
- }
- }
- }
-
- // Oneof fields need special handling.
- if valueField.Tag.Get("protobuf_oneof") != "" {
- // value is an interface containing &T{real_value}.
- sv := value.Elem().Elem() // interface -> *T -> T
- value = sv.Field(0)
- valueField = sv.Type().Field(0)
- }
- prop := jsonProperties(valueField, m.OrigName)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- // Handle proto2 extensions.
- if ep, ok := v.(proto.Message); ok {
- extensions := proto.RegisteredExtensions(v)
- // Sort extensions for stable output.
- ids := make([]int32, 0, len(extensions))
- for id, desc := range extensions {
- if !proto.HasExtension(ep, desc) {
- continue
- }
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- for _, id := range ids {
- desc := extensions[id]
- if desc == nil {
- // unknown extension
- continue
- }
- ext, extErr := proto.GetExtension(ep, desc)
- if extErr != nil {
- return extErr
- }
- value := reflect.ValueOf(ext)
- var prop proto.Properties
- prop.Parse(desc.Tag)
- prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
- if !firstField {
- m.writeSep(out)
- }
- if err := m.marshalField(out, &prop, value, indent); err != nil {
- return err
- }
- firstField = false
- }
-
- }
-
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
-}
-
-func (m *Marshaler) writeSep(out *errWriter) {
- if m.Indent != "" {
- out.write(",\n")
- } else {
- out.write(",")
- }
-}
-
-func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
- // "If the Any contains a value that has a special JSON mapping,
- // it will be converted as follows: {"@type": xxx, "value": yyy}.
- // Otherwise, the value will be converted into a JSON object,
- // and the "@type" field will be inserted to indicate the actual data type."
- v := reflect.ValueOf(any).Elem()
- turl := v.Field(0).String()
- val := v.Field(1).Bytes()
-
- var msg proto.Message
- var err error
- if m.AnyResolver != nil {
- msg, err = m.AnyResolver.Resolve(turl)
- } else {
- msg, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if err := proto.Unmarshal(val, msg); err != nil {
- return err
- }
-
- if _, ok := msg.(wkt); ok {
- out.write("{")
- if m.Indent != "" {
- out.write("\n")
- }
- if err := m.marshalTypeURL(out, indent, turl); err != nil {
- return err
- }
- m.writeSep(out)
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- out.write(`"value": `)
- } else {
- out.write(`"value":`)
- }
- if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
- return err
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- }
- out.write("}")
- return out.err
- }
-
- return m.marshalObject(out, msg, indent, turl)
-}
-
-func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"@type":`)
- if m.Indent != "" {
- out.write(" ")
- }
- b, err := json.Marshal(typeURL)
- if err != nil {
- return err
- }
- out.write(string(b))
- return out.err
-}
-
-// marshalField writes field description and value to the Writer.
-func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- if m.Indent != "" {
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`"`)
- out.write(prop.JSONName)
- out.write(`":`)
- if m.Indent != "" {
- out.write(" ")
- }
- if err := m.marshalValue(out, prop, v, indent); err != nil {
- return err
- }
- return nil
-}
-
-// marshalValue writes the value to the Writer.
-func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
- var err error
- v = reflect.Indirect(v)
-
- // Handle nil pointer
- if v.Kind() == reflect.Invalid {
- out.write("null")
- return out.err
- }
-
- // Handle repeated elements.
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
- out.write("[")
- comma := ""
- for i := 0; i < v.Len(); i++ {
- sliceVal := v.Index(i)
- out.write(comma)
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
- if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
- return err
- }
- comma = ","
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write("]")
- return out.err
- }
-
- // Handle well-known types.
- // Most are handled up in marshalObject (because 99% are messages).
- if wkt, ok := v.Interface().(wkt); ok {
- switch wkt.XXX_WellKnownType() {
- case "NullValue":
- out.write("null")
- return out.err
- }
- }
-
- // Handle enumerations.
- if !m.EnumsAsInts && prop.Enum != "" {
- // Unknown enum values will are stringified by the proto library as their
- // value. Such values should _not_ be quoted or they will be interpreted
- // as an enum string instead of their value.
- enumStr := v.Interface().(fmt.Stringer).String()
- var valStr string
- if v.Kind() == reflect.Ptr {
- valStr = strconv.Itoa(int(v.Elem().Int()))
- } else {
- valStr = strconv.Itoa(int(v.Int()))
- }
- isKnownEnum := enumStr != valStr
- if isKnownEnum {
- out.write(`"`)
- }
- out.write(enumStr)
- if isKnownEnum {
- out.write(`"`)
- }
- return out.err
- }
-
- // Handle nested messages.
- if v.Kind() == reflect.Struct {
- return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
- }
-
- // Handle maps.
- // Since Go randomizes map iteration, we sort keys for stable output.
- if v.Kind() == reflect.Map {
- out.write(`{`)
- keys := v.MapKeys()
- sort.Sort(mapKeys(keys))
- for i, k := range keys {
- if i > 0 {
- out.write(`,`)
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- out.write(m.Indent)
- }
-
- // TODO handle map key prop properly
- b, err := json.Marshal(k.Interface())
- if err != nil {
- return err
- }
- s := string(b)
-
- // If the JSON is not a string value, encode it again to make it one.
- if !strings.HasPrefix(s, `"`) {
- b, err := json.Marshal(s)
- if err != nil {
- return err
- }
- s = string(b)
- }
-
- out.write(s)
- out.write(`:`)
- if m.Indent != "" {
- out.write(` `)
- }
-
- vprop := prop
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
- return err
- }
- }
- if m.Indent != "" {
- out.write("\n")
- out.write(indent)
- out.write(m.Indent)
- }
- out.write(`}`)
- return out.err
- }
-
- // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- f := v.Float()
- var sval string
- switch {
- case math.IsInf(f, 1):
- sval = `"Infinity"`
- case math.IsInf(f, -1):
- sval = `"-Infinity"`
- case math.IsNaN(f):
- sval = `"NaN"`
- }
- if sval != "" {
- out.write(sval)
- return out.err
- }
- }
-
- // Default handling defers to the encoding/json library.
- b, err := json.Marshal(v.Interface())
- if err != nil {
- return err
- }
- needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
- if needToQuote {
- out.write(`"`)
- }
- out.write(string(b))
- if needToQuote {
- out.write(`"`)
- }
- return out.err
-}
-
-// Unmarshaler is a configurable object for converting from a JSON
-// representation to a protocol buffer object.
-type Unmarshaler struct {
- // Whether to allow messages to contain unknown fields, as opposed to
- // failing to unmarshal.
- AllowUnknownFields bool
-
- // A custom URL resolver to use when unmarshaling Any messages from JSON.
- // If unset, the default resolution strategy is to extract the
- // fully-qualified type name from the type URL and pass that to
- // proto.MessageType(string).
- AnyResolver AnyResolver
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- inputValue := json.RawMessage{}
- if err := dec.Decode(&inputValue); err != nil {
- return err
- }
- if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
- return err
- }
- return checkRequiredFields(pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
- dec := json.NewDecoder(r)
- return u.UnmarshalNext(dec, pb)
-}
-
-// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
-// This function is lenient and will decode any options permutations of the
-// related Marshaler.
-func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
- return new(Unmarshaler).UnmarshalNext(dec, pb)
-}
-
-// Unmarshal unmarshals a JSON object stream into a protocol
-// buffer. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func Unmarshal(r io.Reader, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(r, pb)
-}
-
-// UnmarshalString will populate the fields of a protocol buffer based
-// on a JSON string. This function is lenient and will decode any options
-// permutations of the related Marshaler.
-func UnmarshalString(str string, pb proto.Message) error {
- return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
-}
-
-// unmarshalValue converts/copies a value into the target.
-// prop may be nil.
-func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
- targetType := target.Type()
-
- // Allocate memory for pointer fields.
- if targetType.Kind() == reflect.Ptr {
- // If input value is "null" and target is a pointer type, then the field should be treated as not set
- // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
- _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
- if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
- return nil
- }
- target.Set(reflect.New(targetType.Elem()))
-
- return u.unmarshalValue(target.Elem(), inputValue, prop)
- }
-
- if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
- return jsu.UnmarshalJSONPB(u, []byte(inputValue))
- }
-
- // Handle well-known types that are not pointers.
- if w, ok := target.Addr().Interface().(wkt); ok {
- switch w.XXX_WellKnownType() {
- case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
- "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
- return u.unmarshalValue(target.Field(0), inputValue, prop)
- case "Any":
- // Use json.RawMessage pointer type instead of value to support pre-1.8 version.
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
- // https://github.com/golang/go/issues/14493
- var jsonFields map[string]*json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- val, ok := jsonFields["@type"]
- if !ok || val == nil {
- return errors.New("Any JSON doesn't have '@type'")
- }
-
- var turl string
- if err := json.Unmarshal([]byte(*val), &turl); err != nil {
- return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
- }
- target.Field(0).SetString(turl)
-
- var m proto.Message
- var err error
- if u.AnyResolver != nil {
- m, err = u.AnyResolver.Resolve(turl)
- } else {
- m, err = defaultResolveAny(turl)
- }
- if err != nil {
- return err
- }
-
- if _, ok := m.(wkt); ok {
- val, ok := jsonFields["value"]
- if !ok {
- return errors.New("Any JSON doesn't have 'value'")
- }
-
- if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- } else {
- delete(jsonFields, "@type")
- nestedProto, err := json.Marshal(jsonFields)
- if err != nil {
- return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
- }
-
- if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
- return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
- }
- }
-
- b, err := proto.Marshal(m)
- if err != nil {
- return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
- }
- target.Field(1).SetBytes(b)
-
- return nil
- case "Duration":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- d, err := time.ParseDuration(unq)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- target.Field(0).SetInt(s)
- target.Field(1).SetInt(ns)
- return nil
- case "Timestamp":
- unq, err := unquote(string(inputValue))
- if err != nil {
- return err
- }
-
- t, err := time.Parse(time.RFC3339Nano, unq)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
-
- target.Field(0).SetInt(t.Unix())
- target.Field(1).SetInt(int64(t.Nanosecond()))
- return nil
- case "Struct":
- var m map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &m); err != nil {
- return fmt.Errorf("bad StructValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
- for k, jv := range m {
- pv := &stpb.Value{}
- if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
- return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
- }
- target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
- }
- return nil
- case "ListValue":
- var s []json.RawMessage
- if err := json.Unmarshal(inputValue, &s); err != nil {
- return fmt.Errorf("bad ListValue: %v", err)
- }
-
- target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
- for i, sv := range s {
- if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
- return err
- }
- }
- return nil
- case "Value":
- ivStr := string(inputValue)
- if ivStr == "null" {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
- } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
- } else if v, err := unquote(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
- } else if v, err := strconv.ParseBool(ivStr); err == nil {
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
- } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
- lv := &stpb.ListValue{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
- return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
- } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
- sv := &stpb.Struct{}
- target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
- return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
- } else {
- return fmt.Errorf("unrecognized type for Value %q", ivStr)
- }
- return nil
- }
- }
-
- // Handle enums, which have an underlying type of int32,
- // and may appear as strings.
- // The case of an enum appearing as a number is handled
- // at the bottom of this function.
- if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
- vmap := proto.EnumValueMap(prop.Enum)
- // Don't need to do unquoting; valid enum names
- // are from a limited character set.
- s := inputValue[1 : len(inputValue)-1]
- n, ok := vmap[string(s)]
- if !ok {
- return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
- }
- if target.Kind() == reflect.Ptr { // proto2
- target.Set(reflect.New(targetType.Elem()))
- target = target.Elem()
- }
- if targetType.Kind() != reflect.Int32 {
- return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
- }
- target.SetInt(int64(n))
- return nil
- }
-
- // Handle nested messages.
- if targetType.Kind() == reflect.Struct {
- var jsonFields map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
- return err
- }
-
- consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
- // Be liberal in what names we accept; both orig_name and camelName are okay.
- fieldNames := acceptedJSONFieldNames(prop)
-
- vOrig, okOrig := jsonFields[fieldNames.orig]
- vCamel, okCamel := jsonFields[fieldNames.camel]
- if !okOrig && !okCamel {
- return nil, false
- }
- // If, for some reason, both are present in the data, favour the camelName.
- var raw json.RawMessage
- if okOrig {
- raw = vOrig
- delete(jsonFields, fieldNames.orig)
- }
- if okCamel {
- raw = vCamel
- delete(jsonFields, fieldNames.camel)
- }
- return raw, true
- }
-
- sprops := proto.GetProperties(targetType)
- for i := 0; i < target.NumField(); i++ {
- ft := target.Type().Field(i)
- if strings.HasPrefix(ft.Name, "XXX_") {
- continue
- }
-
- valueForField, ok := consumeField(sprops.Prop[i])
- if !ok {
- continue
- }
-
- if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
- return err
- }
- }
- // Check for any oneof fields.
- if len(jsonFields) > 0 {
- for _, oop := range sprops.OneofTypes {
- raw, ok := consumeField(oop.Prop)
- if !ok {
- continue
- }
- nv := reflect.New(oop.Type.Elem())
- target.Field(oop.Field).Set(nv)
- if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
- return err
- }
- }
- }
- // Handle proto2 extensions.
- if len(jsonFields) > 0 {
- if ep, ok := target.Addr().Interface().(proto.Message); ok {
- for _, ext := range proto.RegisteredExtensions(ep) {
- name := fmt.Sprintf("[%s]", ext.Name)
- raw, ok := jsonFields[name]
- if !ok {
- continue
- }
- delete(jsonFields, name)
- nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
- if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
- return err
- }
- if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
- return err
- }
- }
- }
- }
- if !u.AllowUnknownFields && len(jsonFields) > 0 {
- // Pick any field to be the scapegoat.
- var f string
- for fname := range jsonFields {
- f = fname
- break
- }
- return fmt.Errorf("unknown field %q in %v", f, targetType)
- }
- return nil
- }
-
- // Handle arrays (which aren't encoded bytes)
- if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
- var slc []json.RawMessage
- if err := json.Unmarshal(inputValue, &slc); err != nil {
- return err
- }
- if slc != nil {
- l := len(slc)
- target.Set(reflect.MakeSlice(targetType, l, l))
- for i := 0; i < l; i++ {
- if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
- return err
- }
- }
- }
- return nil
- }
-
- // Handle maps (whose keys are always strings)
- if targetType.Kind() == reflect.Map {
- var mp map[string]json.RawMessage
- if err := json.Unmarshal(inputValue, &mp); err != nil {
- return err
- }
- if mp != nil {
- target.Set(reflect.MakeMap(targetType))
- for ks, raw := range mp {
- // Unmarshal map key. The core json library already decoded the key into a
- // string, so we handle that specially. Other types were quoted post-serialization.
- var k reflect.Value
- if targetType.Key().Kind() == reflect.String {
- k = reflect.ValueOf(ks)
- } else {
- k = reflect.New(targetType.Key()).Elem()
- var kprop *proto.Properties
- if prop != nil && prop.MapKeyProp != nil {
- kprop = prop.MapKeyProp
- }
- if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
- return err
- }
- }
-
- // Unmarshal map value.
- v := reflect.New(targetType.Elem()).Elem()
- var vprop *proto.Properties
- if prop != nil && prop.MapValProp != nil {
- vprop = prop.MapValProp
- }
- if err := u.unmarshalValue(v, raw, vprop); err != nil {
- return err
- }
- target.SetMapIndex(k, v)
- }
- }
- return nil
- }
-
- // Non-finite numbers can be encoded as strings.
- isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isFloat {
- if num, ok := nonFinite[string(inputValue)]; ok {
- target.SetFloat(num)
- return nil
- }
- }
-
- // integers & floats can be encoded as strings. In this case we drop
- // the quotes and proceed as normal.
- isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
- targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
- targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
- if isNum && strings.HasPrefix(string(inputValue), `"`) {
- inputValue = inputValue[1 : len(inputValue)-1]
- }
-
- // Use the encoding/json for parsing other value types.
- return json.Unmarshal(inputValue, target.Addr().Interface())
-}
-
-func unquote(s string) (string, error) {
- var ret string
- err := json.Unmarshal([]byte(s), &ret)
- return ret, err
-}
-
-// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
- var prop proto.Properties
- prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
- if origName || prop.JSONName == "" {
- prop.JSONName = prop.OrigName
- }
- return &prop
-}
-
-type fieldNames struct {
- orig, camel string
-}
-
-func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
- opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
- if prop.JSONName != "" {
- opts.camel = prop.JSONName
- }
- return opts
-}
-
-// Writer wrapper inspired by https://blog.golang.org/errors-are-values
-type errWriter struct {
- writer io.Writer
- err error
-}
-
-func (w *errWriter) write(str string) {
- if w.err != nil {
- return
- }
- _, w.err = w.writer.Write([]byte(str))
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-//
-// Numeric keys are sorted in numeric order per
-// https://developers.google.com/protocol-buffers/docs/proto#maps.
-type mapKeys []reflect.Value
-
-func (s mapKeys) Len() int { return len(s) }
-func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s mapKeys) Less(i, j int) bool {
- if k := s[i].Kind(); k == s[j].Kind() {
- switch k {
- case reflect.String:
- return s[i].String() < s[j].String()
- case reflect.Int32, reflect.Int64:
- return s[i].Int() < s[j].Int()
- case reflect.Uint32, reflect.Uint64:
- return s[i].Uint() < s[j].Uint()
- }
- }
- return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
-}
-
-// checkRequiredFields returns an error if any required field in the given proto message is not set.
-// This function is used by both Marshal and Unmarshal. While required fields only exist in a
-// proto2 message, a proto3 message can contain proto2 message(s).
-func checkRequiredFields(pb proto.Message) error {
- // Most well-known type messages do not contain required fields. The "Any" type may contain
- // a message that has required fields.
- //
- // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value
- // field in order to transform that into JSON, and that should have returned an error if a
- // required field is not set in the embedded message.
- //
- // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
- // embedded message to store the serialized message in Any.Value field, and that should have
- // returned an error if a required field is not set.
- if _, ok := pb.(wkt); ok {
- return nil
- }
-
- v := reflect.ValueOf(pb)
- // Skip message if it is not a struct pointer.
- if v.Kind() != reflect.Ptr {
- return nil
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return nil
- }
-
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- sfield := v.Type().Field(i)
-
- if sfield.PkgPath != "" {
- // blank PkgPath means the field is exported; skip if not exported
- continue
- }
-
- if strings.HasPrefix(sfield.Name, "XXX_") {
- continue
- }
-
- // Oneof field is an interface implemented by wrapper structs containing the actual oneof
- // field, i.e. an interface containing &T{real_value}.
- if sfield.Tag.Get("protobuf_oneof") != "" {
- if field.Kind() != reflect.Interface {
- continue
- }
- v := field.Elem()
- if v.Kind() != reflect.Ptr || v.IsNil() {
- continue
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct || v.NumField() < 1 {
- continue
- }
- field = v.Field(0)
- sfield = v.Type().Field(0)
- }
-
- protoTag := sfield.Tag.Get("protobuf")
- if protoTag == "" {
- continue
- }
- var prop proto.Properties
- prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
-
- switch field.Kind() {
- case reflect.Map:
- if field.IsNil() {
- continue
- }
- // Check each map value.
- keys := field.MapKeys()
- for _, k := range keys {
- v := field.MapIndex(k)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Slice:
- // Handle non-repeated type, e.g. bytes.
- if !prop.Repeated {
- if prop.Required && field.IsNil() {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
-
- // Handle repeated type.
- if field.IsNil() {
- continue
- }
- // Check each slice item.
- for i := 0; i < field.Len(); i++ {
- v := field.Index(i)
- if err := checkRequiredFieldsInValue(v); err != nil {
- return err
- }
- }
- case reflect.Ptr:
- if field.IsNil() {
- if prop.Required {
- return fmt.Errorf("required field %q is not set", prop.Name)
- }
- continue
- }
- if err := checkRequiredFieldsInValue(field); err != nil {
- return err
- }
- }
- }
-
- // Handle proto2 extensions.
- for _, ext := range proto.RegisteredExtensions(pb) {
- if !proto.HasExtension(pb, ext) {
- continue
- }
- ep, err := proto.GetExtension(pb, ext)
- if err != nil {
- return err
- }
- err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func checkRequiredFieldsInValue(v reflect.Value) error {
- if pm, ok := v.Interface().(proto.Message); ok {
- return checkRequiredFields(pm)
- }
- return nil
-}
From 447ec152e02d9e5e4381b521fd08a47087dcfc17 Mon Sep 17 00:00:00 2001
From: Ville Aikas
Date: Wed, 24 Apr 2019 14:05:28 -0700
Subject: [PATCH 52/76] add more unit tests for trigger (#1097)
---
pkg/reconciler/trigger/trigger.go | 4 +
pkg/reconciler/trigger/trigger_test.go | 225 ++++++++++++++++++++++++-
2 files changed, 220 insertions(+), 9 deletions(-)
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
index d681f47274f..2c8976d819c 100644
--- a/pkg/reconciler/trigger/trigger.go
+++ b/pkg/reconciler/trigger/trigger.go
@@ -353,6 +353,7 @@ func (r *Reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.T
// If the resource doesn't exist, we'll create it.
if apierrs.IsNotFound(err) {
sub = expected
+ logging.FromContext(ctx).Info("Creating subscription")
newSub, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Create(sub)
if err != nil {
r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
@@ -360,6 +361,7 @@ func (r *Reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.T
}
return newSub, nil
} else if err != nil {
+ logging.FromContext(ctx).Error("Failed to get subscription", zap.Error(err))
r.Recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err)
return nil, err
}
@@ -369,6 +371,7 @@ func (r *Reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.T
if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {
// Given that spec.channel is immutable, we cannot just update the Subscription. We delete
// it and re-create it instead.
+ logging.FromContext(ctx).Info("Deleting subscription", zap.String("namespace", sub.Namespace), zap.String("name", sub.Name))
err = r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Delete(sub.Name, &metav1.DeleteOptions{})
if err != nil {
logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err))
@@ -376,6 +379,7 @@ func (r *Reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.T
return nil, err
}
sub = expected
+ logging.FromContext(ctx).Info("Creating subscription")
newSub, err := r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Create(sub)
if err != nil {
logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err))
diff --git a/pkg/reconciler/trigger/trigger_test.go b/pkg/reconciler/trigger/trigger_test.go
index 1b5d4210364..7fa47f42b08 100644
--- a/pkg/reconciler/trigger/trigger_test.go
+++ b/pkg/reconciler/trigger/trigger_test.go
@@ -17,8 +17,6 @@ limitations under the License.
package trigger
import (
- // "context"
- // "errors"
"fmt"
"net/url"
"testing"
@@ -115,6 +113,28 @@ func TestAllCases(t *testing.T) {
reconciletesting.WithTriggerBrokerFailed("DoesNotExist", "Broker does not exist"),
),
}},
+ }, {
+ Name: "Broker get failure",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI)),
+ },
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: broker.eventing.knative.dev \"test-broker\" not found"),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("get", "brokers"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerFailed("DoesNotExist", "Broker does not exist"),
+ ),
+ }},
}, {
Name: "Trigger being deleted",
Key: triggerKey,
@@ -201,15 +221,124 @@ func TestAllCases(t *testing.T) {
),
}},
}, {
- Name: "Subscription Created, not ready",
+ Name: "Subscription create fail",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "subscriptions"),
+ },
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriptionCreateFailed", "Create Trigger's subscription failed: inducing failure for create subscriptions"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: inducing failure for create subscriptions")},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("NotSubscribed", "inducing failure for create subscriptions"),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeIngressSubscription(),
+ },
+ }, {
+ Name: "Subscription delete fail",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ makeDifferentReadySubscription(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("delete", "subscriptions"),
+ },
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriptionDeleteFailed", "Delete Trigger's subscription failed: inducing failure for delete subscriptions"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: inducing failure for delete subscriptions")},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("NotSubscribed", "inducing failure for delete subscriptions"),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
+ ),
+ }},
+ // Name being "" is NOT a bug. Because we use generate name, the object created
+ // does not have a name...
+ WantDeletes: []clientgotesting.DeleteActionImpl{{
+ Name: "",
+ }},
+ }, {
+ Name: "Subscription create after delete fail",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ makeDifferentReadySubscription(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: true,
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "subscriptions"),
+ },
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SubscriptionCreateFailed", "Create Trigger's subscription failed: inducing failure for create subscriptions"),
+ Eventf(corev1.EventTypeWarning, "TriggerReconcileFailed", "Trigger reconciliation failed: inducing failure for create subscriptions")},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("NotSubscribed", "inducing failure for create subscriptions"),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
+ ),
+ }},
+ // Name being "" is NOT a bug. Because we use generate name, the object created
+ // does not have a name...
+ WantDeletes: []clientgotesting.DeleteActionImpl{{
+ Name: "",
+ }},
+ WantCreates: []metav1.Object{
+ makeIngressSubscription(),
+ },
+ }, {
+ Name: "Subscription updated works",
Key: triggerKey,
Objects: []runtime.Object{
makeReadyBroker(),
makeTriggerChannel(),
makeIngressChannel(),
makeBrokerFilterService(),
+ makeDifferentReadySubscription(),
reconciletesting.NewTrigger(triggerName, testNS, brokerName,
- reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
reconciletesting.WithInitTriggerConditions,
),
},
@@ -219,20 +348,83 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
- reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
// The first reconciliation will initialize the status conditions.
reconciletesting.WithInitTriggerConditions,
reconciletesting.WithTriggerBrokerReady(),
reconciletesting.WithTriggerNotSubscribed("SubscriptionNotReady", "Subscription is not ready: nil"),
- reconciletesting.WithTriggerStatusSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
),
}},
+ // Name being "" is NOT a bug. Because we use generate name, the object created
+ // does not have a name...
+ WantDeletes: []clientgotesting.DeleteActionImpl{{
+ Name: "",
+ }},
WantCreates: []metav1.Object{
makeIngressSubscription(),
},
}, {
Name: "Subscription Created, not ready",
Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "TriggerReconciled", "Trigger reconciled"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("SubscriptionNotReady", "Subscription is not ready: nil"),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeIngressSubscription(),
+ },
+ }, {
+ Name: "Subscription not ready, trigger marked not ready",
+ Key: triggerKey,
+ Objects: []runtime.Object{
+ makeReadyBroker(),
+ makeTriggerChannel(),
+ makeIngressChannel(),
+ makeBrokerFilterService(),
+ makeNotReadySubscription(),
+ reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ reconciletesting.WithInitTriggerConditions,
+ ),
+ },
+ WantErr: false,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "TriggerReconciled", "Trigger reconciled"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
+ // The first reconciliation will initialize the status conditions.
+ reconciletesting.WithInitTriggerConditions,
+ reconciletesting.WithTriggerBrokerReady(),
+ reconciletesting.WithTriggerNotSubscribed("SubscriptionNotReady", "Subscription is not ready: nil"),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
+ ),
+ }},
+ }, {
+ Name: "Subscription ready, trigger marked ready",
+ Key: triggerKey,
Objects: []runtime.Object{
makeReadyBroker(),
makeTriggerChannel(),
@@ -240,7 +432,7 @@ func TestAllCases(t *testing.T) {
makeBrokerFilterService(),
makeReadySubscription(),
reconciletesting.NewTrigger(triggerName, testNS, brokerName,
- reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
reconciletesting.WithInitTriggerConditions,
),
},
@@ -250,12 +442,12 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: reconciletesting.NewTrigger(triggerName, testNS, brokerName,
- reconciletesting.WithTriggerSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerSubscriberURI(subscriberURI),
// The first reconciliation will initialize the status conditions.
reconciletesting.WithInitTriggerConditions,
reconciletesting.WithTriggerBrokerReady(),
reconciletesting.WithTriggerSubscribed(),
- reconciletesting.WithTriggerStatusSubscriberURI(makeServiceURI().String()),
+ reconciletesting.WithTriggerStatusSubscriberURI(subscriberURI),
),
}},
},
@@ -424,12 +616,27 @@ func makeIngressSubscription() *v1alpha1.Subscription {
return resources.NewSubscription(makeTrigger(), makeTriggerChannel(), makeIngressChannel(), makeServiceURI())
}
+// Just so we can test subscription updates
+func makeDifferentReadySubscription() *v1alpha1.Subscription {
+ uri := "http://example.com/differenturi"
+ s := makeIngressSubscription()
+ s.Spec.Subscriber.URI = &uri
+ s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
+ return s
+}
+
func makeReadySubscription() *v1alpha1.Subscription {
s := makeIngressSubscription()
s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
return s
}
+func makeNotReadySubscription() *v1alpha1.Subscription {
+ s := makeIngressSubscription()
+ s.Status = *v1alpha1.TestHelper.NotReadySubscriptionStatus()
+ return s
+}
+
func getOwnerReference() metav1.OwnerReference {
return metav1.OwnerReference{
APIVersion: v1alpha1.SchemeGroupVersion.String(),
From cc11eaa85bf1951f97cb72e6d33b38829859443a Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Wed, 24 Apr 2019 19:23:29 -0700
Subject: [PATCH 53/76] update test-infra dep (#1098)
---
Gopkg.lock | 4 ++--
.../knative/test-infra/scripts/README.md | 5 +++--
.../knative/test-infra/scripts/e2e-tests.sh | 16 ++++++++++++----
.../test-infra/scripts/presubmit-tests.sh | 2 +-
4 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/Gopkg.lock b/Gopkg.lock
index 81597fe9ee3..ed867edd90a 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -472,14 +472,14 @@
[[projects]]
branch = "master"
- digest = "1:f3c8dc03bb88bca90c59550e06b0f27a6ca489b1747ac8e5153686a6752e4d74"
+ digest = "1:e52814856d1182e3f80db2dbab1b2888f1d7c486a9094ebc27aa18d241e43a08"
name = "github.com/knative/test-infra"
packages = [
"scripts",
"tools/dep-collector",
]
pruneopts = "UT"
- revision = "3a09cd7f5428743509941d116fdee644041a3507"
+ revision = "6a4119d87d5e494fe74b17d096db3e5aca57361c"
[[projects]]
digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
diff --git a/vendor/github.com/knative/test-infra/scripts/README.md b/vendor/github.com/knative/test-infra/scripts/README.md
index b33543080e9..06cff45e003 100644
--- a/vendor/github.com/knative/test-infra/scripts/README.md
+++ b/vendor/github.com/knative/test-infra/scripts/README.md
@@ -155,8 +155,9 @@ This is a helper script for Knative E2E test scripts. To use it:
1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()`
(or `report_go_test()` if you need a more fine-grained control) and call
`fail_test()` or `success()` if any of them failed. The environment variable
- `KO_DOCKER_REPO` will be set according to the test cluster. You can also use
- the following boolean (0 is false, 1 is true) environment variables for the logic:
+ `KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test cluster.
+ You can also use the following boolean (0 is false, 1 is true) environment
+ variables for the logic:
- `EMIT_METRICS`: true if `--emit-metrics` was passed.
diff --git a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
index e0c949cc43d..868b36d7eb9 100755
--- a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
+++ b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
@@ -67,6 +67,8 @@ IS_BOSKOS=0
# Tear down the test resources.
function teardown_test_resources() {
+ # On boskos, save time and don't teardown as the cluster will be destroyed anyway.
+ (( IS_BOSKOS )) && return
header "Tearing down test environment"
function_exists test_teardown && test_teardown
(( ! SKIP_KNATIVE_SETUP )) && function_exists knative_teardown && knative_teardown
@@ -113,8 +115,7 @@ function save_metadata() {
geo_key="Zone"
geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
fi
- local gcloud_project="$(gcloud config get-value project)"
- local cluster_version="$(gcloud container clusters list --project=${gcloud_project} --format='value(currentMasterVersion)')"
+ local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')"
cat << EOF > ${ARTIFACTS}/metadata.json
{
"E2E:${geo_key}": "${geo_value}",
@@ -130,6 +131,7 @@ EOF
# See https://github.com/knative/serving/issues/959 for details.
# TODO(adrcunha): Remove once the leak issue is resolved.
function delete_leaked_network_resources() {
+ # On boskos, don't bother with leaks as the janitor will delete everything in the project.
(( IS_BOSKOS )) && return
# Ensure we're using the GCP project used by kubetest
local gcloud_project="$(gcloud config get-value project)"
@@ -269,6 +271,11 @@ function setup_test_cluster() {
header "Setting up test cluster"
+ # Set the actual project the test cluster resides in.
+ # It will be a project assigned by Boskos if the test is running on Prow,
+ # otherwise it will be the ${GCP_PROJECT} set up by the user.
+ readonly export E2E_PROJECT_ID="$(gcloud config get-value project)"
+
# Save some metadata about cluster creation for using in prow and testgrid
save_metadata
@@ -280,9 +287,10 @@ function setup_test_cluster() {
if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" ]]; then
acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE}
kubectl config set-context ${k8s_cluster} --namespace=default
- export KO_DOCKER_REPO=gcr.io/$(gcloud config get-value project)/${E2E_BASE_NAME}-e2e-img
+ export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img
fi
+ echo "- Project is ${E2E_PROJECT_ID}"
echo "- Cluster is ${k8s_cluster}"
echo "- User is ${k8s_user}"
echo "- Docker is ${KO_DOCKER_REPO}"
@@ -303,7 +311,7 @@ function setup_test_cluster() {
fi
}
-# Gets the exit of of the test script.
+# Gets the exit of the test script.
# For more details, see set_test_return_code().
function get_test_return_code() {
echo $(cat ${TEST_RESULT_FILE})
diff --git a/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh b/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
index b10a8515cfc..3f15cf3abaf 100755
--- a/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
+++ b/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
@@ -43,7 +43,7 @@ IS_DOCUMENTATION_PR=0
# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
function pr_only_contains() {
- [[ -z "$(echo "${CHANGED_FILES}" | grep -v \(${1// /\\|}\)$))" ]]
+ [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]]
}
# List changed files in the current PR.
From 739c89ba751017e93805d936d7378216d94665e0 Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Thu, 25 Apr 2019 11:21:29 +0200
Subject: [PATCH 54/76] Kafka dispatcher no need for a statefulset (#972)
* No need for a statefulset
* Feedback from @syedriko
---
contrib/kafka/config/README.md | 2 +-
contrib/kafka/config/kafka.yaml | 3 +--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/contrib/kafka/config/README.md b/contrib/kafka/config/README.md
index 62963062226..2664fb684ee 100644
--- a/contrib/kafka/config/README.md
+++ b/contrib/kafka/config/README.md
@@ -83,7 +83,7 @@ kubectl get configmap -n knative-eventing kafka-channel-controller-config
The Channel Dispatcher receives and distributes all events:
```shell
-kubectl get statefulset -n knative-eventing kafka-channel-dispatcher
+kubectl get deployment -n knative-eventing kafka-channel-dispatcher
```
The Channel Dispatcher Config Map is used to send information about Channels and
diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml
index 31506c804de..49882a5e158 100644
--- a/contrib/kafka/config/kafka.yaml
+++ b/contrib/kafka/config/kafka.yaml
@@ -187,7 +187,7 @@ roleRef:
---
apiVersion: apps/v1
-kind: StatefulSet
+kind: Deployment
metadata:
name: kafka-channel-dispatcher
namespace: knative-eventing
@@ -197,7 +197,6 @@ spec:
matchLabels: &labels
clusterChannelProvisioner: kafka
role: dispatcher
- serviceName: kafka-channel-dispatcher-service
template:
metadata:
annotations:
From 47e3de90f79d7585b8ead65849253b24a7e52661 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Thu, 25 Apr 2019 08:45:32 -0700
Subject: [PATCH 55/76] API + Reconciler for CronJobSource. (#1092)
* Move ContainerSource API.
* stage this.
* Move ContainerSource API.
* ported rec.
* stash.
* working cron job source.
* add rbac
* update deps.
* update codegen.
* add deepcopy.
* clean copyright.
* updating to have a source controller.
* fix yaml.
* use sources.
* 2019
* use the real word, not core.
* source -> sources.
* fix yaml.
---
Gopkg.lock | 21 +-
cmd/controller/main.go | 23 +-
cmd/cronjob_receive_adapter/main.go | 74 ++++
cmd/sources-controller/kodata/HEAD | 1 +
cmd/sources-controller/kodata/LICENSE | 1 +
cmd/sources-controller/kodata/VENDOR-LICENSE | 1 +
cmd/sources-controller/main.go | 163 ++++++++
config/200-controller-clusterrole.yaml | 47 +--
config/300-cronjobsource.yaml | 84 ++++
config/400-source-controller-service.yaml | 30 ++
config/500-controller.yaml | 3 +
config/500-sources-controller.yaml | 68 ++++
hack/update-codegen.sh | 2 +-
pkg/adapter/cronjobevents/adapter.go | 114 ++++++
pkg/adapter/cronjobevents/adapter_test.go | 208 ++++++++++
pkg/apis/duck/v1alpha1/doc.go | 5 +-
pkg/apis/duck/v1alpha1/subscribable_types.go | 28 +-
.../duck/v1alpha1/subscribable_types_test.go | 28 +-
pkg/apis/eventing/v1alpha1/doc.go | 5 +-
pkg/apis/sources/register.go | 21 +
.../sources/v1alpha1/cron_job_lifecycle.go | 96 +++++
.../v1alpha1/cron_job_lifecycle_test.go | 376 +++++++++++++++++
pkg/apis/sources/v1alpha1/cron_job_types.go | 96 +++++
pkg/apis/sources/v1alpha1/doc.go | 20 +
pkg/apis/sources/v1alpha1/register.go | 53 +++
pkg/apis/sources/v1alpha1/register_test.go | 71 ++++
.../sources/v1alpha1/zz_generated.deepcopy.go | 125 ++++++
pkg/client/clientset/versioned/clientset.go | 22 +
.../versioned/fake/clientset_generated.go | 12 +
.../clientset/versioned/fake/register.go | 2 +
.../clientset/versioned/scheme/register.go | 2 +
.../typed/sources/v1alpha1/cronjobsource.go | 174 ++++++++
.../versioned/typed/sources/v1alpha1/doc.go | 20 +
.../typed/sources/v1alpha1/fake/doc.go | 20 +
.../v1alpha1/fake/fake_cronjobsource.go | 140 +++++++
.../v1alpha1/fake/fake_sources_client.go | 40 ++
.../sources/v1alpha1/generated_expansion.go | 21 +
.../typed/sources/v1alpha1/sources_client.go | 90 +++++
.../informers/externalversions/factory.go | 6 +
.../informers/externalversions/generic.go | 5 +
.../externalversions/sources/interface.go | 46 +++
.../sources/v1alpha1/cronjobsource.go | 89 ++++
.../sources/v1alpha1/interface.go | 45 +++
.../listers/sources/v1alpha1/cronjobsource.go | 94 +++++
.../sources/v1alpha1/expansion_generated.go | 27 ++
pkg/duck/sinks.go | 66 +++
pkg/duck/sinks_test.go | 210 ++++++++++
pkg/{utils/resolve => duck}/subscriber.go | 2 +-
.../resolve => duck}/subscriber_test.go | 6 +-
pkg/kncloudevents/good_client.go | 30 ++
pkg/logconfig/config.go | 3 +
pkg/reconciler/cronjobsource/cronjobsource.go | 287 +++++++++++++
.../cronjobsource/cronjobsource_test.go | 307 ++++++++++++++
pkg/reconciler/cronjobsource/doc.go | 18 +
.../cronjobsource/resources/labels.go | 30 ++
.../resources/receive_adapter.go | 90 +++++
.../resources/receive_adapter_test.go | 119 ++++++
pkg/reconciler/subscription/subscription.go | 28 +-
pkg/reconciler/testing/cronjobsource.go | 81 ++++
pkg/reconciler/testing/listers.go | 6 +
pkg/reconciler/trigger/trigger.go | 4 +-
third_party/VENDOR-LICENSE | 27 ++
vendor/github.com/robfig/cron/LICENSE | 21 +
.../github.com/robfig/cron/constantdelay.go | 27 ++
vendor/github.com/robfig/cron/cron.go | 259 ++++++++++++
vendor/github.com/robfig/cron/doc.go | 129 ++++++
vendor/github.com/robfig/cron/parser.go | 380 ++++++++++++++++++
vendor/github.com/robfig/cron/spec.go | 158 ++++++++
.../golang.org/x/oauth2/google/appengine.go | 89 +---
.../x/oauth2/google/appengine_gen1.go | 77 ++++
.../x/oauth2/google/appengine_gen2_flex.go | 27 ++
.../x/oauth2/google/appengine_hook.go | 14 -
.../x/oauth2/google/appengineflex_hook.go | 11 -
vendor/golang.org/x/oauth2/google/default.go | 57 ++-
.../x/oauth2/google/{doc_go19.go => doc.go} | 2 -
.../x/oauth2/google/doc_not_go19.go | 43 --
vendor/golang.org/x/oauth2/google/go19.go | 57 ---
vendor/golang.org/x/oauth2/google/google.go | 24 +-
vendor/golang.org/x/oauth2/google/not_go19.go | 54 ---
vendor/golang.org/x/oauth2/google/sdk.go | 2 +-
vendor/golang.org/x/oauth2/internal/oauth2.go | 2 +-
vendor/golang.org/x/oauth2/internal/token.go | 220 +++++-----
.../golang.org/x/oauth2/internal/transport.go | 3 +-
vendor/golang.org/x/oauth2/jwt/jwt.go | 10 +-
vendor/golang.org/x/oauth2/oauth2.go | 66 ++-
vendor/golang.org/x/oauth2/token.go | 9 +-
vendor/golang.org/x/oauth2/transport.go | 16 +-
87 files changed, 5200 insertions(+), 490 deletions(-)
create mode 100644 cmd/cronjob_receive_adapter/main.go
create mode 120000 cmd/sources-controller/kodata/HEAD
create mode 120000 cmd/sources-controller/kodata/LICENSE
create mode 120000 cmd/sources-controller/kodata/VENDOR-LICENSE
create mode 100644 cmd/sources-controller/main.go
create mode 100644 config/300-cronjobsource.yaml
create mode 100644 config/400-source-controller-service.yaml
create mode 100644 config/500-sources-controller.yaml
create mode 100644 pkg/adapter/cronjobevents/adapter.go
create mode 100644 pkg/adapter/cronjobevents/adapter_test.go
create mode 100644 pkg/apis/sources/register.go
create mode 100644 pkg/apis/sources/v1alpha1/cron_job_lifecycle.go
create mode 100644 pkg/apis/sources/v1alpha1/cron_job_lifecycle_test.go
create mode 100644 pkg/apis/sources/v1alpha1/cron_job_types.go
create mode 100644 pkg/apis/sources/v1alpha1/doc.go
create mode 100644 pkg/apis/sources/v1alpha1/register.go
create mode 100644 pkg/apis/sources/v1alpha1/register_test.go
create mode 100644 pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/cronjobsource.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_cronjobsource.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
create mode 100644 pkg/client/informers/externalversions/sources/interface.go
create mode 100644 pkg/client/informers/externalversions/sources/v1alpha1/cronjobsource.go
create mode 100644 pkg/client/informers/externalversions/sources/v1alpha1/interface.go
create mode 100644 pkg/client/listers/sources/v1alpha1/cronjobsource.go
create mode 100644 pkg/client/listers/sources/v1alpha1/expansion_generated.go
create mode 100644 pkg/duck/sinks.go
create mode 100644 pkg/duck/sinks_test.go
rename pkg/{utils/resolve => duck}/subscriber.go (99%)
rename pkg/{utils/resolve => duck}/subscriber_test.go (99%)
create mode 100644 pkg/kncloudevents/good_client.go
create mode 100644 pkg/reconciler/cronjobsource/cronjobsource.go
create mode 100644 pkg/reconciler/cronjobsource/cronjobsource_test.go
create mode 100644 pkg/reconciler/cronjobsource/doc.go
create mode 100644 pkg/reconciler/cronjobsource/resources/labels.go
create mode 100644 pkg/reconciler/cronjobsource/resources/receive_adapter.go
create mode 100644 pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
create mode 100644 pkg/reconciler/testing/cronjobsource.go
create mode 100644 vendor/github.com/robfig/cron/LICENSE
create mode 100644 vendor/github.com/robfig/cron/constantdelay.go
create mode 100644 vendor/github.com/robfig/cron/cron.go
create mode 100644 vendor/github.com/robfig/cron/doc.go
create mode 100644 vendor/github.com/robfig/cron/parser.go
create mode 100644 vendor/github.com/robfig/cron/spec.go
create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go
create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go
delete mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go
rename vendor/golang.org/x/oauth2/google/{doc_go19.go => doc.go} (99%)
delete mode 100644 vendor/golang.org/x/oauth2/google/doc_not_go19.go
delete mode 100644 vendor/golang.org/x/oauth2/google/go19.go
delete mode 100644 vendor/golang.org/x/oauth2/google/not_go19.go
diff --git a/Gopkg.lock b/Gopkg.lock
index ed867edd90a..099d11bd55e 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -676,6 +676,14 @@
pruneopts = "NUT"
revision = "3113b8401b8a98917cde58f8bbd42a1b1c03b1fd"
+[[projects]]
+ digest = "1:53c3320ee307f01fd24a88e396a8d2239cd8346d1a085320209319f2d33f59cc"
+ name = "github.com/robfig/cron"
+ packages = ["."]
+ pruneopts = "NUT"
+ revision = "b41be1df696709bb6395fe435af20370037c0b4c"
+ version = "v1.1"
+
[[projects]]
digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04"
name = "github.com/sergi/go-diff"
@@ -780,7 +788,8 @@
revision = "1e491301e022f8f977054da4c2d852decd59571f"
[[projects]]
- digest = "1:46bd4e66bfce5e77f08fc2e8dcacc3676e679241ce83d9c150ff0397d686dd44"
+ branch = "master"
+ digest = "1:3121d742fbe48670a16d98b6da4693501fc33cd76d69ed6f35850c564f255c65"
name = "golang.org/x/oauth2"
packages = [
".",
@@ -790,7 +799,7 @@
"jwt",
]
pruneopts = "NUT"
- revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
+ revision = "9f3314589c9a9136388751d9adae6b0ed400978a"
[[projects]]
branch = "master"
@@ -1358,7 +1367,10 @@
"github.com/Shopify/sarama",
"github.com/bsm/sarama-cluster",
"github.com/cloudevents/sdk-go",
+ "github.com/cloudevents/sdk-go/pkg/cloudevents",
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/client",
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
@@ -1371,6 +1383,7 @@
"github.com/knative/pkg/client/listers/istio/v1alpha3",
"github.com/knative/pkg/configmap",
"github.com/knative/pkg/controller",
+ "github.com/knative/pkg/kmeta",
"github.com/knative/pkg/kmp",
"github.com/knative/pkg/logging",
"github.com/knative/pkg/logging/logkey",
@@ -1388,6 +1401,7 @@
"github.com/nats-io/go-nats-streaming",
"github.com/nats-io/nats-streaming-server/server",
"github.com/prometheus/client_golang/prometheus/promhttp",
+ "github.com/robfig/cron",
"go.opencensus.io/exporter/prometheus",
"go.opencensus.io/stats",
"go.opencensus.io/stats/view",
@@ -1397,6 +1411,7 @@
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"go.uber.org/zap/zaptest/observer",
+ "golang.org/x/net/context",
"golang.org/x/oauth2/google",
"google.golang.org/api/option",
"gopkg.in/yaml.v2",
@@ -1426,6 +1441,7 @@
"k8s.io/client-go/dynamic",
"k8s.io/client-go/dynamic/fake",
"k8s.io/client-go/informers",
+ "k8s.io/client-go/informers/apps/v1",
"k8s.io/client-go/informers/core/v1",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
@@ -1437,6 +1453,7 @@
"k8s.io/client-go/rest",
"k8s.io/client-go/testing",
"k8s.io/client-go/tools/cache",
+ "k8s.io/client-go/tools/clientcmd",
"k8s.io/client-go/tools/record",
"k8s.io/client-go/util/flowcontrol",
"k8s.io/client-go/util/workqueue",
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index f1993c336a0..34aee80848a 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -104,12 +104,15 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod)
eventingInformerFactory := informers.NewSharedInformerFactory(opt.EventingClientSet, opt.ResyncPeriod)
+ // Eventing
triggerInformer := eventingInformerFactory.Eventing().V1alpha1().Triggers()
channelInformer := eventingInformerFactory.Eventing().V1alpha1().Channels()
subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
brokerInformer := eventingInformerFactory.Eventing().V1alpha1().Brokers()
- coreServiceInformer := kubeInformerFactory.Core().V1().Services()
- coreNamespaceInformer := kubeInformerFactory.Core().V1().Namespaces()
+
+ // Kube
+ serviceInformer := kubeInformerFactory.Core().V1().Services()
+ namespaceInformer := kubeInformerFactory.Core().V1().Namespaces()
configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps()
// Build all of our controllers, with the clients constructed above.
@@ -122,7 +125,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
),
namespace.NewController(
opt,
- coreNamespaceInformer,
+ namespaceInformer,
),
channel.NewController(
opt,
@@ -134,7 +137,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
channelInformer,
subscriptionInformer,
brokerInformer,
- coreServiceInformer,
+ serviceInformer,
),
}
if len(controllers) != numControllers {
@@ -153,13 +156,15 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
logger.Info("Starting informers.")
if err := kncontroller.StartInformers(
stopCh,
+ // Eventing
+ brokerInformer.Informer(),
+ channelInformer.Informer(),
subscriptionInformer.Informer(),
- configMapInformer.Informer(),
- coreNamespaceInformer.Informer(),
triggerInformer.Informer(),
- channelInformer.Informer(),
- brokerInformer.Informer(),
- coreServiceInformer.Informer(),
+ // Kube
+ configMapInformer.Informer(),
+ serviceInformer.Informer(),
+ namespaceInformer.Informer(),
); err != nil {
logger.Fatalf("Failed to start informers: %v", err)
}
diff --git a/cmd/cronjob_receive_adapter/main.go b/cmd/cronjob_receive_adapter/main.go
new file mode 100644
index 00000000000..965ad2af176
--- /dev/null
+++ b/cmd/cronjob_receive_adapter/main.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "flag"
+ "log"
+ "os"
+
+ "github.com/knative/eventing/pkg/adapter/cronjobevents"
+ "github.com/knative/pkg/signals"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "golang.org/x/net/context"
+)
+
+const (
+ // Environment variable containing the schedule.
+ envSchedule = "SCHEDULE"
+
+ // Environment variable containing data.
+ envData = "DATA"
+
+ // Sink for messages.
+ envSinkURI = "SINK_URI"
+)
+
+func getRequiredEnv(envKey string) string {
+ val, defined := os.LookupEnv(envKey)
+ if !defined {
+ log.Fatalf("required environment variable not defined %q", envKey)
+ }
+ return val
+}
+
+func main() {
+ flag.Parse()
+
+ ctx := context.Background()
+ logCfg := zap.NewProductionConfig()
+ logCfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ logger, err := logCfg.Build()
+ if err != nil {
+ log.Fatalf("Unable to create logger: %v", err)
+ }
+
+ adapter := &cronjobevents.Adapter{
+ Schedule: getRequiredEnv(envSchedule),
+ Data: getRequiredEnv(envData),
+ SinkURI: getRequiredEnv(envSinkURI),
+ }
+
+ logger.Info("Starting Receive Adapter", zap.Reflect("adapter", adapter))
+
+ stopCh := signals.SetupSignalHandler()
+
+ if err := adapter.Start(ctx, stopCh); err != nil {
+ logger.Fatal("Failed to start adapter", zap.Error(err))
+ }
+}
diff --git a/cmd/sources-controller/kodata/HEAD b/cmd/sources-controller/kodata/HEAD
new file mode 120000
index 00000000000..8f63681d362
--- /dev/null
+++ b/cmd/sources-controller/kodata/HEAD
@@ -0,0 +1 @@
+../../../.git/HEAD
\ No newline at end of file
diff --git a/cmd/sources-controller/kodata/LICENSE b/cmd/sources-controller/kodata/LICENSE
new file mode 120000
index 00000000000..5853aaea53b
--- /dev/null
+++ b/cmd/sources-controller/kodata/LICENSE
@@ -0,0 +1 @@
+../../../LICENSE
\ No newline at end of file
diff --git a/cmd/sources-controller/kodata/VENDOR-LICENSE b/cmd/sources-controller/kodata/VENDOR-LICENSE
new file mode 120000
index 00000000000..3cc89764519
--- /dev/null
+++ b/cmd/sources-controller/kodata/VENDOR-LICENSE
@@ -0,0 +1 @@
+../../../third_party/VENDOR-LICENSE
\ No newline at end of file
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
new file mode 100644
index 00000000000..6abe075e348
--- /dev/null
+++ b/cmd/sources-controller/main.go
@@ -0,0 +1,163 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "flag"
+ "k8s.io/client-go/tools/clientcmd"
+ "log"
+
+ // This import loads the gcp auth plugin (only required to authenticate against GKE clusters).
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+ kubeinformers "k8s.io/client-go/informers"
+ "k8s.io/client-go/rest"
+
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/logconfig"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/cronjobsource"
+ "github.com/knative/pkg/configmap"
+ kncontroller "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/logging/logkey"
+ "github.com/knative/pkg/signals"
+ "go.uber.org/zap"
+)
+
+var (
+ hardcodedLoggingConfig = flag.Bool("hardCodedLoggingConfig", false, "If true, use the hard coded logging config. It is intended to be used only when debugging outside a Kubernetes cluster.")
+ masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
+ kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
+)
+
+func main() {
+ flag.Parse()
+
+ logger, atomicLevel := setupLogger()
+ defer logger.Sync()
+ logger = logger.With(zap.String(logkey.ControllerType, logconfig.SourcesController))
+
+ // set up signals so we handle the first shutdown signal gracefully
+ stopCh := signals.SetupSignalHandler()
+
+ cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig)
+ if err != nil {
+ logger.Fatalw("Error building kubeconfig", zap.Error(err))
+ }
+
+ logger = logger.With(zap.String("controller/impl", "pkg"))
+ logger.Info("Starting the controller")
+
+ const numControllers = 1
+ cfg.QPS = numControllers * rest.DefaultQPS
+ cfg.Burst = numControllers * rest.DefaultBurst
+ opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
+
+ kubeInformerFactory := kubeinformers.NewSharedInformerFactory(opt.KubeClientSet, opt.ResyncPeriod)
+ eventingInformerFactory := informers.NewSharedInformerFactory(opt.EventingClientSet, opt.ResyncPeriod)
+
+ // Eventing
+ cronjobsourceInformer := eventingInformerFactory.Sources().V1alpha1().CronJobSources()
+
+ // Kube
+ deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
+
+ // Build all of our controllers, with the clients constructed above.
+ // Add new controllers to this array.
+ // You also need to modify numControllers above to match this.
+ controllers := []*kncontroller.Impl{
+ cronjobsource.NewController(
+ opt,
+ cronjobsourceInformer,
+ deploymentInformer,
+ ),
+ }
+ if len(controllers) != numControllers {
+ logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
+ }
+
+ // Watch the logging config map and dynamically update logging levels.
+ opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.SourcesController))
+ // TODO: Watch the observability config map and dynamically update metrics exporter.
+ //opt.ConfigMapWatcher.Watch(metrics.ObservabilityConfigName, metrics.UpdateExporterFromConfigMap(component, logger))
+ if err := opt.ConfigMapWatcher.Start(stopCh); err != nil {
+ logger.Fatalw("failed to start configuration manager", zap.Error(err))
+ }
+
+ // Start all of the informers and wait for them to sync.
+ logger.Info("Starting informers.")
+ if err := kncontroller.StartInformers(
+ stopCh,
+ // Eventing
+ cronjobsourceInformer.Informer(),
+ // Kube
+ deploymentInformer.Informer(),
+ ); err != nil {
+ logger.Fatalf("Failed to start informers: %v", err)
+ }
+
+ // Start all of the controllers.
+ logger.Info("Starting controllers.")
+ go kncontroller.StartAll(stopCh, controllers...)
+
+ <-stopCh
+}
+
+func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) {
+ // Set up our logger.
+ loggingConfigMap := getLoggingConfigOrDie()
+ loggingConfig, err := logging.NewConfigFromMap(loggingConfigMap)
+ if err != nil {
+ log.Fatalf("Error parsing logging configuration: %v", err)
+ }
+ return logging.NewLoggerFromConfig(loggingConfig, logconfig.Controller)
+}
+
+func getLoggingConfigOrDie() map[string]string {
+ if hardcodedLoggingConfig != nil && *hardcodedLoggingConfig {
+ return map[string]string{
+ "loglevel.controller": "info",
+ "zap-logger-config": `
+ {
+ "level": "info",
+ "development": false,
+ "outputPaths": ["stdout"],
+ "errorOutputPaths": ["stderr"],
+ "encoding": "json",
+ "encoderConfig": {
+ "timeKey": "ts",
+ "levelKey": "level",
+ "nameKey": "logger",
+ "callerKey": "caller",
+ "messageKey": "msg",
+ "stacktraceKey": "stacktrace",
+ "lineEnding": "",
+ "levelEncoder": "",
+ "timeEncoder": "iso8601",
+ "durationEncoder": "",
+ "callerEncoder": ""
+ }`,
+ }
+ } else {
+ cm, err := configmap.Load("/etc/config-logging")
+ if err != nil {
+ log.Fatalf("Error loading logging configuration: %v", err)
+ }
+ return cm
+ }
+}
diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml
index 7b0d29bca4c..44a8d3f1408 100644
--- a/config/200-controller-clusterrole.yaml
+++ b/config/200-controller-clusterrole.yaml
@@ -26,7 +26,7 @@ rules:
- "services"
- "events"
- "serviceaccounts"
- verbs:
+ verbs: &everything
- "get"
- "list"
- "create"
@@ -40,42 +40,21 @@ rules:
- "networking.istio.io"
resources:
- "virtualservices"
- verbs:
- - "get"
- - "list"
- - "create"
- - "update"
- - "delete"
- - "patch"
- - "watch"
+ verbs: *everything
# Brokers and the namespace annotation controllers manipulate Deployments.
- apiGroups:
- "apps"
resources:
- "deployments"
- verbs:
- - "get"
- - "list"
- - "create"
- - "update"
- - "delete"
- - "patch"
- - "watch"
+ verbs: *everything
# The namespace annotation controller needs to manipulate RoleBindings.
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- "rolebindings"
- verbs:
- - "get"
- - "list"
- - "create"
- - "update"
- - "delete"
- - "patch"
- - "watch"
+ verbs: *everything
# Our own resources and statuses we care about.
- apiGroups:
@@ -91,11 +70,13 @@ rules:
- "subscriptions/status"
- "triggers"
- "triggers/status"
- verbs:
- - "get"
- - "list"
- - "create"
- - "update"
- - "delete"
- - "patch"
- - "watch"
+ verbs: *everything
+
+ # Source resources and statuses we care about.
+ - apiGroups:
+ - "sources.eventing.knative.dev"
+ resources:
+ - "cronjobsources"
+ - "cronjobsources/status"
+ - "cronjobsources/finalizers"
+ verbs: *everything
diff --git a/config/300-cronjobsource.yaml b/config/300-cronjobsource.yaml
new file mode 100644
index 00000000000..50974d4718e
--- /dev/null
+++ b/config/300-cronjobsource.yaml
@@ -0,0 +1,84 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ labels:
+ eventing.knative.dev/source: "true"
+ knative.dev/crd-install: "true"
+ name: cronjobsources.sources.eventing.knative.dev
+spec:
+ group: sources.eventing.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - eventing
+ - sources
+ kind: CronJobSource
+ plural: cronjobsources
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ data:
+ type: string
+ schedule:
+ type: string
+ serviceAccountName:
+ type: string
+ sink:
+ type: object
+ required:
+ - schedule
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ # we use a string in the stored object but a wrapper object
+ # at runtime.
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ - status
+ type: object
+ type: array
+ sinkUri:
+ type: string
+ type: object
+ version: v1alpha1
diff --git a/config/400-source-controller-service.yaml b/config/400-source-controller-service.yaml
new file mode 100644
index 00000000000..3b37422a88b
--- /dev/null
+++ b/config/400-source-controller-service.yaml
@@ -0,0 +1,30 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: sources-controller
+ serving.knative.dev/release: devel
+ name: sources-controller
+ namespace: knative-eventing
+spec:
+ ports:
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: sources-controller
\ No newline at end of file
diff --git a/config/500-controller.yaml b/config/500-controller.yaml
index f1466e1a55a..2cb7d815c06 100644
--- a/config/500-controller.yaml
+++ b/config/500-controller.yaml
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -49,6 +50,8 @@ spec:
value: github.com/knative/eventing/cmd/broker/filter
- name: BROKER_FILTER_SERVICE_ACCOUNT
value: eventing-broker-filter
+ - name: CRONJOB_RA_IMAGE
+ value: github.com/knative/eventing/cmd/cronjob_receive_adapter
ports:
- containerPort: 9090
name: metrics
diff --git a/config/500-sources-controller.yaml b/config/500-sources-controller.yaml
new file mode 100644
index 00000000000..6f77f264418
--- /dev/null
+++ b/config/500-sources-controller.yaml
@@ -0,0 +1,68 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: sources-controller
+ namespace: knative-eventing
+ labels:
+ eventing.knative.dev/release: devel
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: sources-controller
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: sources-controller
+ eventing.knative.dev/release: devel
+ spec:
+ serviceAccountName: eventing-controller
+ containers:
+ - name: controller
+ # This is the Go import path for the binary that is containerized
+ # and substituted here.
+ image: github.com/knative/eventing/cmd/sources-controller
+ resources:
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ limits:
+ cpu: 1000m
+ memory: 1000Mi
+ ports:
+ - name: metrics
+ containerPort: 9090
+ volumeMounts:
+ - name: config-logging
+ mountPath: /etc/config-logging
+ env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ - name: CRONJOB_RA_IMAGE
+ # This is the Go import path for cron job receive adapter binary
+ # that is containerized and substituted here.
+ value: github.com/knative/eventing/cmd/cronjob_receive_adapter
+ volumes:
+ - name: config-logging
+ configMap:
+ name: config-logging
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index b651c1fc19a..dea5391fec7 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -28,7 +28,7 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${REPO_ROOT_DIR}; ls -d -1 ./vendor/k8s.io/code-
# instead of the $GOPATH directly. For normal projects this can be dropped.
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/knative/eventing/pkg/client github.com/knative/eventing/pkg/apis \
- "eventing:v1alpha1" \
+ "eventing:v1alpha1 sources:v1alpha1" \
--go-header-file ${REPO_ROOT_DIR}/hack/boilerplate/boilerplate.go.txt
# Only deepcopy the Duck types, as they are not real resources.
diff --git a/pkg/adapter/cronjobevents/adapter.go b/pkg/adapter/cronjobevents/adapter.go
new file mode 100644
index 00000000000..ee3146e3472
--- /dev/null
+++ b/pkg/adapter/cronjobevents/adapter.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cronjobevents
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "github.com/knative/eventing/pkg/kncloudevents"
+ "github.com/knative/pkg/logging"
+ "github.com/robfig/cron"
+ "go.uber.org/zap"
+)
+
+const (
+ eventType = "dev.knative.cronjob.event"
+)
+
+// TODO: this should be a k8s cron.
+
+// Adapter implements the Cron Job adapter to trigger a Sink.
+type Adapter struct {
+ // Schedule is a cron format string such as 0 * * * * or @hourly
+ Schedule string
+
+ // Data is the data to be posted to the target.
+ Data string
+
+ // SinkURI is the URI messages will be forwarded on to.
+ SinkURI string
+
+ // client sends cloudevents.
+ client client.Client
+}
+
+// Initialize cloudevent client
+func (a *Adapter) initClient() error {
+ if a.client == nil {
+ var err error
+ if a.client, err = kncloudevents.NewDefaultClient(a.SinkURI); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *Adapter) Start(ctx context.Context, stopCh <-chan struct{}) error {
+ logger := logging.FromContext(ctx)
+
+ sched, err := cron.ParseStandard(a.Schedule)
+ if err != nil {
+		logger.Errorw("Unparseable schedule", zap.String("schedule", a.Schedule), zap.Error(err))
+ return err
+ }
+
+ if err = a.initClient(); err != nil {
+ logger.Error("Failed to create cloudevent client", zap.Error(err))
+ return err
+ }
+
+ c := cron.New()
+ c.Schedule(sched, cron.FuncJob(a.cronTick))
+ c.Start()
+ <-stopCh
+ c.Stop()
+ logger.Info("Shutting down.")
+ return nil
+}
+
+func (a *Adapter) cronTick() {
+ logger := logging.FromContext(context.TODO())
+
+ event := cloudevents.Event{
+ Context: cloudevents.EventContextV02{
+ Type: eventType,
+ Source: *types.ParseURLRef("/CronJob"),
+ }.AsV02(),
+ Data: message(a.Data),
+ }
+ if _, err := a.client.Send(context.TODO(), event); err != nil {
+		logger.Errorw("failed to send cloudevent", zap.Error(err))
+ }
+}
+
+type Message struct {
+ Body string `json:"body"`
+}
+
+func message(body string) interface{} {
+ // try to marshal the body into an interface.
+ var objmap map[string]*json.RawMessage
+ if err := json.Unmarshal([]byte(body), &objmap); err != nil {
+		// Default to a wrapped message.
+ return Message{Body: body}
+ }
+ return objmap
+}
diff --git a/pkg/adapter/cronjobevents/adapter_test.go b/pkg/adapter/cronjobevents/adapter_test.go
new file mode 100644
index 00000000000..ae9ca48dd42
--- /dev/null
+++ b/pkg/adapter/cronjobevents/adapter_test.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cronjobevents
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestStart_ServeHTTP(t *testing.T) {
+ testCases := map[string]struct {
+ schedule string
+ sink func(http.ResponseWriter, *http.Request)
+ reqBody string
+ error bool
+ }{
+ "happy": {
+ schedule: "* * * * *", // every minute
+ sink: sinkAccepted,
+ reqBody: `{"body":"data"}`,
+ },
+ "rejected": {
+ schedule: "* * * * *", // every minute
+ sink: sinkRejected,
+ reqBody: `{"body":"data"}`,
+ error: true,
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ h := &fakeHandler{
+ handler: tc.sink,
+ }
+ sinkServer := httptest.NewServer(h)
+ defer sinkServer.Close()
+
+ a := &Adapter{
+ Schedule: tc.schedule,
+ Data: "data",
+ SinkURI: sinkServer.URL,
+ }
+
+ if err := a.initClient(); err != nil {
+ t.Errorf("failed to create cloudevent client, %v", err)
+ }
+
+ stop := make(chan struct{})
+ go func() {
+ if err := a.Start(context.TODO(), stop); err != nil {
+ if tc.error {
+ // skip
+ } else {
+ t.Errorf("failed to start, %v", err)
+ }
+ }
+ }()
+
+ a.cronTick() // force a tick.
+
+ if tc.reqBody != string(h.body) {
+ t.Errorf("expected request body %q, but got %q", tc.reqBody, h.body)
+ }
+ log.Print("test done")
+ })
+ }
+}
+
+func TestStartBadCron(t *testing.T) {
+ schedule := "bad"
+
+ a := &Adapter{
+ Schedule: schedule,
+ }
+
+ stop := make(chan struct{})
+ if err := a.Start(context.TODO(), stop); err == nil {
+
+		t.Errorf("expected Start to fail for invalid schedule %q, but got nil error", schedule)
+
+ }
+}
+
+func TestPostMessage_ServeHTTP(t *testing.T) {
+ testCases := map[string]struct {
+ sink func(http.ResponseWriter, *http.Request)
+ reqBody string
+ error bool
+ }{
+ "happy": {
+ sink: sinkAccepted,
+ reqBody: `{"body":"data"}`,
+ },
+ "rejected": {
+ sink: sinkRejected,
+ reqBody: `{"body":"data"}`,
+ error: true,
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ h := &fakeHandler{
+ handler: tc.sink,
+ }
+ sinkServer := httptest.NewServer(h)
+ defer sinkServer.Close()
+
+ a := &Adapter{
+ Data: "data",
+ SinkURI: sinkServer.URL,
+ }
+
+ if err := a.initClient(); err != nil {
+ t.Errorf("failed to create cloudevent client, %v", err)
+ }
+
+ a.cronTick()
+
+ if tc.reqBody != string(h.body) {
+ t.Errorf("expected request body %q, but got %q", tc.reqBody, h.body)
+ }
+ })
+ }
+}
+
+func TestMessage(t *testing.T) {
+ testCases := map[string]struct {
+ body string
+ want string
+ }{
+ "json simple": {
+ body: `{"message": "Hello world!"}`,
+ want: `{"message":"Hello world!"}`,
+ },
+ "json complex": {
+ body: `{"message": "Hello world!","extra":{"a":"sub", "b":[1,2,3]}}`,
+ want: `{"extra":{"a":"sub","b":[1,2,3]},"message":"Hello world!"}`,
+ },
+ "string": {
+ body: "Hello, World!",
+ want: `{"body":"Hello, World!"}`,
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+
+ m := message(tc.body)
+
+ j, err := json.Marshal(m)
+ if err != nil {
+				t.Errorf("failed to marshal message: %v", err)
+ }
+
+ got := string(j)
+ if diff := cmp.Diff(tc.want, got); diff != "" {
+ t.Errorf("%s: (-want, +got) = %v", n, diff)
+ }
+ })
+ }
+}
+
+type fakeHandler struct {
+ body []byte
+ ran int
+ handler func(http.ResponseWriter, *http.Request)
+}
+
+func (h *fakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "can not read body", http.StatusBadRequest)
+ return
+ }
+ h.body = body
+
+ defer r.Body.Close()
+ h.handler(w, r)
+
+ h.ran++
+}
+
+func sinkAccepted(writer http.ResponseWriter, req *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+func sinkRejected(writer http.ResponseWriter, _ *http.Request) {
+ writer.WriteHeader(http.StatusRequestTimeout)
+}
diff --git a/pkg/apis/duck/v1alpha1/doc.go b/pkg/apis/duck/v1alpha1/doc.go
index 3e5d1e3bc5a..5019094b1e5 100644
--- a/pkg/apis/duck/v1alpha1/doc.go
+++ b/pkg/apis/duck/v1alpha1/doc.go
@@ -1,9 +1,12 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
+
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/pkg/apis/duck/v1alpha1/subscribable_types.go b/pkg/apis/duck/v1alpha1/subscribable_types.go
index 740a035ead3..407a7912407 100644
--- a/pkg/apis/duck/v1alpha1/subscribable_types.go
+++ b/pkg/apis/duck/v1alpha1/subscribable_types.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018 The Knative Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
package v1alpha1
diff --git a/pkg/apis/duck/v1alpha1/subscribable_types_test.go b/pkg/apis/duck/v1alpha1/subscribable_types_test.go
index ccc6512ab36..5052347a97f 100644
--- a/pkg/apis/duck/v1alpha1/subscribable_types_test.go
+++ b/pkg/apis/duck/v1alpha1/subscribable_types_test.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018 The Knative Authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
package v1alpha1
diff --git a/pkg/apis/eventing/v1alpha1/doc.go b/pkg/apis/eventing/v1alpha1/doc.go
index e080fe737d2..1b017951a8a 100644
--- a/pkg/apis/eventing/v1alpha1/doc.go
+++ b/pkg/apis/eventing/v1alpha1/doc.go
@@ -1,9 +1,12 @@
/*
-Copyright 2018 The Knative Authors
+Copyright 2019 The Knative Authors
+
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
+
http://www.apache.org/licenses/LICENSE-2.0
+
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/pkg/apis/sources/register.go b/pkg/apis/sources/register.go
new file mode 100644
index 00000000000..4d2a4477695
--- /dev/null
+++ b/pkg/apis/sources/register.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sources
+
+const (
+ GroupName = "sources.eventing.knative.dev"
+)
diff --git a/pkg/apis/sources/v1alpha1/cron_job_lifecycle.go b/pkg/apis/sources/v1alpha1/cron_job_lifecycle.go
new file mode 100644
index 00000000000..c51d6d4feec
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/cron_job_lifecycle.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+const (
+ // CronJobConditionReady has status True when the CronJobSource is ready to send events.
+ CronJobConditionReady = duckv1alpha1.ConditionReady
+
+ // CronJobConditionValidSchedule has status True when the CronJobSource has been configured with a valid schedule.
+ CronJobConditionValidSchedule duckv1alpha1.ConditionType = "ValidSchedule"
+
+ // CronJobConditionSinkProvided has status True when the CronJobSource has been configured with a sink target.
+ CronJobConditionSinkProvided duckv1alpha1.ConditionType = "SinkProvided"
+
+	// CronJobConditionDeployed has status True when the CronJobSource has had its receive adapter deployment created.
+ CronJobConditionDeployed duckv1alpha1.ConditionType = "Deployed"
+)
+
+var cronJobSourceCondSet = duckv1alpha1.NewLivingConditionSet(
+ CronJobConditionValidSchedule,
+ CronJobConditionSinkProvided,
+ CronJobConditionDeployed)
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *CronJobSourceStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+ return cronJobSourceCondSet.Manage(s).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *CronJobSourceStatus) IsReady() bool {
+ return cronJobSourceCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *CronJobSourceStatus) InitializeConditions() {
+ cronJobSourceCondSet.Manage(s).InitializeConditions()
+}
+
+// TODO: this is a bad method name, change it.
+// MarkSchedule sets the condition that the source has a valid schedule configured.
+func (s *CronJobSourceStatus) MarkSchedule() {
+ cronJobSourceCondSet.Manage(s).MarkTrue(CronJobConditionValidSchedule)
+}
+
+// MarkInvalidSchedule sets the condition that the source does not have a valid schedule configured.
+func (s *CronJobSourceStatus) MarkInvalidSchedule(reason, messageFormat string, messageA ...interface{}) {
+ cronJobSourceCondSet.Manage(s).MarkFalse(CronJobConditionValidSchedule, reason, messageFormat, messageA...)
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *CronJobSourceStatus) MarkSink(uri string) {
+ s.SinkURI = uri
+ if len(uri) > 0 {
+ cronJobSourceCondSet.Manage(s).MarkTrue(CronJobConditionSinkProvided)
+ } else {
+ cronJobSourceCondSet.Manage(s).MarkUnknown(CronJobConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *CronJobSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ cronJobSourceCondSet.Manage(s).MarkFalse(CronJobConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// MarkDeployed sets the condition that the source has been deployed.
+func (s *CronJobSourceStatus) MarkDeployed() {
+ cronJobSourceCondSet.Manage(s).MarkTrue(CronJobConditionDeployed)
+}
+
+// MarkDeploying sets the condition that the source is deploying.
+func (s *CronJobSourceStatus) MarkDeploying(reason, messageFormat string, messageA ...interface{}) {
+ cronJobSourceCondSet.Manage(s).MarkUnknown(CronJobConditionDeployed, reason, messageFormat, messageA...)
+}
+
+// MarkNotDeployed sets the condition that the source has not been deployed.
+func (s *CronJobSourceStatus) MarkNotDeployed(reason, messageFormat string, messageA ...interface{}) {
+ cronJobSourceCondSet.Manage(s).MarkFalse(CronJobConditionDeployed, reason, messageFormat, messageA...)
+}
diff --git a/pkg/apis/sources/v1alpha1/cron_job_lifecycle_test.go b/pkg/apis/sources/v1alpha1/cron_job_lifecycle_test.go
new file mode 100644
index 00000000000..9dd7aaefb88
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/cron_job_lifecycle_test.go
@@ -0,0 +1,376 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1_test
+
+import (
+ "testing"
+
+ corev1 "k8s.io/api/core/v1"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+func TestCronJobSourceStatusIsReady(t *testing.T) {
+ tests := []struct {
+ name string
+ s *v1alpha1.CronJobSourceStatus
+ want bool
+ }{{
+ name: "uninitialized",
+ s: &v1alpha1.CronJobSourceStatus{},
+ want: false,
+ }, {
+ name: "initialized",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark schedule",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark schedule and sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark schedule, sink and deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark schedule, sink and deployed then not deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNotDeployed("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark schedule, sink and not deployed then deploying then deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkNotDeployed("MarkNotDeployed", "")
+ s.MarkDeploying("MarkDeploying", "")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark schedule validated, sink empty and deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark schedule validated, sink empty and deployed then sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("")
+ s.MarkDeployed()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: true,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.IsReady()
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Errorf("%s: unexpected condition (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
+
+func TestCronJobSourceStatusGetCondition(t *testing.T) {
+ tests := []struct {
+ name string
+ s *v1alpha1.CronJobSourceStatus
+ condQuery duckv1alpha1.ConditionType
+ want *duckv1alpha1.Condition
+ }{{
+ name: "uninitialized",
+ s: &v1alpha1.CronJobSourceStatus{},
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: nil,
+ }, {
+ name: "initialized",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark schedule",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark schedule, sink and deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }, {
+ name: "mark schedule, sink and deployed then no sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNoSink("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionFalse,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark schedule, sink and deployed then invalid schedule",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkInvalidSchedule("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionFalse,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark schedule, sink and deployed then deploying",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkDeploying("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark schedule, sink and deployed then not deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNotDeployed("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionFalse,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark schedule, sink and not deployed then deploying then deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("uri://example")
+ s.MarkNotDeployed("MarkNotDeployed", "%s", "")
+ s.MarkDeploying("MarkDeploying", "%s", "")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }, {
+ name: "mark schedule, sink empty and deployed",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "SinkEmpty",
+ Message: "Sink has resolved to empty.",
+ },
+ }, {
+ name: "mark schedule, sink empty and deployed then sink",
+ s: func() *v1alpha1.CronJobSourceStatus {
+ s := &v1alpha1.CronJobSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSchedule()
+ s.MarkSink("")
+ s.MarkDeployed()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ condQuery: v1alpha1.CronJobConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: v1alpha1.CronJobConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.GetCondition(test.condQuery)
+ ignoreTime := cmpopts.IgnoreFields(duckv1alpha1.Condition{},
+ "LastTransitionTime", "Severity")
+ if diff := cmp.Diff(test.want, got, ignoreTime); diff != "" {
+ t.Errorf("unexpected condition (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/sources/v1alpha1/cron_job_types.go b/pkg/apis/sources/v1alpha1/cron_job_types.go
new file mode 100644
index 00000000000..439184aba9d
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/cron_job_types.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/knative/pkg/apis/duck"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/kmeta"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// CronJobSource is the Schema for the cronjobsources API.
+type CronJobSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec CronJobSourceSpec `json:"spec,omitempty"`
+ Status CronJobSourceStatus `json:"status,omitempty"`
+}
+
+// TODO: Check that CronJobSource can be validated and can be defaulted.
+
+// Check that it is a runtime object.
+var _ runtime.Object = (*CronJobSource)(nil)
+
+// Check that we can create OwnerReferences to a CronJobSource.
+var _ kmeta.OwnerRefable = (*CronJobSource)(nil)
+
+// Check that CronJobSource implements the Conditions duck type.
+var _ = duck.VerifyType(&CronJobSource{}, &duckv1alpha1.Conditions{})
+
+// CronJobSourceSpec defines the desired state of the CronJobSource.
+type CronJobSourceSpec struct {
+
+ // Schedule is the cronjob schedule.
+ // +required
+ Schedule string `json:"schedule"`
+
+ // Data is the data posted to the target function.
+ Data string `json:"data,omitempty"`
+
+ // Sink is a reference to an object that will resolve to a domain name to use as the sink.
+ // +optional
+ Sink *corev1.ObjectReference `json:"sink,omitempty"`
+
+ // ServiceAccountName is the name of the ServiceAccount that will be used to run the Receive
+ // Adapter Deployment.
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (s *CronJobSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("CronJobSource")
+}
+
+// CronJobSourceStatus defines the observed state of CronJobSource.
+type CronJobSourceStatus struct {
+ // inherits duck/v1alpha1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1alpha1.Status `json:",inline"`
+
+ // SinkURI is the current active sink URI that has been configured for the CronJobSource.
+ // +optional
+ SinkURI string `json:"sinkUri,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CronJobSourceList contains a list of CronJobSources.
+type CronJobSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []CronJobSource `json:"items"`
+}
diff --git a/pkg/apis/sources/v1alpha1/doc.go b/pkg/apis/sources/v1alpha1/doc.go
new file mode 100644
index 00000000000..7d7f6738fbc
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the sources v1alpha1 API group
+// +k8s:deepcopy-gen=package
+// +groupName=sources.eventing.knative.dev
+package v1alpha1
diff --git a/pkg/apis/sources/v1alpha1/register.go b/pkg/apis/sources/v1alpha1/register.go
new file mode 100644
index 00000000000..a170bc79c54
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/knative/eventing/pkg/apis/sources"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CronJobSource{},
+ &CronJobSourceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/pkg/apis/sources/v1alpha1/register_test.go b/pkg/apis/sources/v1alpha1/register_test.go
new file mode 100644
index 00000000000..2c4e17d8b5e
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/register_test.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func TestResource(t *testing.T) {
+ want := schema.GroupResource{
+ Group: "sources.eventing.knative.dev",
+ Resource: "foo",
+ }
+
+ got := Resource("foo")
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected resource (-want, +got) = %v", diff)
+ }
+}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func TestKind(t *testing.T) {
+ want := schema.GroupKind{
+ Group: "sources.eventing.knative.dev",
+ Kind: "kind",
+ }
+
+ got := Kind("kind")
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected resource (-want, +got) = %v", diff)
+ }
+}
+
+// TestKnownTypes makes sure that expected types get added.
+func TestKnownTypes(t *testing.T) {
+ scheme := runtime.NewScheme()
+ addKnownTypes(scheme)
+ types := scheme.KnownTypes(SchemeGroupVersion)
+
+ for _, name := range []string{
+ "CronJobSource",
+ "CronJobSourceList",
+ } {
+ if _, ok := types[name]; !ok {
+ t.Errorf("Did not find %q as registered type", name)
+ }
+ }
+
+}
diff --git a/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..686c603a5e3
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,125 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSource) DeepCopyInto(out *CronJobSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSource.
+func (in *CronJobSource) DeepCopy() *CronJobSource {
+ if in == nil {
+ return nil
+ }
+ out := new(CronJobSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJobSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSourceList) DeepCopyInto(out *CronJobSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CronJobSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSourceList.
+func (in *CronJobSourceList) DeepCopy() *CronJobSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(CronJobSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CronJobSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSourceSpec) DeepCopyInto(out *CronJobSourceSpec) {
+ *out = *in
+ if in.Sink != nil {
+ in, out := &in.Sink, &out.Sink
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSourceSpec.
+func (in *CronJobSourceSpec) DeepCopy() *CronJobSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CronJobSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CronJobSourceStatus) DeepCopyInto(out *CronJobSourceStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSourceStatus.
+func (in *CronJobSourceStatus) DeepCopy() *CronJobSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CronJobSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index e93eb9138e5..526251ec438 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -20,6 +20,7 @@ package versioned
import (
eventingv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -30,6 +31,9 @@ type Interface interface {
EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Eventing() eventingv1alpha1.EventingV1alpha1Interface
+ SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface
+ // Deprecated: please explicitly pick a version if possible.
+ Sources() sourcesv1alpha1.SourcesV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
@@ -37,6 +41,7 @@ type Interface interface {
type Clientset struct {
*discovery.DiscoveryClient
eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client
+ sourcesV1alpha1 *sourcesv1alpha1.SourcesV1alpha1Client
}
// EventingV1alpha1 retrieves the EventingV1alpha1Client
@@ -50,6 +55,17 @@ func (c *Clientset) Eventing() eventingv1alpha1.EventingV1alpha1Interface {
return c.eventingV1alpha1
}
+// SourcesV1alpha1 retrieves the SourcesV1alpha1Client
+func (c *Clientset) SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface {
+ return c.sourcesV1alpha1
+}
+
+// Deprecated: Sources retrieves the default version of SourcesClient.
+// Please explicitly pick a version.
+func (c *Clientset) Sources() sourcesv1alpha1.SourcesV1alpha1Interface {
+ return c.sourcesV1alpha1
+}
+
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
@@ -70,6 +86,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
if err != nil {
return nil, err
}
+ cs.sourcesV1alpha1, err = sourcesv1alpha1.NewForConfig(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
@@ -83,6 +103,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.eventingV1alpha1 = eventingv1alpha1.NewForConfigOrDie(c)
+ cs.sourcesV1alpha1 = sourcesv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
@@ -92,6 +113,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.eventingV1alpha1 = eventingv1alpha1.New(c)
+ cs.sourcesV1alpha1 = sourcesv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go
index 5fc64b9c1c7..fbb1c267d76 100644
--- a/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -22,6 +22,8 @@ import (
clientset "github.com/knative/eventing/pkg/client/clientset/versioned"
eventingv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
fakeeventingv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1"
+ fakesourcesv1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -80,3 +82,13 @@ func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interfac
func (c *Clientset) Eventing() eventingv1alpha1.EventingV1alpha1Interface {
return &fakeeventingv1alpha1.FakeEventingV1alpha1{Fake: &c.Fake}
}
+
+// SourcesV1alpha1 retrieves the SourcesV1alpha1Client
+func (c *Clientset) SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface {
+ return &fakesourcesv1alpha1.FakeSourcesV1alpha1{Fake: &c.Fake}
+}
+
+// Sources retrieves the SourcesV1alpha1Client
+func (c *Clientset) Sources() sourcesv1alpha1.SourcesV1alpha1Interface {
+ return &fakesourcesv1alpha1.FakeSourcesV1alpha1{Fake: &c.Fake}
+}
diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index e29260ec25f..28e19da277e 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -20,6 +20,7 @@ package fake
import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -32,6 +33,7 @@ var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
eventingv1alpha1.AddToScheme,
+ sourcesv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index 5df4708af46..b78dadc4c61 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -20,6 +20,7 @@ package scheme
import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -32,6 +33,7 @@ var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
eventingv1alpha1.AddToScheme,
+ sourcesv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/cronjobsource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/cronjobsource.go
new file mode 100644
index 00000000000..bb6a1257c6f
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/cronjobsource.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ scheme "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CronJobSourcesGetter has a method to return a CronJobSourceInterface.
+// A group's client should implement this interface.
+type CronJobSourcesGetter interface {
+ CronJobSources(namespace string) CronJobSourceInterface
+}
+
+// CronJobSourceInterface has methods to work with CronJobSource resources.
+type CronJobSourceInterface interface {
+ Create(*v1alpha1.CronJobSource) (*v1alpha1.CronJobSource, error)
+ Update(*v1alpha1.CronJobSource) (*v1alpha1.CronJobSource, error)
+ UpdateStatus(*v1alpha1.CronJobSource) (*v1alpha1.CronJobSource, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.CronJobSource, error)
+ List(opts v1.ListOptions) (*v1alpha1.CronJobSourceList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CronJobSource, err error)
+ CronJobSourceExpansion
+}
+
+// cronJobSources implements CronJobSourceInterface
+type cronJobSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newCronJobSources returns a CronJobSources
+func newCronJobSources(c *SourcesV1alpha1Client, namespace string) *cronJobSources {
+ return &cronJobSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the cronJobSource, and returns the corresponding cronJobSource object, and an error if there is any.
+func (c *cronJobSources) Get(name string, options v1.GetOptions) (result *v1alpha1.CronJobSource, err error) {
+ result = &v1alpha1.CronJobSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CronJobSources that match those selectors.
+func (c *cronJobSources) List(opts v1.ListOptions) (result *v1alpha1.CronJobSourceList, err error) {
+ result = &v1alpha1.CronJobSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobSources.
+func (c *cronJobSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a cronJobSource and creates it. Returns the server's representation of the cronJobSource, and an error, if there is any.
+func (c *cronJobSources) Create(cronJobSource *v1alpha1.CronJobSource) (result *v1alpha1.CronJobSource, err error) {
+ result = &v1alpha1.CronJobSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ Body(cronJobSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a cronJobSource and updates it. Returns the server's representation of the cronJobSource, and an error, if there is any.
+func (c *cronJobSources) Update(cronJobSource *v1alpha1.CronJobSource) (result *v1alpha1.CronJobSource, err error) {
+ result = &v1alpha1.CronJobSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ Name(cronJobSource.Name).
+ Body(cronJobSource).
+ Do().
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *cronJobSources) UpdateStatus(cronJobSource *v1alpha1.CronJobSource) (result *v1alpha1.CronJobSource, err error) {
+ result = &v1alpha1.CronJobSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ Name(cronJobSource.Name).
+ SubResource("status").
+ Body(cronJobSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the cronJobSource and deletes it. Returns an error if one occurs.
+func (c *cronJobSources) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cronJobSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched cronJobSource.
+func (c *cronJobSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CronJobSource, err error) {
+ result = &v1alpha1.CronJobSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("cronjobsources").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go
new file mode 100644
index 00000000000..a1c6bb9fe8f
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go
new file mode 100644
index 00000000000..a00e5d7b21a
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_cronjobsource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_cronjobsource.go
new file mode 100644
index 00000000000..a7b30f81401
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_cronjobsource.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCronJobSources implements CronJobSourceInterface
+type FakeCronJobSources struct {
+ Fake *FakeSourcesV1alpha1
+ ns string
+}
+
+var cronjobsourcesResource = schema.GroupVersionResource{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Resource: "cronjobsources"}
+
+var cronjobsourcesKind = schema.GroupVersionKind{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Kind: "CronJobSource"}
+
+// Get takes name of the cronJobSource, and returns the corresponding cronJobSource object, and an error if there is any.
+func (c *FakeCronJobSources) Get(name string, options v1.GetOptions) (result *v1alpha1.CronJobSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(cronjobsourcesResource, c.ns, name), &v1alpha1.CronJobSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.CronJobSource), err
+}
+
+// List takes label and field selectors, and returns the list of CronJobSources that match those selectors.
+func (c *FakeCronJobSources) List(opts v1.ListOptions) (result *v1alpha1.CronJobSourceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(cronjobsourcesResource, cronjobsourcesKind, c.ns, opts), &v1alpha1.CronJobSourceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.CronJobSourceList{ListMeta: obj.(*v1alpha1.CronJobSourceList).ListMeta}
+ for _, item := range obj.(*v1alpha1.CronJobSourceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested cronJobSources.
+func (c *FakeCronJobSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(cronjobsourcesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a cronJobSource and creates it. Returns the server's representation of the cronJobSource, and an error, if there is any.
+func (c *FakeCronJobSources) Create(cronJobSource *v1alpha1.CronJobSource) (result *v1alpha1.CronJobSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(cronjobsourcesResource, c.ns, cronJobSource), &v1alpha1.CronJobSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.CronJobSource), err
+}
+
+// Update takes the representation of a cronJobSource and updates it. Returns the server's representation of the cronJobSource, and an error, if there is any.
+func (c *FakeCronJobSources) Update(cronJobSource *v1alpha1.CronJobSource) (result *v1alpha1.CronJobSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(cronjobsourcesResource, c.ns, cronJobSource), &v1alpha1.CronJobSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.CronJobSource), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCronJobSources) UpdateStatus(cronJobSource *v1alpha1.CronJobSource) (*v1alpha1.CronJobSource, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(cronjobsourcesResource, "status", c.ns, cronJobSource), &v1alpha1.CronJobSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.CronJobSource), err
+}
+
+// Delete takes name of the cronJobSource and deletes it. Returns an error if one occurs.
+func (c *FakeCronJobSources) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(cronjobsourcesResource, c.ns, name), &v1alpha1.CronJobSource{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCronJobSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(cronjobsourcesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.CronJobSourceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched cronJobSource.
+func (c *FakeCronJobSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.CronJobSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(cronjobsourcesResource, c.ns, name, data, subresources...), &v1alpha1.CronJobSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.CronJobSource), err
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
new file mode 100644
index 00000000000..c2d9faeb94a
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/client/clientset/versioned/typed/sources/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeSourcesV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeSourcesV1alpha1) CronJobSources(namespace string) v1alpha1.CronJobSourceInterface {
+ return &FakeCronJobSources{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeSourcesV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
new file mode 100644
index 00000000000..e3e7bf27492
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type CronJobSourceExpansion interface{}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
new file mode 100644
index 00000000000..31733ad3ea4
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ rest "k8s.io/client-go/rest"
+)
+
+type SourcesV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ CronJobSourcesGetter
+}
+
+// SourcesV1alpha1Client is used to interact with features provided by the sources.eventing.knative.dev group.
+type SourcesV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *SourcesV1alpha1Client) CronJobSources(namespace string) CronJobSourceInterface {
+ return newCronJobSources(c, namespace)
+}
+
+// NewForConfig creates a new SourcesV1alpha1Client for the given config.
+func NewForConfig(c *rest.Config) (*SourcesV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return &SourcesV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SourcesV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SourcesV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new SourcesV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *SourcesV1alpha1Client {
+ return &SourcesV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *SourcesV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go
index f5f0897b7c9..c591485735a 100644
--- a/pkg/client/informers/externalversions/factory.go
+++ b/pkg/client/informers/externalversions/factory.go
@@ -26,6 +26,7 @@ import (
versioned "github.com/knative/eventing/pkg/client/clientset/versioned"
eventing "github.com/knative/eventing/pkg/client/informers/externalversions/eventing"
internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ sources "github.com/knative/eventing/pkg/client/informers/externalversions/sources"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -173,8 +174,13 @@ type SharedInformerFactory interface {
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Eventing() eventing.Interface
+ Sources() sources.Interface
}
func (f *sharedInformerFactory) Eventing() eventing.Interface {
return eventing.New(f, f.namespace, f.tweakListOptions)
}
+
+func (f *sharedInformerFactory) Sources() sources.Interface {
+ return sources.New(f, f.namespace, f.tweakListOptions)
+}
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 8aa9b1ad555..013b0b1e9fc 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -22,6 +22,7 @@ import (
"fmt"
v1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -64,6 +65,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case v1alpha1.SchemeGroupVersion.WithResource("triggers"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Triggers().Informer()}, nil
+ // Group=sources.eventing.knative.dev, Version=v1alpha1
+ case sourcesv1alpha1.SchemeGroupVersion.WithResource("cronjobsources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().CronJobSources().Informer()}, nil
+
}
return nil, fmt.Errorf("no informer found for %v", resource)
diff --git a/pkg/client/informers/externalversions/sources/interface.go b/pkg/client/informers/externalversions/sources/interface.go
new file mode 100644
index 00000000000..d638f2a8578
--- /dev/null
+++ b/pkg/client/informers/externalversions/sources/interface.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package sources
+
+import (
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/knative/eventing/pkg/client/informers/externalversions/sources/v1alpha1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/cronjobsource.go b/pkg/client/informers/externalversions/sources/v1alpha1/cronjobsource.go
new file mode 100644
index 00000000000..c6d831f8ecc
--- /dev/null
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/cronjobsource.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ time "time"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ versioned "github.com/knative/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// CronJobSourceInformer provides access to a shared informer and lister for
+// CronJobSources.
+type CronJobSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.CronJobSourceLister
+}
+
+type cronJobSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewCronJobSourceInformer constructs a new informer for CronJobSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewCronJobSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredCronJobSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredCronJobSourceInformer constructs a new informer for CronJobSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredCronJobSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().CronJobSources(namespace).List(options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().CronJobSources(namespace).Watch(options)
+ },
+ },
+ &sourcesv1alpha1.CronJobSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *cronJobSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredCronJobSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *cronJobSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1alpha1.CronJobSource{}, f.defaultInformer)
+}
+
+func (f *cronJobSourceInformer) Lister() v1alpha1.CronJobSourceLister {
+ return v1alpha1.NewCronJobSourceLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/interface.go b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
new file mode 100644
index 00000000000..244340025a8
--- /dev/null
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // CronJobSources returns a CronJobSourceInformer.
+ CronJobSources() CronJobSourceInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// CronJobSources returns a CronJobSourceInformer.
+func (v *version) CronJobSources() CronJobSourceInformer {
+ return &cronJobSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/pkg/client/listers/sources/v1alpha1/cronjobsource.go b/pkg/client/listers/sources/v1alpha1/cronjobsource.go
new file mode 100644
index 00000000000..a720fb0e9df
--- /dev/null
+++ b/pkg/client/listers/sources/v1alpha1/cronjobsource.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// CronJobSourceLister helps list CronJobSources.
+type CronJobSourceLister interface {
+ // List lists all CronJobSources in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha1.CronJobSource, err error)
+ // CronJobSources returns an object that can list and get CronJobSources.
+ CronJobSources(namespace string) CronJobSourceNamespaceLister
+ CronJobSourceListerExpansion
+}
+
+// cronJobSourceLister implements the CronJobSourceLister interface.
+type cronJobSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewCronJobSourceLister returns a new CronJobSourceLister.
+func NewCronJobSourceLister(indexer cache.Indexer) CronJobSourceLister {
+ return &cronJobSourceLister{indexer: indexer}
+}
+
+// List lists all CronJobSources in the indexer.
+func (s *cronJobSourceLister) List(selector labels.Selector) (ret []*v1alpha1.CronJobSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.CronJobSource))
+ })
+ return ret, err
+}
+
+// CronJobSources returns an object that can list and get CronJobSources.
+func (s *cronJobSourceLister) CronJobSources(namespace string) CronJobSourceNamespaceLister {
+ return cronJobSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// CronJobSourceNamespaceLister helps list and get CronJobSources.
+type CronJobSourceNamespaceLister interface {
+ // List lists all CronJobSources in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha1.CronJobSource, err error)
+ // Get retrieves the CronJobSource from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha1.CronJobSource, error)
+ CronJobSourceNamespaceListerExpansion
+}
+
+// cronJobSourceNamespaceLister implements the CronJobSourceNamespaceLister
+// interface.
+type cronJobSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all CronJobSources in the indexer for a given namespace.
+func (s cronJobSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CronJobSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.CronJobSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the CronJobSource from the indexer for a given namespace and name.
+func (s cronJobSourceNamespaceLister) Get(name string) (*v1alpha1.CronJobSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("cronjobsource"), name)
+ }
+ return obj.(*v1alpha1.CronJobSource), nil
+}
diff --git a/pkg/client/listers/sources/v1alpha1/expansion_generated.go b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
new file mode 100644
index 00000000000..444c53f7032
--- /dev/null
+++ b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// CronJobSourceListerExpansion allows custom methods to be added to
+// CronJobSourceLister.
+type CronJobSourceListerExpansion interface{}
+
+// CronJobSourceNamespaceListerExpansion allows custom methods to be added to
+// CronJobSourceNamespaceLister.
+type CronJobSourceNamespaceListerExpansion interface{}
diff --git a/pkg/duck/sinks.go b/pkg/duck/sinks.go
new file mode 100644
index 00000000000..e5a83686d94
--- /dev/null
+++ b/pkg/duck/sinks.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/dynamic"
+
+ duckapis "github.com/knative/pkg/apis"
+ "github.com/knative/pkg/apis/duck"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+// GetSinkURI retrieves the sink URI from the object referenced by the given
+// ObjectReference.
+func GetSinkURI(ctx context.Context, dynamicClient dynamic.Interface, sink *corev1.ObjectReference, namespace string) (string, error) {
+ if sink == nil {
+ return "", fmt.Errorf("sink ref is nil")
+ }
+
+ rc := dynamicClient.Resource(duckapis.KindToResource(sink.GroupVersionKind()))
+ if rc == nil {
+ return "", fmt.Errorf("failed to create dynamic client resource")
+ }
+
+ u, err := rc.Namespace(namespace).Get(sink.Name, metav1.GetOptions{})
+ if err != nil {
+ return "", err
+ }
+
+ objIdentifier := fmt.Sprintf("\"%s/%s\" (%s)", u.GetNamespace(), u.GetName(), u.GroupVersionKind())
+
+ t := duckv1alpha1.AddressableType{}
+ err = duck.FromUnstructured(u, &t)
+ if err != nil {
+ return "", fmt.Errorf("failed to deserialize sink %s: %v", objIdentifier, err)
+ }
+
+ if t.Status.Address == nil {
+ return "", fmt.Errorf("sink %s does not contain address", objIdentifier)
+ }
+
+ if t.Status.Address.Hostname == "" {
+ return "", fmt.Errorf("sink %s contains an empty hostname", objIdentifier)
+ }
+
+ return fmt.Sprintf("http://%s/", t.Status.Address.Hostname), nil
+}
diff --git a/pkg/duck/sinks_test.go b/pkg/duck/sinks_test.go
new file mode 100644
index 00000000000..e52c94ce742
--- /dev/null
+++ b/pkg/duck/sinks_test.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/dynamic/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+)
+
+var (
+ addressableDNS = "addressable.sink.svc.cluster.local"
+
+ addressableName = "testsink"
+ addressableKind = "Sink"
+ addressableAPIVersion = "duck.knative.dev/v1alpha1"
+
+ unaddressableName = "testunaddressable"
+ unaddressableKind = "KResource"
+ unaddressableAPIVersion = "duck.knative.dev/v1alpha1"
+ unaddressableResource = "kresources.duck.knative.dev"
+
+ testNS = "testnamespace"
+)
+
+func init() {
+ // Add types to scheme
+ duckv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+func TestGetSinkURI(t *testing.T) {
+ testCases := map[string]struct {
+ objects []runtime.Object
+ namespace string
+ want string
+ wantErr error
+ ref *corev1.ObjectReference
+ }{
+ "happy": {
+ objects: []runtime.Object{
+ getAddressable(),
+ },
+ namespace: testNS,
+ ref: getAddressableRef(),
+ want: fmt.Sprintf("http://%s/", addressableDNS),
+ },
+ "nil hostname": {
+ objects: []runtime.Object{
+ getAddressableNilHostname(),
+ },
+ namespace: testNS,
+ ref: getUnaddressableRef(),
+ wantErr: fmt.Errorf(`sink "testnamespace/testunaddressable" (duck.knative.dev/v1alpha1, Kind=KResource) contains an empty hostname`),
+ },
+ "nil sink": {
+ objects: []runtime.Object{
+ getAddressableNilHostname(),
+ },
+ namespace: testNS,
+ ref: nil,
+ wantErr: fmt.Errorf(`sink ref is nil`),
+ },
+ "nil address": {
+ objects: []runtime.Object{
+ getAddressableNilAddress(),
+ },
+ namespace: testNS,
+ ref: nil,
+ wantErr: fmt.Errorf(`sink ref is nil`),
+ },
+ "notSink": {
+ objects: []runtime.Object{
+ getAddressableNoStatus(),
+ },
+ namespace: testNS,
+ ref: getUnaddressableRef(),
+ wantErr: fmt.Errorf(`sink "testnamespace/testunaddressable" (duck.knative.dev/v1alpha1, Kind=KResource) does not contain address`),
+ },
+ "notFound": {
+ namespace: testNS,
+ ref: getUnaddressableRef(),
+ wantErr: fmt.Errorf(`%s "%s" not found`, unaddressableResource, unaddressableName),
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ ctx := context.Background()
+ client := fake.NewSimpleDynamicClient(scheme.Scheme, tc.objects...)
+ uri, gotErr := GetSinkURI(ctx, client, tc.ref, tc.namespace)
+ if gotErr != nil {
+ if tc.wantErr != nil {
+ if diff := cmp.Diff(tc.wantErr.Error(), gotErr.Error()); diff != "" {
+ t.Errorf("%s: unexpected error (-want, +got) = %v", n, diff)
+ }
+ } else {
+ t.Errorf("%s: unexpected error %v", n, gotErr.Error())
+ }
+ }
+ if gotErr == nil {
+ got := uri
+ if diff := cmp.Diff(tc.want, got); diff != "" {
+ t.Errorf("%s: unexpected object (-want, +got) = %v", n, diff)
+ }
+ }
+ })
+ }
+}
+
+func getAddressable() *unstructured.Unstructured {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": addressableAPIVersion,
+ "kind": addressableKind,
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": addressableName,
+ },
+ "status": map[string]interface{}{
+ "address": map[string]interface{}{
+ "hostname": addressableDNS,
+ },
+ },
+ },
+ }
+}
+
+func getAddressableNoStatus() *unstructured.Unstructured {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": unaddressableAPIVersion,
+ "kind": unaddressableKind,
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": unaddressableName,
+ },
+ },
+ }
+}
+
+func getAddressableNilAddress() *unstructured.Unstructured {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": unaddressableAPIVersion,
+ "kind": unaddressableKind,
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": unaddressableName,
+ },
+ "status": map[string]interface{}{
+ "address": map[string]interface{}(nil),
+ },
+ },
+ }
+}
+
+func getAddressableNilHostname() *unstructured.Unstructured {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": unaddressableAPIVersion,
+ "kind": unaddressableKind,
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": unaddressableName,
+ },
+ "status": map[string]interface{}{
+ "address": map[string]interface{}{
+ "hostname": nil,
+ },
+ },
+ },
+ }
+}
+
+func getAddressableRef() *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ Kind: addressableKind,
+ Name: addressableName,
+ APIVersion: addressableAPIVersion,
+ }
+}
+
+func getUnaddressableRef() *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ Kind: unaddressableKind,
+ Name: unaddressableName,
+ APIVersion: unaddressableAPIVersion,
+ }
+}
diff --git a/pkg/utils/resolve/subscriber.go b/pkg/duck/subscriber.go
similarity index 99%
rename from pkg/utils/resolve/subscriber.go
rename to pkg/duck/subscriber.go
index 8fa5a3c47a0..9458868234b 100644
--- a/pkg/utils/resolve/subscriber.go
+++ b/pkg/duck/subscriber.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package resolve
+package duck
import (
"context"
diff --git a/pkg/utils/resolve/subscriber_test.go b/pkg/duck/subscriber_test.go
similarity index 99%
rename from pkg/utils/resolve/subscriber_test.go
rename to pkg/duck/subscriber_test.go
index ec13a4f5fab..2ede971e1f1 100644
--- a/pkg/utils/resolve/subscriber_test.go
+++ b/pkg/duck/subscriber_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package resolve
+package duck
import (
"context"
@@ -35,10 +35,6 @@ import (
"k8s.io/client-go/kubernetes/scheme"
)
-const (
- testNS = "test-namespace"
-)
-
var (
uri = "http://example.com"
diff --git a/pkg/kncloudevents/good_client.go b/pkg/kncloudevents/good_client.go
new file mode 100644
index 00000000000..9a000dba51a
--- /dev/null
+++ b/pkg/kncloudevents/good_client.go
@@ -0,0 +1,30 @@
+package kncloudevents
+
+import (
+ "github.com/cloudevents/sdk-go"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
+)
+
+func NewDefaultClient(target ...string) (cloudevents.Client, error) {
+ tOpts := []http.Option{cloudevents.WithBinaryEncoding()}
+ if len(target) > 0 && target[0] != "" {
+ tOpts = append(tOpts, cloudevents.WithTarget(target[0]))
+ }
+
+ // Make an http transport for the CloudEvents client.
+ t, err := cloudevents.NewHTTPTransport(tOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Use the transport to make a new CloudEvents client.
+ c, err := cloudevents.NewClient(t,
+ cloudevents.WithUUIDs(),
+ cloudevents.WithTimeNow(),
+ )
+
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
diff --git a/pkg/logconfig/config.go b/pkg/logconfig/config.go
index 9ef2e550781..48b3d842cab 100644
--- a/pkg/logconfig/config.go
+++ b/pkg/logconfig/config.go
@@ -33,6 +33,9 @@ const (
// Controller is the name of the override key used inside of the logging config for Controller.
Controller = "controller"
+ // SourcesController is the name of the override key used inside of the logging config for Sources Controller.
+ SourcesController = "sources-controller"
+
// Webhook is the name of the override key used inside of the logging config for Webhook Controller.
WebhookNameEnv = "WEBHOOK_NAME"
)
diff --git a/pkg/reconciler/cronjobsource/cronjobsource.go b/pkg/reconciler/cronjobsource/cronjobsource.go
new file mode 100644
index 00000000000..488f144ca5b
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/cronjobsource.go
@@ -0,0 +1,287 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cronjobsource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "sync"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ appsv1informers "k8s.io/client-go/informers/apps/v1"
+ appsv1listers "k8s.io/client-go/listers/apps/v1"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ sourceinformers "github.com/knative/eventing/pkg/client/informers/externalversions/sources/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ "github.com/knative/eventing/pkg/duck"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/cronjobsource/resources"
+ "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/logging"
+ "github.com/robfig/cron"
+ "go.uber.org/zap"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "CronJobSources"
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "cronjob-source-controller"
+
+ // Name of the corev1.Events emitted from the reconciliation process
+ cronjobReconciled = "CronJobSourceReconciled"
+ cronjobUpdateStatusFailed = "CronJobSourceUpdateStatusFailed"
+
+ // raImageEnvVar is the name of the environment variable that contains the receive adapter's
+ // image. It must be defined.
+ raImageEnvVar = "CRONJOB_RA_IMAGE"
+)
+
+type Reconciler struct {
+ *reconciler.Base
+
+ receiveAdapterImage string
+ once sync.Once
+
+ // listers index properties about resources
+ cronjobLister listers.CronJobSourceLister
+ deploymentLister appsv1listers.DeploymentLister
+}
+
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ cronjobsourceInformer sourceinformers.CronJobSourceInformer,
+ deploymentInformer appsv1informers.DeploymentInformer,
+) *controller.Impl {
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ cronjobLister: cronjobsourceInformer.Lister(),
+ deploymentLister: deploymentInformer.Lister(),
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+ cronjobsourceInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("CronJobSource")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
+
+ return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the CronJobSource
+// resource with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the CronJobSource resource with this namespace/name
+ original, err := r.cronjobLister.CronJobSources(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("CronJobSource key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Don't modify the informers copy
+ cronjob := original.DeepCopy()
+
+ // Reconcile this copy of the CronJobSource and then write back any status
+ // updates regardless of whether the reconcile errored out.
+ err = r.reconcile(ctx, cronjob)
+ if err != nil {
+ logging.FromContext(ctx).Warn("Error reconciling CronJobSource", zap.Error(err))
+ } else {
+ logging.FromContext(ctx).Debug("CronJobSource reconciled")
+ r.Recorder.Eventf(cronjob, corev1.EventTypeNormal, cronjobReconciled, `CronJobSource reconciled: "%s/%s"`, cronjob.Namespace, cronjob.Name)
+ }
+
+ if _, updateStatusErr := r.updateStatus(ctx, cronjob.DeepCopy()); updateStatusErr != nil {
+ logging.FromContext(ctx).Warn("Failed to update the CronJobSource", zap.Error(updateStatusErr))
+ r.Recorder.Eventf(cronjob, corev1.EventTypeWarning, cronjobUpdateStatusFailed, "Failed to update CronJobSource's status: %v", updateStatusErr)
+ return updateStatusErr
+ }
+
+ // Requeue if the resource is not ready:
+ return err
+}
+
+func (r *Reconciler) reconcile(ctx context.Context, cronjob *v1alpha1.CronJobSource) error {
+ // This Source attempts to reconcile three things.
+ // 1. Determine the sink's URI.
+ // - Nothing to delete.
+ // 2. Create a receive adapter in the form of a Deployment.
+ // - Will be garbage collected by K8s when this CronJobSource is deleted.
+
+ cronjob.Status.InitializeConditions()
+
+ _, err := cron.ParseStandard(cronjob.Spec.Schedule)
+ if err != nil {
+ cronjob.Status.MarkInvalidSchedule("Invalid", "")
+ return err
+ }
+ cronjob.Status.MarkSchedule()
+ sinkURI, err := duck.GetSinkURI(ctx, r.DynamicClientSet, cronjob.Spec.Sink, cronjob.Namespace)
+ if err != nil {
+ cronjob.Status.MarkNoSink("NotFound", "")
+ return err
+ }
+ cronjob.Status.MarkSink(sinkURI)
+
+ _, err = r.createReceiveAdapter(ctx, cronjob, sinkURI)
+ if err != nil {
+ r.Logger.Error("Unable to create the receive adapter", zap.Error(err))
+ return err
+ }
+ cronjob.Status.MarkDeployed()
+ return nil
+}
+
+func (r *Reconciler) getReceiveAdapterImage() string {
+ if r.receiveAdapterImage == "" {
+ r.once.Do(func() {
+ raImage, defined := os.LookupEnv(raImageEnvVar)
+ if !defined {
+ panic(fmt.Errorf("required environment variable %q not defined", raImageEnvVar))
+ }
+ r.receiveAdapterImage = raImage
+ })
+ }
+ return r.receiveAdapterImage
+}
+
+func (r *Reconciler) createReceiveAdapter(ctx context.Context, src *v1alpha1.CronJobSource, sinkURI string) (*appsv1.Deployment, error) {
+ ra, err := r.getReceiveAdapter(ctx, src)
+ if err != nil && !apierrors.IsNotFound(err) {
+ logging.FromContext(ctx).Error("Unable to get an existing receive adapter", zap.Error(err))
+ return nil, err
+ }
+ adapterArgs := resources.ReceiveAdapterArgs{
+ Image: r.getReceiveAdapterImage(),
+ Source: src,
+ Labels: resources.Labels(src.Name),
+ SinkURI: sinkURI,
+ }
+ expected := resources.MakeReceiveAdapter(&adapterArgs)
+ if ra != nil {
+ if r.podSpecChanged(ra.Spec.Template.Spec, expected.Spec.Template.Spec) {
+ ra.Spec.Template.Spec = expected.Spec.Template.Spec
+ if ra, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Update(ra); err != nil {
+ return ra, err
+ }
+ logging.FromContext(ctx).Desugar().Info("Receive Adapter updated.", zap.Any("receiveAdapter", ra))
+ } else {
+ logging.FromContext(ctx).Desugar().Info("Reusing existing receive adapter", zap.Any("receiveAdapter", ra))
+ }
+ return ra, nil
+ }
+
+ if ra, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(expected); err != nil {
+ return nil, err
+ }
+ logging.FromContext(ctx).Desugar().Info("Receive Adapter created.", zap.Any("receiveAdapter", expected))
+ return ra, err
+}
+
+func (r *Reconciler) podSpecChanged(oldPodSpec corev1.PodSpec, newPodSpec corev1.PodSpec) bool {
+ if !equality.Semantic.DeepDerivative(newPodSpec, oldPodSpec) {
+ return true
+ }
+ if len(oldPodSpec.Containers) != len(newPodSpec.Containers) {
+ return true
+ }
+ for i := range newPodSpec.Containers {
+ if !equality.Semantic.DeepEqual(newPodSpec.Containers[i].Env, oldPodSpec.Containers[i].Env) {
+ return true
+ }
+ }
+ return false
+}
+
+func (r *Reconciler) getReceiveAdapter(ctx context.Context, src *v1alpha1.CronJobSource) (*appsv1.Deployment, error) {
+ dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
+ LabelSelector: r.getLabelSelector(src).String(),
+ })
+ if err != nil {
+ logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
+ return nil, err
+ }
+ for _, dep := range dl.Items {
+ if metav1.IsControlledBy(&dep, src) {
+ return &dep, nil
+ }
+ }
+ return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
+}
+
+func (r *Reconciler) getLabelSelector(src *v1alpha1.CronJobSource) labels.Selector {
+ return labels.SelectorFromSet(resources.Labels(src.Name))
+}
+
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.CronJobSource) (*v1alpha1.CronJobSource, error) {
+ cronjob, err := r.cronjobLister.CronJobSources(desired.Namespace).Get(desired.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there's nothing to update, just return.
+ if reflect.DeepEqual(cronjob.Status, desired.Status) {
+ return cronjob, nil
+ }
+
+ becomesReady := desired.Status.IsReady() && !cronjob.Status.IsReady()
+
+ // Don't modify the informers copy.
+ existing := cronjob.DeepCopy()
+ existing.Status = desired.Status
+
+ cj, err := r.EventingClientSet.SourcesV1alpha1().CronJobSources(desired.Namespace).UpdateStatus(existing)
+ if err == nil && becomesReady {
+ duration := time.Since(cj.ObjectMeta.CreationTimestamp.Time)
+ r.Logger.Infof("CronJobSource %q became ready after %v", cronjob.Name, duration)
+ //r.StatsReporter.ReportServiceReady(subscription.Namespace, subscription.Name, duration) // TODO: stats
+ }
+
+ return cj, err
+}
diff --git a/pkg/reconciler/cronjobsource/cronjobsource_test.go b/pkg/reconciler/cronjobsource/cronjobsource_test.go
new file mode 100644
index 00000000000..a971cb1e4e9
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/cronjobsource_test.go
@@ -0,0 +1,307 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cronjobsource
+
+import (
+ "github.com/knative/eventing/pkg/reconciler/cronjobsource/resources"
+ "github.com/knative/eventing/pkg/utils"
+ "k8s.io/apimachinery/pkg/runtime"
+ "os"
+ "testing"
+
+ clientgotesting "k8s.io/client-go/testing"
+
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/reconciler"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ //"k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ v1 "k8s.io/api/apps/v1"
+)
+
+var (
+ // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
+ // truncates to seconds to match the loss of precision during serialization.
+ deletionTime = metav1.Now().Rfc3339Copy()
+
+ trueVal = true
+
+ sinkGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "Channel",
+ }
+ sinkRef = corev1.ObjectReference{
+ Name: sinkName,
+ Kind: "Channel",
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ }
+ sinkDNS = "sink.mynamespace.svc." + utils.GetClusterDomainName()
+ sinkURI = "http://" + sinkDNS + "/"
+)
+
+const (
+ image = "github.com/knative/test/image"
+ sourceName = "test-cronjob-source"
+ sourceUID = "1234-5678-90"
+ testNS = "testnamespace"
+ testSchedule = "*/2 * * * *"
+ testData = "data"
+
+ sinkName = "testsink"
+)
+
+func init() {
+ // Add types to scheme
+ _ = v1.AddToScheme(scheme.Scheme)
+ _ = corev1.AddToScheme(scheme.Scheme)
+ _ = duckv1alpha1.AddToScheme(scheme.Scheme)
+
+ _ = os.Setenv("CRONJOB_RA_IMAGE", image)
+}
+
+func TestAllCases(t *testing.T) {
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ }, {
+ Name: "invalid schedule",
+ Objects: []runtime.Object{
+ NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: "invalid schedule",
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ //WantEvents: []string{
+ // Eventf(corev1.EventTypeWarning, "Fail", ""), // TODO: BUGBUGBUG This should make an event.
+ //},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: "invalid schedule",
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitCronJobSourceConditions,
+ WithInvalidCronJobSourceSchedule,
+ ),
+ }},
+ }, {
+ Name: "missing sink",
+ Objects: []runtime.Object{
+ NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitCronJobSourceConditions,
+ WithValidCronJobSourceSchedule,
+ WithCronJobSourceSinkNotFound,
+ ),
+ }},
+ }, {
+ Name: "valid",
+ Objects: []runtime.Object{
+ NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ ),
+ NewChannel(sinkName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(sinkDNS),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "CronJobSourceReconciled", `CronJobSource reconciled: "%s/%s"`, testNS, sourceName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitCronJobSourceConditions,
+ WithValidCronJobSourceSchedule,
+ WithCronJobSourceDeployed,
+ WithCronJobSourceSink(sinkURI),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeReceiveAdapter(),
+ },
+ }, {
+ Name: "valid, existing ra",
+ Objects: []runtime.Object{
+ NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ ),
+ NewChannel(sinkName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(sinkDNS),
+ ),
+ makeReceiveAdapter(),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "CronJobSourceReconciled", `CronJobSource reconciled: "%s/%s"`, testNS, sourceName),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitCronJobSourceConditions,
+ WithValidCronJobSourceSchedule,
+ WithCronJobSourceDeployed,
+ WithCronJobSourceSink(sinkURI),
+ ),
+ }},
+ }, {
+ Name: "valid, no change",
+ Objects: []runtime.Object{
+ NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ }),
+ WithInitCronJobSourceConditions,
+ WithValidCronJobSourceSchedule,
+ WithCronJobSourceDeployed,
+ WithCronJobSourceSink(sinkURI),
+ ),
+ NewChannel(sinkName, testNS,
+ WithInitChannelConditions,
+ WithChannelAddress(sinkDNS),
+ ),
+ makeReceiveAdapter(),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "CronJobSourceReconciled", `CronJobSource reconciled: "%s/%s"`, testNS, sourceName),
+ },
+ },
+ }
+
+ defer logtesting.ClearAll()
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ cronjobLister: listers.GetCronJobSourceLister(),
+ deploymentLister: listers.GetDeploymentLister(),
+ }
+ }))
+
+}
+
+func TestNew(t *testing.T) {
+ defer logtesting.ClearAll()
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+ eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0)
+ kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+
+ cronjobInformer := eventingInformer.Sources().V1alpha1().CronJobSources()
+ deploymentInformer := kubeInformer.Apps().V1().Deployments()
+
+ c := NewController(reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ },
+ cronjobInformer,
+ deploymentInformer,
+ )
+
+ if c == nil {
+ t.Fatal("Expected NewController to return a non-nil value")
+ }
+}
+
+func makeReceiveAdapter() *v1.Deployment {
+ source := NewCronSourceJob(sourceName, testNS,
+ WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
+ Schedule: testSchedule,
+ Data: testData,
+ Sink: &sinkRef,
+ },
+ ),
+ // Status Update:
+ WithInitCronJobSourceConditions,
+ WithValidCronJobSourceSchedule,
+ WithCronJobSourceDeployed,
+ WithCronJobSourceSink(sinkURI),
+ )
+
+ args := resources.ReceiveAdapterArgs{
+ Image: image,
+ Source: source,
+ Labels: resources.Labels(sourceName),
+ SinkURI: sinkURI,
+ }
+ return resources.MakeReceiveAdapter(&args)
+}
diff --git a/pkg/reconciler/cronjobsource/doc.go b/pkg/reconciler/cronjobsource/doc.go
new file mode 100644
index 00000000000..aa960fc7bad
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cronjobsource implements the CronJobSource controller.
+package cronjobsource
diff --git a/pkg/reconciler/cronjobsource/resources/labels.go b/pkg/reconciler/cronjobsource/resources/labels.go
new file mode 100644
index 00000000000..04dc6098fda
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/resources/labels.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+const (
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "cronjob-source-controller"
+)
+
+func Labels(name string) map[string]string {
+ return map[string]string{
+ "knative-eventing-source": controllerAgentName,
+ "knative-eventing-source-name": name,
+ }
+}
diff --git a/pkg/reconciler/cronjobsource/resources/receive_adapter.go b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
new file mode 100644
index 00000000000..9f516db0cab
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "fmt"
+ "github.com/knative/pkg/kmeta"
+
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+)
+
+// ReceiveAdapterArgs are the arguments needed to create a Cron Job Source Receive Adapter. Every
+// field is required.
+type ReceiveAdapterArgs struct {
+ Image string
+ Source *v1alpha1.CronJobSource
+ Labels map[string]string
+ SinkURI string
+}
+
+// MakeReceiveAdapter generates (but does not insert into K8s) the Receive Adapter Deployment for
+// Cron Job Sources.
+func MakeReceiveAdapter(args *ReceiveAdapterArgs) *v1.Deployment {
+ replicas := int32(1)
+ return &v1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: args.Source.Namespace,
+ GenerateName: fmt.Sprintf("cronjob-%s-", args.Source.Name),
+ Labels: args.Labels,
+ OwnerReferences: []metav1.OwnerReference{
+ *kmeta.NewControllerRef(args.Source),
+ },
+ },
+ Spec: v1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: args.Labels,
+ },
+ Replicas: &replicas,
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "true", // TODO this might be removed.
+ },
+ Labels: args.Labels,
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: args.Source.Spec.ServiceAccountName,
+ Containers: []corev1.Container{
+ {
+ Name: "receive-adapter",
+ Image: args.Image,
+ Env: []corev1.EnvVar{
+ {
+ Name: "SCHEDULE",
+ Value: args.Source.Spec.Schedule,
+ },
+ {
+ Name: "DATA",
+ Value: args.Source.Spec.Data,
+ },
+ {
+ Name: "SINK_URI",
+ Value: args.SinkURI,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
diff --git a/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go b/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
new file mode 100644
index 00000000000..e8b2c5fd1df
--- /dev/null
+++ b/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "testing"
+
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+)
+
+func TestMakeReceiveAdapter(t *testing.T) {
+ src := &v1alpha1.CronJobSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "source-name",
+ Namespace: "source-namespace",
+ },
+ Spec: v1alpha1.CronJobSourceSpec{
+ ServiceAccountName: "source-svc-acct",
+ Schedule: "*/2 * * * *",
+ Data: "data",
+ },
+ }
+
+ got := MakeReceiveAdapter(&ReceiveAdapterArgs{
+ Image: "test-image",
+ Source: src,
+ Labels: map[string]string{
+ "test-key1": "test-value1",
+ "test-key2": "test-value2",
+ },
+ SinkURI: "sink-uri",
+ })
+
+ one := int32(1)
+ yes := true
+ want := &v1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: "source-namespace",
+ GenerateName: "cronjob-source-name-",
+ Labels: map[string]string{
+ "test-key1": "test-value1",
+ "test-key2": "test-value2",
+ },
+ OwnerReferences: []metav1.OwnerReference{{
+ APIVersion: "sources.eventing.knative.dev/v1alpha1",
+ Kind: "CronJobSource",
+ Name: "source-name",
+ Controller: &yes,
+ BlockOwnerDeletion: &yes,
+ }},
+ },
+ Spec: v1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "test-key1": "test-value1",
+ "test-key2": "test-value2",
+ },
+ },
+ Replicas: &one,
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "true",
+ },
+ Labels: map[string]string{
+ "test-key1": "test-value1",
+ "test-key2": "test-value2",
+ },
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: "source-svc-acct",
+ Containers: []corev1.Container{
+ {
+ Name: "receive-adapter",
+ Image: "test-image",
+ Env: []corev1.EnvVar{
+ {
+ Name: "SCHEDULE",
+ Value: "*/2 * * * *",
+ },
+ {
+ Name: "DATA",
+ Value: "data",
+ },
+ {
+ Name: "SINK_URI",
+ Value: "sink-uri",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected cron job (-want, +got) = %v", diff)
+ }
+}
diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go
index e241a1b0a70..9c0e9058469 100644
--- a/pkg/reconciler/subscription/subscription.go
+++ b/pkg/reconciler/subscription/subscription.go
@@ -20,17 +20,16 @@ import (
"context"
"encoding/json"
"fmt"
- "k8s.io/apimachinery/pkg/labels"
"reflect"
"time"
- eventingduck "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
+ eventingduckv1alpha1 "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ eventingduck "github.com/knative/eventing/pkg/duck"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
- "github.com/knative/eventing/pkg/utils/resolve"
"github.com/knative/pkg/apis/duck"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
"github.com/knative/pkg/controller"
@@ -40,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
@@ -157,7 +157,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
}
// Verify that `channel` exists.
- if _, err := resolve.ObjectReference(ctx, r.DynamicClientSet, subscription.Namespace, &subscription.Spec.Channel); err != nil {
+ if _, err := eventingduck.ObjectReference(ctx, r.DynamicClientSet, subscription.Namespace, &subscription.Spec.Channel); err != nil {
logging.FromContext(ctx).Warn("Failed to validate Channel exists",
zap.Error(err),
zap.Any("channel", subscription.Spec.Channel))
@@ -165,7 +165,7 @@ func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subsc
return err
}
- subscriberURI, err := resolve.SubscriberSpec(ctx, r.DynamicClientSet, subscription.Namespace, subscription.Spec.Subscriber)
+ subscriberURI, err := eventingduck.SubscriberSpec(ctx, r.DynamicClientSet, subscription.Namespace, subscription.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Warn("Failed to resolve Subscriber",
zap.Error(err),
@@ -266,7 +266,7 @@ func (r *Reconciler) resolveResult(ctx context.Context, namespace string, replyS
if isNilOrEmptyReply(replyStrategy) {
return "", nil
}
- obj, err := resolve.ObjectReference(ctx, r.DynamicClientSet, namespace, replyStrategy.Channel)
+ obj, err := eventingduck.ObjectReference(ctx, r.DynamicClientSet, namespace, replyStrategy.Channel)
if err != nil {
logging.FromContext(ctx).Warn("Failed to fetch ReplyStrategy Channel",
zap.Error(err),
@@ -280,7 +280,7 @@ func (r *Reconciler) resolveResult(ctx context.Context, namespace string, replyS
return "", err
}
if s.Status.Address != nil {
- return resolve.DomainToURL(s.Status.Address.Hostname), nil
+ return eventingduck.DomainToURL(s.Status.Address.Hostname), nil
}
return "", fmt.Errorf("status does not contain address")
}
@@ -331,11 +331,11 @@ func (r *Reconciler) listAllSubscriptionsWithPhysicalChannel(ctx context.Context
return subs, nil
}
-func (r *Reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingduck.Subscribable {
- rv := &eventingduck.Subscribable{}
+func (r *Reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingduckv1alpha1.Subscribable {
+ rv := &eventingduckv1alpha1.Subscribable{}
for _, sub := range subs {
if sub.Status.PhysicalSubscription.SubscriberURI != "" || sub.Status.PhysicalSubscription.ReplyURI != "" {
- rv.Subscribers = append(rv.Subscribers, eventingduck.ChannelSubscriberSpec{
+ rv.Subscribers = append(rv.Subscribers, eventingduckv1alpha1.ChannelSubscriberSpec{
DeprecatedRef: &corev1.ObjectReference{
APIVersion: sub.APIVersion,
Kind: sub.Kind,
@@ -352,13 +352,13 @@ func (r *Reconciler) createSubscribable(subs []v1alpha1.Subscription) *eventingd
return rv
}
-func (r *Reconciler) patchPhysicalFrom(ctx context.Context, namespace string, physicalFrom corev1.ObjectReference, subs *eventingduck.Subscribable) error {
+func (r *Reconciler) patchPhysicalFrom(ctx context.Context, namespace string, physicalFrom corev1.ObjectReference, subs *eventingduckv1alpha1.Subscribable) error {
// First get the original object and convert it to only the bits we care about
- s, err := resolve.ObjectReference(ctx, r.DynamicClientSet, namespace, &physicalFrom)
+ s, err := eventingduck.ObjectReference(ctx, r.DynamicClientSet, namespace, &physicalFrom)
if err != nil {
return err
}
- original := eventingduck.Channel{}
+ original := eventingduckv1alpha1.Channel{}
err = duck.FromUnstructured(s, &original)
if err != nil {
return err
@@ -372,7 +372,7 @@ func (r *Reconciler) patchPhysicalFrom(ctx context.Context, namespace string, ph
return err
}
- resourceClient, err := resolve.ResourceInterface(r.DynamicClientSet, namespace, &physicalFrom)
+ resourceClient, err := eventingduck.ResourceInterface(r.DynamicClientSet, namespace, &physicalFrom)
if err != nil {
logging.FromContext(ctx).Warn("Failed to create dynamic resource client", zap.Error(err))
return err
diff --git a/pkg/reconciler/testing/cronjobsource.go b/pkg/reconciler/testing/cronjobsource.go
new file mode 100644
index 00000000000..97faf4dcf38
--- /dev/null
+++ b/pkg/reconciler/testing/cronjobsource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+)
+
+// CronJobSourceOption enables further configuration of a CronJobSource.
+type CronJobSourceOption func(*v1alpha1.CronJobSource)
+
+// NewCronSourceJob creates a CronJobSource with CronJobSourceOptions.
+func NewCronSourceJob(name, namespace string, o ...CronJobSourceOption) *v1alpha1.CronJobSource {
+ c := &v1alpha1.CronJobSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ }
+ for _, opt := range o {
+ opt(c)
+ }
+ //c.SetDefaults(context.Background()) // TODO: We should add defaults and validation.
+ return c
+}
+
+// WithInitCronJobSourceConditions initializes the CronJobSource's conditions.
+func WithInitCronJobSourceConditions(s *v1alpha1.CronJobSource) {
+ s.Status.InitializeConditions()
+}
+
+func WithValidCronJobSourceSchedule(s *v1alpha1.CronJobSource) {
+ s.Status.MarkSchedule()
+}
+
+func WithInvalidCronJobSourceSchedule(s *v1alpha1.CronJobSource) {
+ s.Status.MarkInvalidSchedule("Invalid", "")
+}
+
+func WithCronJobSourceSinkNotFound(s *v1alpha1.CronJobSource) {
+ s.Status.MarkNoSink("NotFound", "")
+}
+
+func WithCronJobSourceSink(uri string) CronJobSourceOption {
+ return func(s *v1alpha1.CronJobSource) {
+ s.Status.MarkSink(uri)
+ }
+}
+
+func WithCronJobSourceDeployed(s *v1alpha1.CronJobSource) {
+ s.Status.MarkDeployed()
+}
+
+func WithCronJobSourceDeleted(c *v1alpha1.CronJobSource) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ c.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+func WithCronJobSourceSpec(spec v1alpha1.CronJobSourceSpec) CronJobSourceOption {
+ return func(c *v1alpha1.CronJobSource) {
+ c.Spec = spec
+ }
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 7ecff7d57e0..8d634d3a91c 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -18,8 +18,10 @@ package testing
import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
fakeeventingclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
eventinglisters "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ sourcelisters "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
fakesharedclientset "github.com/knative/pkg/client/clientset/versioned/fake"
istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3"
@@ -114,6 +116,10 @@ func (l *Listers) GetVirtualServiceLister() istiolisters.VirtualServiceLister {
return istiolisters.NewVirtualServiceLister(l.indexerFor(&istiov1alpha3.VirtualService{}))
}
+func (l *Listers) GetCronJobSourceLister() sourcelisters.CronJobSourceLister {
+ return sourcelisters.NewCronJobSourceLister(l.indexerFor(&sourcesv1alpha1.CronJobSource{}))
+}
+
// GetGatewayLister gets lister for Istio Gateway resource.
func (l *Listers) GetGatewayLister() istiolisters.GatewayLister {
return istiolisters.NewGatewayLister(l.indexerFor(&istiov1alpha3.Gateway{}))
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
index 2c8976d819c..c1524b20f98 100644
--- a/pkg/reconciler/trigger/trigger.go
+++ b/pkg/reconciler/trigger/trigger.go
@@ -26,6 +26,7 @@ import (
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/duck"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
"github.com/knative/eventing/pkg/reconciler/names"
@@ -33,7 +34,6 @@ import (
"github.com/knative/eventing/pkg/reconciler/trigger/resources"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
- "github.com/knative/eventing/pkg/utils/resolve"
"github.com/knative/pkg/controller"
"github.com/knative/pkg/tracker"
"go.uber.org/zap"
@@ -250,7 +250,7 @@ func (r *Reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
}
}
- subscriberURI, err := resolve.SubscriberSpec(ctx, r.DynamicClientSet, t.Namespace, t.Spec.Subscriber)
+ subscriberURI, err := duck.SubscriberSpec(ctx, r.DynamicClientSet, t.Namespace, t.Spec.Subscriber)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
return err
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index 8b36c02a3be..dcf5fffc254 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -5898,6 +5898,33 @@ official policies, either expressed or implied, of Richard Crowley.
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/robfig/cron
+
+Copyright (C) 2012 Rob Figueiredo
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/spf13/pflag
diff --git a/vendor/github.com/robfig/cron/LICENSE b/vendor/github.com/robfig/cron/LICENSE
new file mode 100644
index 00000000000..3a0f627ffeb
--- /dev/null
+++ b/vendor/github.com/robfig/cron/LICENSE
@@ -0,0 +1,21 @@
+Copyright (C) 2012 Rob Figueiredo
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go
new file mode 100644
index 00000000000..cd6e7b1be91
--- /dev/null
+++ b/vendor/github.com/robfig/cron/constantdelay.go
@@ -0,0 +1,27 @@
+package cron
+
+import "time"
+
+// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
+// It does not support jobs more frequent than once a second.
+type ConstantDelaySchedule struct {
+ Delay time.Duration
+}
+
+// Every returns a crontab Schedule that activates once every duration.
+// Delays of less than a second are not supported (will round up to 1 second).
+// Any fields less than a Second are truncated.
+func Every(duration time.Duration) ConstantDelaySchedule {
+ if duration < time.Second {
+ duration = time.Second
+ }
+ return ConstantDelaySchedule{
+ Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
+ }
+}
+
+// Next returns the next time this should be run.
+// This rounds so that the next activation time will be on the second.
+func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
+ return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
+}
diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go
new file mode 100644
index 00000000000..2318aeb2e7d
--- /dev/null
+++ b/vendor/github.com/robfig/cron/cron.go
@@ -0,0 +1,259 @@
+package cron
+
+import (
+ "log"
+ "runtime"
+ "sort"
+ "time"
+)
+
+// Cron keeps track of any number of entries, invoking the associated func as
+// specified by the schedule. It may be started, stopped, and the entries may
+// be inspected while running.
+type Cron struct {
+ entries []*Entry
+ stop chan struct{}
+ add chan *Entry
+ snapshot chan []*Entry
+ running bool
+ ErrorLog *log.Logger
+ location *time.Location
+}
+
+// Job is an interface for submitted cron jobs.
+type Job interface {
+ Run()
+}
+
+// The Schedule describes a job's duty cycle.
+type Schedule interface {
+ // Return the next activation time, later than the given time.
+ // Next is invoked initially, and then each time the job is run.
+ Next(time.Time) time.Time
+}
+
+// Entry consists of a schedule and the func to execute on that schedule.
+type Entry struct {
+ // The schedule on which this job should be run.
+ Schedule Schedule
+
+ // The next time the job will run. This is the zero time if Cron has not been
+ // started or this entry's schedule is unsatisfiable
+ Next time.Time
+
+ // The last time this job was run. This is the zero time if the job has never
+ // been run.
+ Prev time.Time
+
+ // The Job to run.
+ Job Job
+}
+
+// byTime is a wrapper for sorting the entry array by time
+// (with zero time at the end).
+type byTime []*Entry
+
+func (s byTime) Len() int { return len(s) }
+func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byTime) Less(i, j int) bool {
+ // Two zero times should return false.
+ // Otherwise, zero is "greater" than any other time.
+ // (To sort it at the end of the list.)
+ if s[i].Next.IsZero() {
+ return false
+ }
+ if s[j].Next.IsZero() {
+ return true
+ }
+ return s[i].Next.Before(s[j].Next)
+}
+
+// New returns a new Cron job runner, in the Local time zone.
+func New() *Cron {
+ return NewWithLocation(time.Now().Location())
+}
+
+// NewWithLocation returns a new Cron job runner.
+func NewWithLocation(location *time.Location) *Cron {
+ return &Cron{
+ entries: nil,
+ add: make(chan *Entry),
+ stop: make(chan struct{}),
+ snapshot: make(chan []*Entry),
+ running: false,
+ ErrorLog: nil,
+ location: location,
+ }
+}
+
+// A wrapper that turns a func() into a cron.Job
+type FuncJob func()
+
+func (f FuncJob) Run() { f() }
+
+// AddFunc adds a func to the Cron to be run on the given schedule.
+func (c *Cron) AddFunc(spec string, cmd func()) error {
+ return c.AddJob(spec, FuncJob(cmd))
+}
+
+// AddJob adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) AddJob(spec string, cmd Job) error {
+ schedule, err := Parse(spec)
+ if err != nil {
+ return err
+ }
+ c.Schedule(schedule, cmd)
+ return nil
+}
+
+// Schedule adds a Job to the Cron to be run on the given schedule.
+func (c *Cron) Schedule(schedule Schedule, cmd Job) {
+ entry := &Entry{
+ Schedule: schedule,
+ Job: cmd,
+ }
+ if !c.running {
+ c.entries = append(c.entries, entry)
+ return
+ }
+
+ c.add <- entry
+}
+
+// Entries returns a snapshot of the cron entries.
+func (c *Cron) Entries() []*Entry {
+ if c.running {
+ c.snapshot <- nil
+ x := <-c.snapshot
+ return x
+ }
+ return c.entrySnapshot()
+}
+
+// Location gets the time zone location
+func (c *Cron) Location() *time.Location {
+ return c.location
+}
+
+// Start the cron scheduler in its own go-routine, or no-op if already started.
+func (c *Cron) Start() {
+ if c.running {
+ return
+ }
+ c.running = true
+ go c.run()
+}
+
+// Run the cron scheduler, or no-op if already running.
+func (c *Cron) Run() {
+ if c.running {
+ return
+ }
+ c.running = true
+ c.run()
+}
+
+func (c *Cron) runWithRecovery(j Job) {
+ defer func() {
+ if r := recover(); r != nil {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ c.logf("cron: panic running job: %v\n%s", r, buf)
+ }
+ }()
+ j.Run()
+}
+
+// Run the scheduler. this is private just due to the need to synchronize
+// access to the 'running' state variable.
+func (c *Cron) run() {
+ // Figure out the next activation times for each entry.
+ now := c.now()
+ for _, entry := range c.entries {
+ entry.Next = entry.Schedule.Next(now)
+ }
+
+ for {
+ // Determine the next entry to run.
+ sort.Sort(byTime(c.entries))
+
+ var timer *time.Timer
+ if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
+ // If there are no entries yet, just sleep - it still handles new entries
+ // and stop requests.
+ timer = time.NewTimer(100000 * time.Hour)
+ } else {
+ timer = time.NewTimer(c.entries[0].Next.Sub(now))
+ }
+
+ for {
+ select {
+ case now = <-timer.C:
+ now = now.In(c.location)
+ // Run every entry whose next time was less than now
+ for _, e := range c.entries {
+ if e.Next.After(now) || e.Next.IsZero() {
+ break
+ }
+ go c.runWithRecovery(e.Job)
+ e.Prev = e.Next
+ e.Next = e.Schedule.Next(now)
+ }
+
+ case newEntry := <-c.add:
+ timer.Stop()
+ now = c.now()
+ newEntry.Next = newEntry.Schedule.Next(now)
+ c.entries = append(c.entries, newEntry)
+
+ case <-c.snapshot:
+ c.snapshot <- c.entrySnapshot()
+ continue
+
+ case <-c.stop:
+ timer.Stop()
+ return
+ }
+
+ break
+ }
+ }
+}
+
+// Logs an error to stderr or to the configured error log
+func (c *Cron) logf(format string, args ...interface{}) {
+ if c.ErrorLog != nil {
+ c.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// Stop stops the cron scheduler if it is running; otherwise it does nothing.
+func (c *Cron) Stop() {
+ if !c.running {
+ return
+ }
+ c.stop <- struct{}{}
+ c.running = false
+}
+
+// entrySnapshot returns a copy of the current cron entry list.
+func (c *Cron) entrySnapshot() []*Entry {
+ entries := []*Entry{}
+ for _, e := range c.entries {
+ entries = append(entries, &Entry{
+ Schedule: e.Schedule,
+ Next: e.Next,
+ Prev: e.Prev,
+ Job: e.Job,
+ })
+ }
+ return entries
+}
+
+// now returns current time in c location
+func (c *Cron) now() time.Time {
+ return time.Now().In(c.location)
+}
diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go
new file mode 100644
index 00000000000..d02ec2f3b56
--- /dev/null
+++ b/vendor/github.com/robfig/cron/doc.go
@@ -0,0 +1,129 @@
+/*
+Package cron implements a cron spec parser and job runner.
+
+Usage
+
+Callers may register Funcs to be invoked on a given schedule. Cron will run
+them in their own goroutines.
+
+ c := cron.New()
+ c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+ c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
+ c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
+ c.Start()
+ ..
+ // Funcs are invoked in their own goroutine, asynchronously.
+ ...
+ // Funcs may also be added to a running Cron
+ c.AddFunc("@daily", func() { fmt.Println("Every day") })
+ ..
+ // Inspect the cron job entries' next and previous run times.
+ inspect(c.Entries())
+ ..
+ c.Stop() // Stop the scheduler (does not stop any jobs already running).
+
+CRON Expression Format
+
+A cron expression represents a set of times, using 6 space-separated fields.
+
+ Field name | Mandatory? | Allowed values | Allowed special characters
+ ---------- | ---------- | -------------- | --------------------------
+ Seconds | Yes | 0-59 | * / , -
+ Minutes | Yes | 0-59 | * / , -
+ Hours | Yes | 0-23 | * / , -
+ Day of month | Yes | 1-31 | * / , - ?
+ Month | Yes | 1-12 or JAN-DEC | * / , -
+ Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
+
+Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
+and "sun" are equally accepted.
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+ Entry | Description | Equivalent To
+ ----- | ----------- | -------------
+ @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
+ @monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
+ @weekly | Run once a week, midnight between Sat/Sun | 0 0 0 * * 0
+ @daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
+ @hourly | Run once an hour, beginning of hour | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 00000000000..a5e83c0a8dc
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
+package cron
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parse in.
+type ParseOption int
+
+const (
+ Second ParseOption = 1 << iota // Seconds field, default 0
+ Minute // Minutes field, default 0
+ Hour // Hours field, default 0
+ Dom // Day of month field, default *
+ Month // Month field, default *
+ Dow // Day of week field, default *
+ DowOptional // Optional day of week field, default *
+ Descriptor // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+ Second,
+ Minute,
+ Hour,
+ Dom,
+ Month,
+ Dow,
+}
+
+var defaults = []string{
+ "0",
+ "0",
+ "0",
+ "*",
+ "*",
+ "*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+ options ParseOption
+ optionals int
+}
+
+// Creates a custom Parser with custom options.
+//
+// // Standard parser without descriptors
+// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+// sched, err := specParser.Parse("0 0 15 */3 *")
+//
+// // Same as above, just excludes time fields
+// subsParser := NewParser(Dom | Month | Dow)
+// sched, err := specParser.Parse("15 */3 *")
+//
+// // Same as above, just makes Dow optional
+// subsParser := NewParser(Dom | Month | DowOptional)
+// sched, err := specParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+ optionals := 0
+ if options&DowOptional > 0 {
+ options |= Dow
+ optionals++
+ }
+ return Parser{options, optionals}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+ if len(spec) == 0 {
+ return nil, fmt.Errorf("Empty spec string")
+ }
+ if spec[0] == '@' && p.options&Descriptor > 0 {
+ return parseDescriptor(spec)
+ }
+
+ // Figure out how many fields we need
+ max := 0
+ for _, place := range places {
+ if p.options&place > 0 {
+ max++
+ }
+ }
+ min := max - p.optionals
+
+ // Split fields on whitespace
+ fields := strings.Fields(spec)
+
+ // Validate number of fields
+ if count := len(fields); count < min || count > max {
+ if min == max {
+ return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
+ }
+ return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
+ }
+
+ // Fill in missing fields
+ fields = expandFields(fields, p.options)
+
+ var err error
+ field := func(field string, r bounds) uint64 {
+ if err != nil {
+ return 0
+ }
+ var bits uint64
+ bits, err = getField(field, r)
+ return bits
+ }
+
+ var (
+ second = field(fields[0], seconds)
+ minute = field(fields[1], minutes)
+ hour = field(fields[2], hours)
+ dayofmonth = field(fields[3], dom)
+ month = field(fields[4], months)
+ dayofweek = field(fields[5], dow)
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &SpecSchedule{
+ Second: second,
+ Minute: minute,
+ Hour: hour,
+ Dom: dayofmonth,
+ Month: month,
+ Dow: dayofweek,
+ }, nil
+}
+
+func expandFields(fields []string, options ParseOption) []string {
+ n := 0
+ count := len(fields)
+ expFields := make([]string, len(places))
+ copy(expFields, defaults)
+ for i, place := range places {
+ if options&place > 0 {
+ expFields[i] = fields[n]
+ n++
+ }
+ if n == count {
+ break
+ }
+ }
+ return expFields
+}
+
+var standardParser = NewParser(
+ Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given standardSpec
+// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always
+// pass 5 entries representing: minute, hour, day of month, month and day of week,
+// in that order. It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+// - Standard crontab specs, e.g. "* * * * ?"
+// - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+ return standardParser.Parse(standardSpec)
+}
+
+var defaultParser = NewParser(
+ Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
+)
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+// - Full crontab specs, e.g. "* * * * * ?"
+// - Descriptors, e.g. "@midnight", "@every 1h30m"
+func Parse(spec string) (Schedule, error) {
+ return defaultParser.Parse(spec)
+}
+
+// getField returns an Int with the bits set representing all of the times that
+// the field represents or error parsing field value. A "field" is a comma-separated
+// list of "ranges".
+func getField(field string, r bounds) (uint64, error) {
+ var bits uint64
+ ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
+ for _, expr := range ranges {
+ bit, err := getRange(expr, r)
+ if err != nil {
+ return bits, err
+ }
+ bits |= bit
+ }
+ return bits, nil
+}
+
+// getRange returns the bits indicated by the given expression:
+// number | number "-" number [ "/" number ]
+// or error parsing range.
+func getRange(expr string, r bounds) (uint64, error) {
+ var (
+ start, end, step uint
+ rangeAndStep = strings.Split(expr, "/")
+ lowAndHigh = strings.Split(rangeAndStep[0], "-")
+ singleDigit = len(lowAndHigh) == 1
+ err error
+ )
+
+ var extra uint64
+ if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
+ start = r.min
+ end = r.max
+ extra = starBit
+ } else {
+ start, err = parseIntOrName(lowAndHigh[0], r.names)
+ if err != nil {
+ return 0, err
+ }
+ switch len(lowAndHigh) {
+ case 1:
+ end = start
+ case 2:
+ end, err = parseIntOrName(lowAndHigh[1], r.names)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("Too many hyphens: %s", expr)
+ }
+ }
+
+ switch len(rangeAndStep) {
+ case 1:
+ step = 1
+ case 2:
+ step, err = mustParseInt(rangeAndStep[1])
+ if err != nil {
+ return 0, err
+ }
+
+ // Special handling: "N/step" means "N-max/step".
+ if singleDigit {
+ end = r.max
+ }
+ default:
+ return 0, fmt.Errorf("Too many slashes: %s", expr)
+ }
+
+ if start < r.min {
+ return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
+ }
+ if end > r.max {
+ return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
+ }
+ if start > end {
+ return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
+ }
+ if step == 0 {
+ return 0, fmt.Errorf("Step of range should be a positive number: %s", expr)
+ }
+
+ return getBits(start, end, step) | extra, nil
+}
+
+// parseIntOrName returns the (possibly-named) integer contained in expr.
+func parseIntOrName(expr string, names map[string]uint) (uint, error) {
+ if names != nil {
+ if namedInt, ok := names[strings.ToLower(expr)]; ok {
+ return namedInt, nil
+ }
+ }
+ return mustParseInt(expr)
+}
+
+// mustParseInt parses the given expression as an int or returns an error.
+func mustParseInt(expr string) (uint, error) {
+ num, err := strconv.Atoi(expr)
+ if err != nil {
+ return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err)
+ }
+ if num < 0 {
+ return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr)
+ }
+
+ return uint(num), nil
+}
+
+// getBits sets all bits in the range [min, max], modulo the given step size.
+func getBits(min, max, step uint) uint64 {
+ var bits uint64
+
+ // If step is 1, use shifts.
+ if step == 1 {
+ return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
+ }
+
+ // Else, use a simple loop.
+ for i := min; i <= max; i += step {
+ bits |= 1 << i
+ }
+ return bits
+}
+
+// all returns all bits within the given bounds. (plus the star bit)
+func all(r bounds) uint64 {
+ return getBits(r.min, r.max, 1) | starBit
+}
+
+// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
+func parseDescriptor(descriptor string) (Schedule, error) {
+ switch descriptor {
+ case "@yearly", "@annually":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: 1 << months.min,
+ Dow: all(dow),
+ }, nil
+
+ case "@monthly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+
+ case "@weekly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: 1 << dow.min,
+ }, nil
+
+ case "@daily", "@midnight":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+
+ case "@hourly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: all(hours),
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ }, nil
+ }
+
+ const every = "@every "
+ if strings.HasPrefix(descriptor, every) {
+ duration, err := time.ParseDuration(descriptor[len(every):])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err)
+ }
+ return Every(duration), nil
+ }
+
+ return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor)
+}
diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go
new file mode 100644
index 00000000000..aac9a60b954
--- /dev/null
+++ b/vendor/github.com/robfig/cron/spec.go
@@ -0,0 +1,158 @@
+package cron
+
+import "time"
+
+// SpecSchedule specifies a duty cycle (to the second granularity), based on a
+// traditional crontab specification. It is computed initially and stored as bit sets.
+type SpecSchedule struct {
+ Second, Minute, Hour, Dom, Month, Dow uint64
+}
+
+// bounds provides a range of acceptable values (plus a map of name to value).
+type bounds struct {
+ min, max uint
+ names map[string]uint
+}
+
+// The bounds for each field.
+var (
+ seconds = bounds{0, 59, nil}
+ minutes = bounds{0, 59, nil}
+ hours = bounds{0, 23, nil}
+ dom = bounds{1, 31, nil}
+ months = bounds{1, 12, map[string]uint{
+ "jan": 1,
+ "feb": 2,
+ "mar": 3,
+ "apr": 4,
+ "may": 5,
+ "jun": 6,
+ "jul": 7,
+ "aug": 8,
+ "sep": 9,
+ "oct": 10,
+ "nov": 11,
+ "dec": 12,
+ }}
+ dow = bounds{0, 6, map[string]uint{
+ "sun": 0,
+ "mon": 1,
+ "tue": 2,
+ "wed": 3,
+ "thu": 4,
+ "fri": 5,
+ "sat": 6,
+ }}
+)
+
+const (
+ // Set the top bit if a star was included in the expression.
+ starBit = 1 << 63
+)
+
+// Next returns the next time this schedule is activated, greater than the given
+// time. If no time can be found to satisfy the schedule, return the zero time.
+func (s *SpecSchedule) Next(t time.Time) time.Time {
+ // General approach:
+ // For Month, Day, Hour, Minute, Second:
+ // Check if the time value matches. If yes, continue to the next field.
+ // If the field doesn't match the schedule, then increment the field until it matches.
+ // While incrementing the field, a wrap-around brings it back to the beginning
+ // of the field list (since it is necessary to re-verify previous field
+ // values)
+
+ // Start at the earliest possible time (the upcoming second).
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+ // This flag indicates whether a field has been incremented.
+ added := false
+
+ // If no time is found within five years, return zero.
+ yearLimit := t.Year() + 5
+
+WRAP:
+ if t.Year() > yearLimit {
+ return time.Time{}
+ }
+
+ // Find the first applicable month.
+ // If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+ )
+ if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+ return domMatch && dowMatch
+ }
+ return domMatch || dowMatch
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
index 50d918b8788..feb1157b15b 100644
--- a/vendor/golang.org/x/oauth2/google/appengine.go
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -5,85 +5,34 @@
package google
import (
- "sort"
- "strings"
- "sync"
+ "context"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
)
-// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
-var appengineFlex bool
-
-// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
-// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
var appengineAppIDFunc func(c context.Context) string
-// AppEngineTokenSource returns a token source that fetches tokens
-// issued to the current App Engine application's service account.
-// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
-// that involves user accounts, see oauth2.Config instead.
+// AppEngineTokenSource returns a token source that fetches tokens from either
+// the current application's service account or from the metadata server,
+// depending on the App Engine environment. See below for environment-specific
+// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that
+// involves user accounts, see oauth2.Config instead.
+//
+// First generation App Engine runtimes (<= Go 1.9):
+// AppEngineTokenSource returns a token source that fetches tokens issued to the
+// current App Engine application's service account. The provided context must have
+// come from appengine.NewContext.
//
-// The provided context must have come from appengine.NewContext.
+// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible:
+// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the
+// flexible environment. It delegates to ComputeTokenSource, and the provided
+// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource,
+// which DefaultTokenSource will use in this case) instead.
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
- scopes := append([]string{}, scope...)
- sort.Strings(scopes)
- return &appEngineTokenSource{
- ctx: ctx,
- scopes: scopes,
- key: strings.Join(scopes, " "),
- }
-}
-
-// aeTokens helps the fetched tokens to be reused until their expiration.
-var (
- aeTokensMu sync.Mutex
- aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
-)
-
-type tokenLock struct {
- mu sync.Mutex // guards t; held while fetching or updating t
- t *oauth2.Token
-}
-
-type appEngineTokenSource struct {
- ctx context.Context
- scopes []string
- key string // to aeTokens map; space-separated scopes
-}
-
-func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
- if appengineTokenFunc == nil {
- panic("google: AppEngineTokenSource can only be used on App Engine.")
- }
-
- aeTokensMu.Lock()
- tok, ok := aeTokens[ts.key]
- if !ok {
- tok = &tokenLock{}
- aeTokens[ts.key] = tok
- }
- aeTokensMu.Unlock()
-
- tok.mu.Lock()
- defer tok.mu.Unlock()
- if tok.t.Valid() {
- return tok.t, nil
- }
- access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
- if err != nil {
- return nil, err
- }
- tok.t = &oauth2.Token{
- AccessToken: access,
- Expiry: exp,
- }
- return tok.t, nil
+ return appEngineTokenSource(ctx, scope...)
}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
new file mode 100644
index 00000000000..83dacac320a
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// This file applies to App Engine first generation runtimes (<= Go 1.9).
+
+package google
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/appengine"
+)
+
+func init() {
+ appengineTokenFunc = appengine.AccessToken
+ appengineAppIDFunc = appengine.AppID
+}
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ scopes := append([]string{}, scope...)
+ sort.Strings(scopes)
+ return &gaeTokenSource{
+ ctx: ctx,
+ scopes: scopes,
+ key: strings.Join(scopes, " "),
+ }
+}
+
+// aeTokens helps the fetched tokens to be reused until their expiration.
+var (
+ aeTokensMu sync.Mutex
+ aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while fetching or updating t
+ t *oauth2.Token
+}
+
+type gaeTokenSource struct {
+ ctx context.Context
+ scopes []string
+ key string // to aeTokens map; space-separated scopes
+}
+
+func (ts *gaeTokenSource) Token() (*oauth2.Token, error) {
+ aeTokensMu.Lock()
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
new file mode 100644
index 00000000000..04c2c2216af
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible.
+
+package google
+
+import (
+ "context"
+ "log"
+ "sync"
+
+ "golang.org/x/oauth2"
+)
+
+var logOnce sync.Once // only spam about deprecation once
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+ logOnce.Do(func() {
+ log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.")
+ })
+ return ComputeTokenSource("")
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
deleted file mode 100644
index 56669eaa98d..00000000000
--- a/vendor/golang.org/x/oauth2/google/appengine_hook.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine appenginevm
-
-package google
-
-import "google.golang.org/appengine"
-
-func init() {
- appengineTokenFunc = appengine.AccessToken
- appengineAppIDFunc = appengine.AppID
-}
diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
deleted file mode 100644
index 5d0231af2dd..00000000000
--- a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package google
-
-func init() {
- appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server.
-}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index a31607437d3..ad2c09236c5 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -5,6 +5,7 @@
package google
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -14,10 +15,28 @@ import (
"runtime"
"cloud.google.com/go/compute/metadata"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
)
+// Credentials holds Google credentials, including "Application Default Credentials".
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+type Credentials struct {
+ ProjectID string // may be empty
+ TokenSource oauth2.TokenSource
+
+ // JSON contains the raw bytes from a JSON credentials file.
+ // This field may be nil if authentication is provided by the
+ // environment and not with a credentials file, e.g. when code is
+ // running on Google Cloud Platform.
+ JSON []byte
+}
+
+// DefaultCredentials is the old name of Credentials.
+//
+// Deprecated: use Credentials instead.
+type DefaultCredentials = Credentials
+
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
@@ -39,8 +58,22 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc
return creds.TokenSource, nil
}
-// Common implementation for FindDefaultCredentials.
-func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) {
+// FindDefaultCredentials searches for "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+// 1. A JSON file whose path is specified by the
+// GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// 2. A JSON file in a location known to the gcloud command-line tool.
+// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses
+// the appengine.AccessToken function.
+// 4. On Google Compute Engine, Google App Engine standard second generation runtimes
+// (>= Go 1.11), and Google App Engine flexible environment, it fetches
+// credentials from the metadata server.
+func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
@@ -59,20 +92,23 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
- // Third, if we're on Google App Engine use those credentials.
- if appengineTokenFunc != nil && !appengineFlex {
+ // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9)
+ // use those credentials. App Engine standard second generation runtimes (>= Go 1.11)
+ // and App Engine flexible use ComputeTokenSource and the metadata server.
+ if appengineTokenFunc != nil {
return &DefaultCredentials{
ProjectID: appengineAppIDFunc(ctx),
TokenSource: AppEngineTokenSource(ctx, scopes...),
}, nil
}
- // Fourth, if we're on Google Compute Engine use the metadata server.
+ // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime,
+ // or App Engine flexible, use the metadata server.
if metadata.OnGCE() {
id, _ := metadata.ProjectID()
return &DefaultCredentials{
ProjectID: id,
- TokenSource: ComputeTokenSource(""),
+ TokenSource: ComputeTokenSource("", scopes...),
}, nil
}
@@ -81,8 +117,11 @@ func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCrede
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
-// Common implementation for CredentialsFromJSON.
-func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) {
+// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
+// represent either a Google Developers Console client_credentials.json file (as in
+// ConfigFromJSON) or a Google Developers service account key file (as in
+// JWTConfigFromJSON).
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
var f credentialsFile
if err := json.Unmarshal(jsonData, &f); err != nil {
return nil, err
diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc.go
similarity index 99%
rename from vendor/golang.org/x/oauth2/google/doc_go19.go
rename to vendor/golang.org/x/oauth2/google/doc.go
index 2a86325fe3b..73be629033d 100644
--- a/vendor/golang.org/x/oauth2/google/doc_go19.go
+++ b/vendor/golang.org/x/oauth2/google/doc.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build go1.9
-
// Package google provides support for making OAuth2 authorized and authenticated
// HTTP requests to Google APIs. It supports the Web server flow, client-side
// credentials, service accounts, Google Compute Engine service accounts, and Google
diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go
deleted file mode 100644
index 5c3c6e14812..00000000000
--- a/vendor/golang.org/x/oauth2/google/doc_not_go19.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-// Package google provides support for making OAuth2 authorized and authenticated
-// HTTP requests to Google APIs. It supports the Web server flow, client-side
-// credentials, service accounts, Google Compute Engine service accounts, and Google
-// App Engine service accounts.
-//
-// A brief overview of the package follows. For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-//
-// OAuth2 Configs
-//
-// Two functions in this package return golang.org/x/oauth2.Config values from Google credential
-// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
-// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
-// create an http.Client.
-//
-//
-// Credentials
-//
-// The DefaultCredentials type represents Google Application Default Credentials, as
-// well as other forms of credential.
-//
-// Use FindDefaultCredentials to obtain Application Default Credentials.
-// FindDefaultCredentials looks in some well-known places for a credentials file, and
-// will call AppEngineTokenSource or ComputeTokenSource as needed.
-//
-// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
-// then use the credentials to construct an http.Client or an oauth2.TokenSource.
-//
-// Use CredentialsFromJSON to obtain credentials from either of the two JSON
-// formats described in OAuth2 Configs, above. (The DefaultCredentials returned may
-// not be "Application Default Credentials".) The TokenSource in the returned value
-// is the same as the one obtained from the oauth2.Config returned from
-// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain
-// additional information that is useful is some circumstances.
-package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go
deleted file mode 100644
index 4d0318b1e16..00000000000
--- a/vendor/golang.org/x/oauth2/google/go19.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-package google
-
-import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
-)
-
-// Credentials holds Google credentials, including "Application Default Credentials".
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-type Credentials struct {
- ProjectID string // may be empty
- TokenSource oauth2.TokenSource
-
- // JSON contains the raw bytes from a JSON credentials file.
- // This field may be nil if authentication is provided by the
- // environment and not with a credentials file, e.g. when code is
- // running on Google Cloud Platform.
- JSON []byte
-}
-
-// DefaultCredentials is the old name of Credentials.
-//
-// Deprecated: use Credentials instead.
-type DefaultCredentials = Credentials
-
-// FindDefaultCredentials searches for "Application Default Credentials".
-//
-// It looks for credentials in the following places,
-// preferring the first location found:
-//
-// 1. A JSON file whose path is specified by the
-// GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// 2. A JSON file in a location known to the gcloud command-line tool.
-// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
-// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
-// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) {
- return findDefaultCredentials(ctx, scopes)
-}
-
-// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
-// represent either a Google Developers Console client_credentials.json file (as in
-// ConfigFromJSON) or a Google Developers service account key file (as in
-// JWTConfigFromJSON).
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) {
- return credentialsFromJSON(ctx, jsonData, scopes)
-}
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index f7481fbcc63..6eb2aa95f5b 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -5,26 +5,28 @@
package google
import (
+ "context"
"encoding/json"
"errors"
"fmt"
+ "net/url"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
)
// Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
- AuthURL: "https://accounts.google.com/o/oauth2/auth",
- TokenURL: "https://accounts.google.com/o/oauth2/token",
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://oauth2.googleapis.com/token",
+ AuthStyle: oauth2.AuthStyleInParams,
}
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
-const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+const JWTTokenURL = "https://oauth2.googleapis.com/token"
// ConfigFromJSON uses a Google Developers Console client_credentials.json
// file to construct a config.
@@ -150,14 +152,16 @@ func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oau
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
// this token source if your program is running on a GCE instance.
// If no account is specified, "default" is used.
+// If no scopes are specified, a set of default scopes are automatically granted.
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
-func ComputeTokenSource(account string) oauth2.TokenSource {
- return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope})
}
type computeSource struct {
account string
+ scopes []string
}
func (cs computeSource) Token() (*oauth2.Token, error) {
@@ -168,7 +172,13 @@ func (cs computeSource) Token() (*oauth2.Token, error) {
if acct == "" {
acct = "default"
}
- tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ tokenURI := "instance/service-accounts/" + acct + "/token"
+ if len(cs.scopes) > 0 {
+ v := url.Values{}
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ tokenURI = tokenURI + "?" + v.Encode()
+ }
+ tokenJSON, err := metadata.Get(tokenURI)
if err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go
deleted file mode 100644
index 544e40624e1..00000000000
--- a/vendor/golang.org/x/oauth2/google/not_go19.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-package google
-
-import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
-)
-
-// DefaultCredentials holds Google credentials, including "Application Default Credentials".
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-type DefaultCredentials struct {
- ProjectID string // may be empty
- TokenSource oauth2.TokenSource
-
- // JSON contains the raw bytes from a JSON credentials file.
- // This field may be nil if authentication is provided by the
- // environment and not with a credentials file, e.g. when code is
- // running on Google Cloud Platform.
- JSON []byte
-}
-
-// FindDefaultCredentials searches for "Application Default Credentials".
-//
-// It looks for credentials in the following places,
-// preferring the first location found:
-//
-// 1. A JSON file whose path is specified by the
-// GOOGLE_APPLICATION_CREDENTIALS environment variable.
-// 2. A JSON file in a location known to the gcloud command-line tool.
-// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
-// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// 3. On Google App Engine it uses the appengine.AccessToken function.
-// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
-// credentials from the metadata server.
-// (In this final case any provided scopes are ignored.)
-func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) {
- return findDefaultCredentials(ctx, scopes)
-}
-
-// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can
-// represent either a Google Developers Console client_credentials.json file (as in
-// ConfigFromJSON) or a Google Developers service account key file (as in
-// JWTConfigFromJSON).
-//
-// Note: despite the name, the returned credentials may not be Application Default Credentials.
-func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) {
- return credentialsFromJSON(ctx, jsonData, scopes)
-}
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
index b9660caddf0..456224bc789 100644
--- a/vendor/golang.org/x/oauth2/google/sdk.go
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -6,6 +6,7 @@ package google
import (
"bufio"
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -18,7 +19,6 @@ import (
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
)
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
index fc63fcab3ff..c0ab196cf46 100644
--- a/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -26,7 +26,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index 30fb315d139..355c386961d 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -5,19 +5,21 @@
package internal
import (
+ "context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
+ "math"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
+ "sync"
"time"
- "golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
@@ -61,22 +63,21 @@ type tokenJSON struct {
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
- Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
- if v := e.Expires; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
return
}
type expirationTime int32
func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 || string(b) == "null" {
+ return nil
+ }
var n json.Number
err := json.Unmarshal(b, &n)
if err != nil {
@@ -86,97 +87,78 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
if err != nil {
return err
}
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
*e = expirationTime(i)
return nil
}
-var brokenAuthHeaderProviders = []string{
- "https://accounts.google.com/",
- "https://api.codeswholesale.com/oauth/token",
- "https://api.dropbox.com/",
- "https://api.dropboxapi.com/",
- "https://api.instagram.com/",
- "https://api.netatmo.net/",
- "https://api.odnoklassniki.ru/",
- "https://api.pushbullet.com/",
- "https://api.soundcloud.com/",
- "https://api.twitch.tv/",
- "https://app.box.com/",
- "https://connect.stripe.com/",
- "https://login.mailchimp.com/",
- "https://login.microsoftonline.com/",
- "https://login.salesforce.com/",
- "https://login.windows.net",
- "https://login.live.com/",
- "https://oauth.sandbox.trainingpeaks.com/",
- "https://oauth.trainingpeaks.com/",
- "https://oauth.vk.com/",
- "https://openapi.baidu.com/",
- "https://slack.com/",
- "https://test-sandbox.auth.corp.google.com",
- "https://test.salesforce.com/",
- "https://user.gini.net/",
- "https://www.douban.com/",
- "https://www.googleapis.com/",
- "https://www.linkedin.com/",
- "https://www.strava.com/oauth/",
- "https://www.wunderlist.com/oauth/",
- "https://api.patreon.com/",
- "https://sandbox.codeswholesale.com/oauth/token",
- "https://api.sipgate.com/v1/authorization/oauth",
- "https://api.medium.com/v1/tokens",
- "https://log.finalsurge.com/oauth/token",
- "https://multisport.todaysplan.com.au/rest/oauth/access_token",
- "https://whats.todaysplan.com.au/rest/oauth/access_token",
-}
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
+type AuthStyle int
-// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
-var brokenAuthHeaderDomains = []string{
- ".auth0.com",
- ".force.com",
- ".myshopify.com",
- ".okta.com",
- ".oktapreview.com",
+const (
+ AuthStyleUnknown AuthStyle = 0
+ AuthStyleInParams AuthStyle = 1
+ AuthStyleInHeader AuthStyle = 2
+)
+
+// authStyleCache is the set of tokenURLs we've successfully used via
+// RetrieveToken and which style auth we ended up using.
+// It's called a cache, but it doesn't (yet?) shrink. It's expected that
+// the set of OAuth2 servers a program contacts over time is fixed and
+// small.
+var authStyleCache struct {
+ sync.Mutex
+ m map[string]AuthStyle // keyed by tokenURL
}
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+// ResetAuthCache resets the global authentication style cache used
+// for AuthStyleUnknown token requests.
+func ResetAuthCache() {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ authStyleCache.m = nil
}
-// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
-// implements the OAuth2 spec correctly
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-// In summary:
-// - Reddit only accepts client secret in the Authorization header
-// - Dropbox accepts either it in URL param or Auth header, but not both.
-// - Google only accepts URL param (not spec compliant?), not Auth header
-// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
-func providerAuthHeaderWorks(tokenURL string) bool {
- for _, s := range brokenAuthHeaderProviders {
- if strings.HasPrefix(tokenURL, s) {
- // Some sites fail to implement the OAuth2 spec fully.
- return false
- }
- }
+// lookupAuthStyle reports which auth style we last used with tokenURL
+// when calling RetrieveToken and whether we have ever done so.
+func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ style, ok = authStyleCache.m[tokenURL]
+ return
+}
- if u, err := url.Parse(tokenURL); err == nil {
- for _, s := range brokenAuthHeaderDomains {
- if strings.HasSuffix(u.Host, s) {
- return false
- }
- }
+// setAuthStyle adds an entry to authStyleCache, documented above.
+func setAuthStyle(tokenURL string, v AuthStyle) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ if authStyleCache.m == nil {
+ authStyleCache.m = make(map[string]AuthStyle)
}
-
- // Assume the provider implements the spec properly
- // otherwise. We can add more exceptions as they're
- // discovered. We will _not_ be adding configurable hooks
- // to this package to let users select server bugs.
- return true
+ authStyleCache.m[tokenURL] = v
}
-func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
- bustedAuth := !providerAuthHeaderWorks(tokenURL)
- if bustedAuth {
+// newTokenRequest returns a new *http.Request to retrieve a new token
+// from tokenURL using the provided clientID, clientSecret, and POST
+// body parameters.
+//
+// inParams is whether the clientID & clientSecret should be encoded
+// as the POST body. An 'inParams' value of true means to send it in
+// the POST body (along with any values in v); false means to send it
+// in the Authorization header.
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
+ if authStyle == AuthStyleInParams {
+ v = cloneURLValues(v)
if clientID != "" {
v.Set("client_id", clientID)
}
@@ -189,15 +171,70 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if !bustedAuth {
+ if authStyle == AuthStyleInHeader {
req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
}
+ return req, nil
+}
+
+func cloneURLValues(v url.Values) url.Values {
+ v2 := make(url.Values, len(v))
+ for k, vv := range v {
+ v2[k] = append([]string(nil), vv...)
+ }
+ return v2
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
+ needsAuthStyleProbe := authStyle == 0
+ if needsAuthStyleProbe {
+ if style, ok := lookupAuthStyle(tokenURL); ok {
+ authStyle = style
+ needsAuthStyleProbe = false
+ } else {
+ authStyle = AuthStyleInHeader // the first way we'll try
+ }
+ }
+ req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ if err != nil {
+ return nil, err
+ }
+ token, err := doTokenRoundTrip(ctx, req)
+ if err != nil && needsAuthStyleProbe {
+ // If we get an error, assume the server wants the
+ // clientID & clientSecret in a different form.
+ // See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+ // In summary:
+ // - Reddit only accepts client secret in the Authorization header
+ // - Dropbox accepts either it in URL param or Auth header, but not both.
+ // - Google only accepts URL param (not spec compliant?), not Auth header
+ // - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+ //
+ // We used to maintain a big table in this code of all the sites and which way
+ // they went, but maintaining it didn't scale & got annoying.
+ // So just try both ways.
+ authStyle = AuthStyleInParams // the second way we'll try
+ req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ token, err = doTokenRoundTrip(ctx, req)
+ }
+ if needsAuthStyleProbe && err == nil {
+ setAuthStyle(tokenURL, authStyle)
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token != nil && token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, err
+}
+
+func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
if err != nil {
return nil, err
}
- defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ r.Body.Close()
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
@@ -223,12 +260,6 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
Raw: vals,
}
e := vals.Get("expires_in")
- if e == "" {
- // TODO(jbd): Facebook's OAuth2 implementation is broken and
- // returns expires_in field in expires. Remove the fallback to expires,
- // when Facebook fixes their implementation.
- e = vals.Get("expires")
- }
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
@@ -247,13 +278,8 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
- // Don't overwrite `RefreshToken` with an empty value
- // if this was a token refreshing request.
- if token.RefreshToken == "" {
- token.RefreshToken = v.Get("refresh_token")
- }
if token.AccessToken == "" {
- return token, errors.New("oauth2: server response missing access_token")
+ return nil, errors.New("oauth2: server response missing access_token")
}
return token, nil
}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
index d16f9ae1fea..572074a637d 100644
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -5,9 +5,8 @@
package internal
import (
+ "context"
"net/http"
-
- "golang.org/x/net/context"
)
// HTTPClient is the context key to use with golang.org/x/net/context's
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
index e08f3159590..99f3e0a32c8 100644
--- a/vendor/golang.org/x/oauth2/jwt/jwt.go
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -9,6 +9,7 @@
package jwt
import (
+ "context"
"encoding/json"
"fmt"
"io"
@@ -18,7 +19,6 @@ import (
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
@@ -61,6 +61,11 @@ type Config struct {
// Expires optionally specifies how long the token is valid for.
Expires time.Duration
+
+ // Audience optionally specifies the intended audience of the
+ // request. If empty, the value of TokenURL is used as the
+ // intended audience.
+ Audience string
}
// TokenSource returns a JWT TokenSource using the configuration
@@ -105,6 +110,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) {
if t := js.conf.Expires; t > 0 {
claimSet.Exp = time.Now().Add(t).Unix()
}
+ if aud := js.conf.Audience; aud != "" {
+ claimSet.Aud = aud
+ }
h := *defaultHeader
h.KeyID = js.conf.PrivateKeyID
payload, err := jws.Encode(&h, claimSet, pk)
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index a047a5f98b6..428283f0b01 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -3,19 +3,20 @@
// license that can be found in the LICENSE file.
// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests.
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
// It can additionally grant authorization with Bearer JWT.
package oauth2 // import "golang.org/x/oauth2"
import (
"bytes"
+ "context"
"errors"
"net/http"
"net/url"
"strings"
"sync"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
@@ -25,17 +26,13 @@ import (
// Deprecated: Use context.Background() or context.TODO() instead.
var NoContext = context.TODO()
-// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
-// identified by the tokenURL prefix as an OAuth2 implementation
-// which doesn't support the HTTP Basic authentication
-// scheme to authenticate with the authorization server.
-// Once a server is registered, credentials (client_id and client_secret)
-// will be passed as query parameters rather than being present
-// in the Authorization header.
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- internal.RegisterBrokenAuthHeaderProvider(tokenURL)
-}
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
@@ -70,13 +67,38 @@ type TokenSource interface {
Token() (*Token, error)
}
-// Endpoint contains the OAuth 2.0 provider's authorization and token
+// Endpoint represents an OAuth 2.0 provider's authorization and token
// endpoint URLs.
type Endpoint struct {
AuthURL string
TokenURL string
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle AuthStyle
}
+// AuthStyle represents how requests for tokens are authenticated
+// to the server.
+type AuthStyle int
+
+const (
+ // AuthStyleAutoDetect means to auto-detect which authentication
+ // style the provider wants by trying both ways and caching
+ // the successful way for the future.
+ AuthStyleAutoDetect AuthStyle = 0
+
+ // AuthStyleInParams sends the "client_id" and "client_secret"
+ // in the POST body as application/x-www-form-urlencoded parameters.
+ AuthStyleInParams AuthStyle = 1
+
+ // AuthStyleInHeader sends the client_id and client_password
+ // using HTTP Basic Authorization. This is an optional style
+ // described in the OAuth2 RFC 6749 section 2.3.1.
+ AuthStyleInHeader AuthStyle = 2
+)
+
var (
// AccessTypeOnline and AccessTypeOffline are options passed
// to the Options.AuthCodeURL method. They modify the
@@ -123,6 +145,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
buf.WriteString(c.Endpoint.AuthURL)
@@ -161,8 +185,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
-// The HTTP client to use is derived from the context.
-// If nil, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
v := url.Values{
"grant_type": {"password"},
@@ -180,12 +203,14 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
-// The HTTP client to use is derived from the context.
-// If a client is not provided via the context, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
-func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
v := url.Values{
"grant_type": {"authorization_code"},
"code": {code},
@@ -193,6 +218,9 @@ func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
if c.RedirectURL != "" {
v.Set("redirect_uri", c.RedirectURL)
}
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
return retrieveToken(ctx, c, v)
}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 34db8cdc8a3..822720341af 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -5,6 +5,7 @@
package oauth2
import (
+ "context"
"fmt"
"net/http"
"net/url"
@@ -12,7 +13,6 @@ import (
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
@@ -118,13 +118,16 @@ func (t *Token) Extra(key string) interface{} {
return v
}
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
- return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
@@ -151,7 +154,7 @@ func tokenFromInternal(t *internal.Token) *Token {
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error..
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
if err != nil {
if rErr, ok := err.(*internal.RetrieveError); ok {
return nil, (*RetrieveError)(rErr)
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index 92ac7e2531f..aa0d34f1e0e 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -31,9 +31,17 @@ type Transport struct {
}
// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or token is expired,
-// tries to refresh/fetch a new token.
+// access token from Transport's Source.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
if t.Source == nil {
return nil, errors.New("oauth2: Transport's Source is nil")
}
@@ -46,6 +54,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
+
+ // req.Body is assumed to have been closed by the base RoundTripper.
+ reqBodyClosed = true
+
if err != nil {
t.setModReq(req, nil)
return nil, err
From abba8f31fe75f675899dbad49aad0f24fd81f29b Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Thu, 25 Apr 2019 18:07:30 +0200
Subject: [PATCH 56/76] k8s forces additionalProperties to true, hence no need
to set it ourselves (#1095)
---
config/300-broker.yaml | 1 -
config/300-channel.yaml | 1 -
2 files changed, 2 deletions(-)
diff --git a/config/300-broker.yaml b/config/300-broker.yaml
index e3cd3738e4e..fb57e91d7bf 100644
--- a/config/300-broker.yaml
+++ b/config/300-broker.yaml
@@ -73,4 +73,3 @@ spec:
minLength: 1
arguments:
type: object
- additionalProperties: true
diff --git a/config/300-channel.yaml b/config/300-channel.yaml
index d5d03bf9654..3244f00fb57 100644
--- a/config/300-channel.yaml
+++ b/config/300-channel.yaml
@@ -66,7 +66,6 @@ spec:
minLength: 1
arguments:
type: object
- additionalProperties: true
subscribable:
type: object
properties:
From 9565689235fa2ac2ec41ad021ab696b37ded8c91 Mon Sep 17 00:00:00 2001
From: Akash Verenkar <45154452+akashrv@users.noreply.github.com>
Date: Thu, 25 Apr 2019 09:52:29 -0700
Subject: [PATCH 57/76] Remove Istio dependency from Eventing (Part - 3) -
Natss (#1074)
* WIP
* WIP - In-memory working with E2E tests
* WIP - remove istio dependency from in-memory channel
* UTs pass, E2E tests pass with in-memory as well as kafka
* fixed uts that failed due to last K8s service change
* Removed unnecessary space from a line
* dding istio annotation to test POD. This will ve needed when running E2E
tests against channels other than in-memory
* Bug fix to set clusterIp of K8s service only when it is not of type ExternalName
* WIP kafka channel
* WIP kafka - UTs and E2E pass
More UTs needed
* Updated code based on PR comments
* WIP
* Updates based on PR comments
* Updates based on PR comments
* Fixed UTs
* Updated VENDOR_LICENSE
* WIP. Update fanout sidecar
* Merge from upstream master
* UTs pass, ITs passed. Code ready for PR
* Update natss to not use ISTIO. UTs and E2E tests pass.
* Updates based on PR comments
* Removed permission to istio virtual service from controller
* Changes based on PR comments
* Added back permission that was removed by mistake
* Remove istio references
* WIP
* Removed one more reference of istio
* Revert kafka.yaml local change
* Revert kafka dispatcher change
* Removing Mutex. No need to use Mutex when using atomic value for hostToChannelMap
* Minor updates based on PR comments
---
contrib/natss/config/broker/natss.yaml | 2 -
contrib/natss/config/provisioner.yaml | 10 ---
.../pkg/controller/channel/controller.go | 11 ----
.../natss/pkg/controller/channel/reconcile.go | 11 +---
.../pkg/controller/channel/reconcile_test.go | 63 ++++++++----------
contrib/natss/pkg/controller/main.go | 2 -
.../natss/pkg/dispatcher/channel/reconcile.go | 13 ++++
.../pkg/dispatcher/dispatcher/dispatcher.go | 52 ++++++++++++++-
.../dispatcher/dispatcher/dispatcher_test.go | 64 +++++++++++++++++++
pkg/channelwatcher/channel_watcher.go | 6 +-
10 files changed, 158 insertions(+), 76 deletions(-)
diff --git a/contrib/natss/config/broker/natss.yaml b/contrib/natss/config/broker/natss.yaml
index df040e07b98..2f1bb433fbd 100644
--- a/contrib/natss/config/broker/natss.yaml
+++ b/contrib/natss/config/broker/natss.yaml
@@ -62,8 +62,6 @@ spec:
app: nats-streaming
template:
metadata:
- annotations:
- sidecar.istio.io/inject: "true"
labels: *labels
spec:
containers:
diff --git a/contrib/natss/config/provisioner.yaml b/contrib/natss/config/provisioner.yaml
index 681146deca6..6790bbe7b1d 100644
--- a/contrib/natss/config/provisioner.yaml
+++ b/contrib/natss/config/provisioner.yaml
@@ -55,16 +55,6 @@ rules:
- watch
- create
- update
- - apiGroups:
- - networking.istio.io
- resources:
- - virtualservices
- verbs:
- - get
- - list
- - watch
- - create
- - update
---
diff --git a/contrib/natss/pkg/controller/channel/controller.go b/contrib/natss/pkg/controller/channel/controller.go
index 9968eedf9e2..5427bb180c1 100644
--- a/contrib/natss/pkg/controller/channel/controller.go
+++ b/contrib/natss/pkg/controller/channel/controller.go
@@ -24,7 +24,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
corev1 "k8s.io/api/core/v1"
)
@@ -65,15 +64,5 @@ func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Cont
logger.Error("Unable to watch K8s Services.", zap.Error(err))
return nil, err
}
-
- // Watch the VirtualServices that are owned by Channels.
- err = c.Watch(&source.Kind{
- Type: &istiov1alpha3.VirtualService{},
- }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true})
- if err != nil {
- logger.Error("Unable to watch VirtualServices.", zap.Error(err))
- return nil, err
- }
-
return c, nil
}
diff --git a/contrib/natss/pkg/controller/channel/reconcile.go b/contrib/natss/pkg/controller/channel/reconcile.go
index 4eb9445bccd..c0f03f563d5 100644
--- a/contrib/natss/pkg/controller/channel/reconcile.go
+++ b/contrib/natss/pkg/controller/channel/reconcile.go
@@ -115,26 +115,19 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
// We are syncing two things:
// 1. The K8s Service to talk to this Channel.
- // 2. The Istio VirtualService to talk to this Channel.
if c.DeletionTimestamp != nil {
- // K8s garbage collection will delete the K8s service and VirtualService for this channel.
+ // K8s garbage collection will delete the K8s service for this channel.
return nil
}
- svc, err := provisioners.CreateK8sService(ctx, r.client, c)
+ svc, err := provisioners.CreateK8sService(ctx, r.client, c, provisioners.ExternalService(c))
if err != nil {
r.logger.Info("Error creating the Channel's K8s Service", zap.Error(err))
return err
}
c.Status.SetAddress(names.ServiceHostName(svc.Name, svc.Namespace))
- _, err = provisioners.CreateVirtualService(ctx, r.client, c, svc)
- if err != nil {
- r.logger.Info("Error creating the Virtual Service for the Channel", zap.Error(err))
- return err
- }
-
c.Status.MarkProvisioned()
return nil
}
diff --git a/contrib/natss/pkg/controller/channel/reconcile_test.go b/contrib/natss/pkg/controller/channel/reconcile_test.go
index 6c4cbaf144e..5ba77cad8aa 100644
--- a/contrib/natss/pkg/controller/channel/reconcile_test.go
+++ b/contrib/natss/pkg/controller/channel/reconcile_test.go
@@ -22,11 +22,11 @@ import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
- util "github.com/knative/eventing/pkg/provisioners"
+ "github.com/knative/eventing/pkg/reconciler/names"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/utils"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/pkg/system"
_ "github.com/knative/pkg/system/testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -56,7 +56,6 @@ var (
func init() {
// Add types to scheme
eventingv1alpha1.AddToScheme(scheme.Scheme)
- istiov1alpha3.AddToScheme(scheme.Scheme)
}
var testCases = []controllertesting.TestCase{
@@ -65,12 +64,12 @@ var testCases = []controllertesting.TestCase{
InitialState: []runtime.Object{
makeNewClusterChannelProvisioner(clusterChannelProvisionerName, true),
makeNewChannel(channelName, clusterChannelProvisionerName),
- makeVirtualService(),
},
ReconcileKey: fmt.Sprintf("%s/%s", testNS, channelName),
WantResult: reconcile.Result{},
WantPresent: []runtime.Object{
makeNewChannelProvisionedStatus(channelName, clusterChannelProvisionerName),
+ makeK8sService(channelName, clusterChannelProvisionerName),
},
IgnoreTimes: true,
},
@@ -214,18 +213,29 @@ func makeNewClusterChannelProvisioner(name string, isReady bool) *eventingv1alph
return clusterChannelProvisioner
}
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return &istiov1alpha3.VirtualService{
+func om(namespace, name string) metav1.ObjectMeta {
+ return metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name),
+ UID: testUID,
+ }
+}
+
+func makeK8sService(channelName string, clusterChannelProvisionerName string) *corev1.Service {
+ return &corev1.Service{
TypeMeta: metav1.TypeMeta{
- APIVersion: istiov1alpha3.SchemeGroupVersion.String(),
- Kind: "VirtualService",
+ APIVersion: "v1",
+ Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-channel", testNS),
- Namespace: testNS,
+ GenerateName: fmt.Sprintf("%s-channel-", channelName),
+ Namespace: testNS,
Labels: map[string]string{
- "channel": channelName,
- "provisioner": clusterChannelProvisionerName,
+ provisioners.EventingChannelLabel: channelName,
+ provisioners.OldEventingChannelLabel: channelName,
+ provisioners.EventingProvisionerLabel: clusterChannelProvisionerName,
+ provisioners.OldEventingProvisionerLabel: clusterChannelProvisionerName,
},
OwnerReferences: []metav1.OwnerReference{
{
@@ -238,32 +248,9 @@ func makeVirtualService() *istiov1alpha3.VirtualService {
},
},
},
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- serviceAddress,
- fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.HTTPRouteDestination{{
- Destination: istiov1alpha3.Destination{
- Host: "kafka-provisioner.knative-eventing.svc." + utils.GetClusterDomainName(),
- Port: istiov1alpha3.PortSelector{
- Number: util.PortNumber,
- },
- }},
- }},
- },
+ Spec: corev1.ServiceSpec{
+ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", clusterChannelProvisionerName), system.Namespace()),
+ Type: "ExternalName",
},
}
}
-
-func om(namespace, name string) metav1.ObjectMeta {
- return metav1.ObjectMeta{
- Namespace: namespace,
- Name: name,
- SelfLink: fmt.Sprintf("/apis/eventing/v1alpha1/namespaces/%s/object/%s", namespace, name),
- }
-}
diff --git a/contrib/natss/pkg/controller/main.go b/contrib/natss/pkg/controller/main.go
index 531e8901f7c..26cb9f6047a 100644
--- a/contrib/natss/pkg/controller/main.go
+++ b/contrib/natss/pkg/controller/main.go
@@ -24,7 +24,6 @@ import (
"github.com/knative/eventing/contrib/natss/pkg/util"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/signals"
"go.uber.org/zap"
"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -48,7 +47,6 @@ func main() {
// Add custom types to this array to get them into the manager's scheme.
eventingv1alpha1.AddToScheme(mgr.GetScheme())
- istiov1alpha3.AddToScheme(mgr.GetScheme())
_, err = clusterchannelprovisioner.ProvideController(mgr, util.GetDefaultNatssURL(), util.GetDefaultClusterID(), logger.Desugar())
if err != nil {
diff --git a/contrib/natss/pkg/dispatcher/channel/reconcile.go b/contrib/natss/pkg/dispatcher/channel/reconcile.go
index dda10e27f59..f9af9763321 100644
--- a/contrib/natss/pkg/dispatcher/channel/reconcile.go
+++ b/contrib/natss/pkg/dispatcher/channel/reconcile.go
@@ -29,6 +29,7 @@ import (
ccpcontroller "github.com/knative/eventing/contrib/natss/pkg/controller/clusterchannelprovisioner"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/channelwatcher"
)
type reconciler struct {
@@ -122,5 +123,17 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
r.logger.Error("UpdateSubscriptions() failed: ", zap.Error(err))
return false, err
}
+
+ chanList, err := channelwatcher.ListAllChannels(ctx, r.client, r.shouldReconcile)
+ if err != nil {
+ r.logger.Error("Error getting channel list", zap.Error(err))
+ return false, err
+ }
+
+ if err := r.subscriptionsSupervisor.UpdateHostToChannelMap(ctx, chanList); err != nil {
+ r.logger.Error("Error updating host to channel map", zap.Error(err))
+ return false, err
+ }
+
return false, nil
}
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
index 28a0b9ad7de..ecbb7d8211f 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
@@ -17,12 +17,15 @@ limitations under the License.
package dispatcher
import (
+ "context"
"encoding/json"
"fmt"
"sync"
+ "sync/atomic"
"time"
"github.com/knative/eventing/contrib/natss/pkg/stanutil"
+ "github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/provisioners"
stan "github.com/nats-io/go-nats-streaming"
"go.uber.org/zap"
@@ -59,6 +62,8 @@ type SubscriptionsSupervisor struct {
natssConnMux sync.Mutex
natssConn *stan.Conn
natssConnInProgress bool
+
+ hostToChannelMap atomic.Value
}
// NewDispatcher returns a new SubscriptionsSupervisor.
@@ -71,11 +76,15 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio
clusterID: clusterID,
subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription),
}
- receiver, err := provisioners.NewMessageReceiver(createReceiverFunction(d, logger.Sugar()), logger.Sugar())
+ receiver, err := provisioners.NewMessageReceiver(
+ createReceiverFunction(d, logger.Sugar()),
+ logger.Sugar(),
+ provisioners.ResolveChannelFromHostHeader(provisioners.ResolveChannelFromHostFunc(d.getChannelReferenceFromHost)))
if err != nil {
return nil, err
}
d.receiver = receiver
+ d.setHostToChannelMap(map[string]provisioners.ChannelReference{})
return d, nil
}
@@ -291,3 +300,44 @@ func (s *SubscriptionsSupervisor) unsubscribe(channel provisioners.ChannelRefere
func getSubject(channel provisioners.ChannelReference) string {
return channel.Name + "." + channel.Namespace
}
+
+func (s *SubscriptionsSupervisor) getHostToChannelMap() map[string]provisioners.ChannelReference {
+ return s.hostToChannelMap.Load().(map[string]provisioners.ChannelReference)
+}
+
+func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) {
+ s.hostToChannelMap.Store(hcMap)
+}
+
+// UpdateHostToChannelMap will be called from the controller that watches natss channels.
+// It will update internal hostToChannelMap which is used to resolve the hostHeader of the
+// incoming request to the correct ChannelReference in the receiver function.
+func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error {
+ hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList))
+ for _, c := range chanList {
+ hostName := c.Status.Address.Hostname
+ if cr, ok := hostToChanMap[hostName]; ok {
+ return fmt.Errorf(
+ "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s",
+ hostName,
+ c.Namespace,
+ c.Name,
+ cr.Namespace,
+ cr.Name)
+ }
+ hostToChanMap[hostName] = provisioners.ChannelReference{Name: c.Name, Namespace: c.Namespace}
+ }
+
+ s.setHostToChannelMap(hostToChanMap)
+ logging.FromContext(ctx).Info("hostToChannelMap updated successfully.")
+ return nil
+}
+
+func (s *SubscriptionsSupervisor) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) {
+ chMap := s.getHostToChannelMap()
+ cr, ok := chMap[host]
+ if !ok {
+ return cr, fmt.Errorf("Invalid HostName:%q. HostName not found in any of the watched natss channels", host)
+ }
+ return cr, nil
+}
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
index 54dab1b13b1..5df19d94d44 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
@@ -17,11 +17,13 @@ limitations under the License.
package dispatcher
import (
+ "context"
"encoding/json"
"os"
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/knative/eventing/contrib/natss/pkg/stanutil"
"github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
@@ -199,6 +201,68 @@ func TestUpdateSubscriptions(t *testing.T) {
}
}
+func TestUpdateHostToChannelMap(t *testing.T) {
+ tests := []struct {
+ name string
+ chanList []eventingv1alpha1.Channel
+ expected map[string]provisioners.ChannelReference
+ expectedErrorString string
+ }{
+ {
+ name: "Empty channel list",
+ expected: map[string]provisioners.ChannelReference{},
+ }, {
+ name: "Duplicate host name",
+ chanList: []eventingv1alpha1.Channel{
+ *makechannel("chan1", "ns1", "host1"),
+ *makechannel("chan2", "ns2", "host2"),
+ *makechannel("chan3", "ns3", "host2"),
+ },
+ expected: map[string]provisioners.ChannelReference{},
+ expectedErrorString: "Duplicate hostName found. Each channel must have a unique host header. HostName:host2, channel:ns3.chan3, channel:ns2.chan2",
+ }, {
+ name: "Valid list of channels",
+ chanList: []eventingv1alpha1.Channel{
+ *makechannel("chan1", "ns1", "host1"),
+ *makechannel("chan2", "ns2", "host2"),
+ *makechannel("chan3", "ns3", "host3"),
+ },
+ expected: map[string]provisioners.ChannelReference{
+ "host1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"},
+ "host2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"},
+ "host3": provisioners.ChannelReference{Name: "chan3", Namespace: "ns3"},
+ },
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ s.setHostToChannelMap(map[string]provisioners.ChannelReference{})
+ err := s.UpdateHostToChannelMap(context.TODO(), test.chanList)
+
+ if err != nil {
+ if diff := cmp.Diff(test.expectedErrorString, err.Error()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ }
+
+ if diff := cmp.Diff(test.expected, s.getHostToChannelMap()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ })
+ }
+}
+
+func makechannel(name string, namespace string, hostname string) *eventingv1alpha1.Channel {
+ c := eventingv1alpha1.Channel{}
+ c.Name = name
+ c.Namespace = namespace
+ c.Status.InitializeConditions()
+ c.Status.MarkProvisioned()
+ c.Status.MarkProvisionerInstalled()
+ c.Status.SetAddress(hostname)
+ return &c
+}
+
func startNatss() (*server.StanServer, error) {
logger.Infof("Start NATSS")
var (
diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go
index 26b77b362b1..249fe2ef439 100644
--- a/pkg/channelwatcher/channel_watcher.go
+++ b/pkg/channelwatcher/channel_watcher.go
@@ -87,7 +87,7 @@ type ShouldWatchFunc func(ch *v1alpha1.Channel) bool
// This is used by dispatchers or receivers to update their configs by watching channels.
func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch ShouldWatchFunc) WatchHandlerFunc {
return func(ctx context.Context, c client.Client, _ types.NamespacedName) error {
- channels, err := listAllChannels(ctx, c, shouldWatch)
+ channels, err := ListAllChannels(ctx, c, shouldWatch)
if err != nil {
logging.FromContext(ctx).Info("Unable to list channels", zap.Error(err))
return err
@@ -97,8 +97,8 @@ func UpdateConfigWatchHandler(updateConfig swappable.UpdateConfig, shouldWatch S
}
}
-// listAllChannels queries client and gets list of all channels for which shouldWatch returns true.
-func listAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) {
+// ListAllChannels queries client and gets list of all channels for which shouldWatch returns true.
+func ListAllChannels(ctx context.Context, c client.Client, shouldWatch ShouldWatchFunc) ([]v1alpha1.Channel, error) {
channels := make([]v1alpha1.Channel, 0)
cl := &v1alpha1.ChannelList{}
if err := c.List(ctx, &client.ListOptions{}, cl); err != nil {
From 2a3b8a2dce3447380b48277a176aab1f777b82e5 Mon Sep 17 00:00:00 2001
From: Ignacio Cano
Date: Thu, 25 Apr 2019 10:23:29 -0700
Subject: [PATCH 58/76] Broker controller new infrastructure (#1073)
* first pass to update broker controller
* starting informers properly, listening to own resources
* fixing and commenting out UTs
* space
* adding listers
* updating tests
* more tests
* adding deployment functionality
* compiling
* more UTs
* more UTs
* more UTs
* more UTs
* more UTs
* updating deps
* updating UTs
* updating UTs
* Updating code gen
* inlining method
* update after comment
* review comments
* serviceInformer and namespaceInformer
* attempt to fix
* solved
* Updates after code review
* Updating gopkg.lock
---
Gopkg.lock | 1 -
cmd/controller/main.go | 122 +-
.../subscription/subscription_test.go | 27 +
pkg/reconciler/testing/broker.go | 134 ++
pkg/reconciler/testing/channel.go | 24 +-
pkg/reconciler/testing/deployment.go | 84 +
pkg/reconciler/testing/service.go | 20 +
pkg/reconciler/testing/subscription.go | 32 +-
pkg/reconciler/trigger/trigger.go | 6 +-
pkg/reconciler/trigger/trigger_test.go | 32 -
pkg/reconciler/v1alpha1/broker/broker.go | 371 ++--
pkg/reconciler/v1alpha1/broker/broker_test.go | 1813 ++++++++---------
.../v1alpha1/broker/resources/filter.go | 15 +-
.../v1alpha1/broker/resources/ingress.go | 19 +-
14 files changed, 1415 insertions(+), 1285 deletions(-)
create mode 100644 pkg/reconciler/testing/broker.go
create mode 100644 pkg/reconciler/testing/deployment.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 099d11bd55e..0fd66fadcbc 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1400,7 +1400,6 @@
"github.com/knative/test-infra/tools/dep-collector",
"github.com/nats-io/go-nats-streaming",
"github.com/nats-io/nats-streaming-server/server",
- "github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/robfig/cron",
"go.opencensus.io/exporter/prometheus",
"go.opencensus.io/stats",
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 34aee80848a..8bbdc8d81c3 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -17,22 +17,16 @@ limitations under the License.
package main
import (
- "context"
"flag"
"log"
- "net/http"
"os"
- "time"
- "k8s.io/apimachinery/pkg/runtime"
kubeinformers "k8s.io/client-go/informers"
- "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
informers "github.com/knative/eventing/pkg/client/informers/externalversions"
"github.com/knative/eventing/pkg/logconfig"
"github.com/knative/eventing/pkg/logging"
@@ -42,35 +36,19 @@ import (
"github.com/knative/eventing/pkg/reconciler/subscription"
"github.com/knative/eventing/pkg/reconciler/trigger"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/configmap"
kncontroller "github.com/knative/pkg/controller"
"github.com/knative/pkg/logging/logkey"
"github.com/knative/pkg/signals"
- "github.com/knative/pkg/system"
- "github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
)
-const (
- metricsScrapeAddr = ":9090"
- metricsScrapePath = "/metrics"
-)
-
var (
hardcodedLoggingConfig bool
)
-// SchemeFunc adds types to a Scheme.
-type SchemeFunc func(*runtime.Scheme) error
-
-// ProvideFunc adds a controller to a Manager.
-type ProvideFunc func(manager.Manager, *zap.Logger) (controller.Controller, error)
-
func main() {
flag.Parse()
logf.SetLogger(logf.ZapLogger(false))
@@ -87,16 +65,9 @@ func main() {
logger.Fatalf("Error building kubeconfig: %v", err)
}
- go startPkgController(stopCh, cfg, logger, atomicLevel)
- go startControllerRuntime(stopCh, cfg, logger, atomicLevel)
- <-stopCh
-}
-
-func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel) {
- logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 4
+ const numControllers = 5
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -114,6 +85,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
serviceInformer := kubeInformerFactory.Core().V1().Services()
namespaceInformer := kubeInformerFactory.Core().V1().Namespaces()
configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps()
+ deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
// Build all of our controllers, with the clients constructed above.
// Add new controllers to this array.
@@ -139,6 +111,20 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
brokerInformer,
serviceInformer,
),
+ broker.NewController(
+ opt,
+ brokerInformer,
+ subscriptionInformer,
+ channelInformer,
+ serviceInformer,
+ deploymentInformer,
+ broker.ReconcilerArgs{
+ IngressImage: getRequiredEnv("BROKER_INGRESS_IMAGE"),
+ IngressServiceAccountName: getRequiredEnv("BROKER_INGRESS_SERVICE_ACCOUNT"),
+ FilterImage: getRequiredEnv("BROKER_FILTER_IMAGE"),
+ FilterServiceAccountName: getRequiredEnv("BROKER_FILTER_SERVICE_ACCOUNT"),
+ },
+ ),
}
if len(controllers) != numControllers {
logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
@@ -165,6 +151,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
configMapInformer.Informer(),
serviceInformer.Informer(),
namespaceInformer.Informer(),
+ deploymentInformer.Informer(),
); err != nil {
logger.Fatalf("Failed to start informers: %v", err)
}
@@ -172,82 +159,7 @@ func startPkgController(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.Su
// Start all of the controllers.
logger.Info("Starting controllers.")
go kncontroller.StartAll(stopCh, controllers...)
-}
-
-// TODO: remove after done integrating all controllers.
-func startControllerRuntime(stopCh <-chan struct{}, cfg *rest.Config, logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel) {
- logger = logger.With(zap.String("controller/impl", "cr"))
- logger.Info("Starting the controller")
-
- kubeClient, err := kubernetes.NewForConfig(cfg)
- if err != nil {
- logger.Fatalf("Error building kubernetes clientset: %v", err)
- }
-
- // Watch the logging config map and dynamically update logging levels.
- configMapWatcher := configmap.NewInformedWatcher(kubeClient, system.Namespace())
- configMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller))
- if err = configMapWatcher.Start(stopCh); err != nil {
- logger.Fatalf("Failed to start controller config map watcher: %v", err)
- }
-
- // Setup a Manager
- mgr, err := manager.New(cfg, manager.Options{})
- if err != nil {
- logger.Fatalf("Failed to create manager: %v", err)
- }
-
- // Add custom types to this array to get them into the manager's scheme.
- schemeFuncs := []SchemeFunc{
- istiov1alpha3.AddToScheme,
- eventingv1alpha1.AddToScheme,
- }
- for _, schemeFunc := range schemeFuncs {
- if err = schemeFunc(mgr.GetScheme()); err != nil {
- logger.Fatalf("Error adding type to manager's scheme: %v", err)
- }
- }
-
- // Add each controller's ProvideController func to this list to have the
- // manager run it.
- providers := []ProvideFunc{
- broker.ProvideController(
- broker.ReconcilerArgs{
- IngressImage: getRequiredEnv("BROKER_INGRESS_IMAGE"),
- IngressServiceAccountName: getRequiredEnv("BROKER_INGRESS_SERVICE_ACCOUNT"),
- FilterImage: getRequiredEnv("BROKER_FILTER_IMAGE"),
- FilterServiceAccountName: getRequiredEnv("BROKER_FILTER_SERVICE_ACCOUNT"),
- }),
- }
- for _, provider := range providers {
- if _, err = provider(mgr, logger.Desugar()); err != nil {
- logger.Fatalf("Error adding controller to manager: %v", err)
- }
- }
-
- // Start the Manager
- go func() {
- if localErr := mgr.Start(stopCh); localErr != nil {
- logger.Fatalf("Error starting manager: %v", localErr)
- }
- }()
-
- // Start the endpoint that Prometheus scraper talks to
- srv := &http.Server{Addr: metricsScrapeAddr}
- http.Handle(metricsScrapePath, promhttp.Handler())
- go func() {
- logger.Infof("Starting metrics listener at %s", metricsScrapeAddr)
- if localErr := srv.ListenAndServe(); localErr != nil {
- logger.Infof("HTTPserver: ListenAndServe() finished with error: %s", localErr)
- }
- }()
-
<-stopCh
-
- // Close the http server gracefully
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- srv.Shutdown(ctx)
}
func init() {
diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go
index 84bf5e5255d..1db1a001d8e 100644
--- a/pkg/reconciler/subscription/subscription_test.go
+++ b/pkg/reconciler/subscription/subscription_test.go
@@ -117,6 +117,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, but subscriber is not addressable",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
@@ -133,6 +134,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
@@ -143,6 +145,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, but subscriber does not exist",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
@@ -158,6 +161,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
@@ -168,6 +172,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, reply does not exist",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
@@ -186,6 +191,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
@@ -198,6 +204,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, reply is not addressable",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(subscriberGVK, replyName), // reply will be a subscriberGVK for this test
@@ -219,6 +226,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(subscriberGVK, replyName),
@@ -231,6 +239,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid channel+subscriber",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
),
@@ -249,6 +258,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
// The first reconciliation will initialize the status conditions.
@@ -267,6 +277,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid channel+reply",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionReply(channelGVK, replyName),
),
@@ -286,6 +297,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionReply(channelGVK, replyName),
// The first reconciliation will initialize the status conditions.
@@ -304,6 +316,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid channel+subscriber+reply",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
@@ -327,6 +340,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
@@ -347,6 +361,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid remove reply",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithInitSubscriptionConditions,
@@ -372,6 +387,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithInitSubscriptionConditions,
@@ -389,6 +405,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid remove subscriber",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithInitSubscriptionConditions,
WithSubscriptionReply(channelGVK, replyName),
@@ -415,6 +432,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionReply(channelGVK, replyName),
WithInitSubscriptionConditions,
@@ -432,6 +450,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, channel+subscriber as service, does not exist",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
@@ -447,6 +466,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
@@ -457,6 +477,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription, valid channel+subscriber as service",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
@@ -473,6 +494,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
@@ -491,11 +513,13 @@ func TestAllCases(t *testing.T) {
Name: "subscription, two subscribers for a channel",
Objects: []runtime.Object{
NewSubscription("a_"+subscriptionName, testNS,
+ WithSubscriptionUID("a_"+subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
),
// an already rec'ed subscription
NewSubscription("b_"+subscriptionName, testNS,
+ WithSubscriptionUID("b_"+subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
WithInitSubscriptionConditions,
@@ -515,6 +539,7 @@ func TestAllCases(t *testing.T) {
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription("a_"+subscriptionName, testNS,
+ WithSubscriptionUID("a_"+subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(serviceGVK, serviceName),
// The first reconciliation will initialize the status conditions.
@@ -534,6 +559,7 @@ func TestAllCases(t *testing.T) {
Name: "subscription deleted",
Objects: []runtime.Object{
NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
@@ -562,6 +588,7 @@ func TestAllCases(t *testing.T) {
},
WantUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
+ WithSubscriptionUID(subscriptionUID),
WithSubscriptionChannel(channelGVK, channelName),
WithSubscriptionSubscriberRef(subscriberGVK, subscriberName),
WithSubscriptionReply(channelGVK, replyName),
diff --git a/pkg/reconciler/testing/broker.go b/pkg/reconciler/testing/broker.go
new file mode 100644
index 00000000000..6ff54890053
--- /dev/null
+++ b/pkg/reconciler/testing/broker.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "time"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BrokerOption enables further configuration of a Broker.
+type BrokerOption func(*v1alpha1.Broker)
+
+// NewBroker creates a Broker with BrokerOptions.
+func NewBroker(name, namespace string, o ...BrokerOption) *v1alpha1.Broker {
+ b := &v1alpha1.Broker{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ }
+ for _, opt := range o {
+ opt(b)
+ }
+ b.SetDefaults(context.Background())
+ return b
+}
+
+// WithInitBrokerConditions initializes the Broker's conditions.
+func WithInitBrokerConditions(b *v1alpha1.Broker) {
+ b.Status.InitializeConditions()
+}
+
+func WithBrokerDeletionTimestamp(b *v1alpha1.Broker) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ b.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+// WithBrokerChannelProvisioner sets the Broker's ChannelTemplate provisioner.
+func WithBrokerChannelProvisioner(provisioner *corev1.ObjectReference) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Spec.ChannelTemplate = &v1alpha1.ChannelSpec{
+ Provisioner: provisioner,
+ }
+ }
+}
+
+// WithBrokerAddress sets the Broker's address.
+func WithBrokerAddress(address string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.SetAddress(address)
+ }
+}
+
+// WithBrokerReady sets .Status to ready.
+func WithBrokerReady(b *v1alpha1.Broker) {
+ b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
+}
+
+// WithTriggerChannelFailed calls .Status.MarkTriggerChannelFailed on the Broker.
+func WithTriggerChannelFailed(reason, msg string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.MarkTriggerChannelFailed(reason, msg)
+ }
+}
+
+// WithFilterFailed calls .Status.MarkFilterFailed on the Broker.
+func WithFilterFailed(reason, msg string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.MarkFilterFailed(reason, msg)
+ }
+}
+
+// WithIngressFailed calls .Status.MarkIngressFailed on the Broker.
+func WithIngressFailed(reason, msg string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.MarkIngressFailed(reason, msg)
+ }
+}
+
+// WithIngressChannelFailed calls .Status.MarkIngressChannelFailed on the Broker.
+func WithIngressChannelFailed(reason, msg string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.MarkIngressChannelFailed(reason, msg)
+ }
+}
+
+// WithTriggerChannelReady calls .Status.PropagateTriggerChannelReadiness on the Broker.
+func WithTriggerChannelReady() BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.PropagateTriggerChannelReadiness(v1alpha1.TestHelper.ReadyChannelStatus())
+ }
+}
+
+func WithFilterDeploymentAvailable() BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.PropagateFilterDeploymentAvailability(v1alpha1.TestHelper.AvailableDeployment())
+ }
+}
+
+func WithIngressDeploymentAvailable() BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.PropagateIngressDeploymentAvailability(v1alpha1.TestHelper.AvailableDeployment())
+ }
+}
+
+func WithBrokerIngressChannelReady() BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.PropagateIngressChannelReadiness(v1alpha1.TestHelper.ReadyChannelStatus())
+ }
+}
+
+func WithBrokerIngressSubscriptionFailed(reason, msg string) BrokerOption {
+ return func(b *v1alpha1.Broker) {
+ b.Status.MarkIngressSubscriptionFailed(reason, msg)
+ }
+}
diff --git a/pkg/reconciler/testing/channel.go b/pkg/reconciler/testing/channel.go
index bef61f609c7..cdad87b10fe 100644
--- a/pkg/reconciler/testing/channel.go
+++ b/pkg/reconciler/testing/channel.go
@@ -92,10 +92,14 @@ func WithChannelProvisioner(gvk metav1.GroupVersionKind, name string) ChannelOpt
func WithChannelAddress(hostname string) ChannelOption {
return func(c *v1alpha1.Channel) {
- c.Status.Address.Hostname = hostname
+ c.Status.SetAddress(hostname)
}
}
+func WithChannelReady(c *v1alpha1.Channel) {
+ c.Status = *v1alpha1.TestHelper.ReadyChannelStatus()
+}
+
func WithChannelSubscribers(subscribers []duckv1alpha1.ChannelSubscriberSpec) ChannelOption {
return func(c *v1alpha1.Channel) {
c.Spec.Subscribable = &duckv1alpha1.Subscribable{
@@ -103,3 +107,21 @@ func WithChannelSubscribers(subscribers []duckv1alpha1.ChannelSubscriberSpec) Ch
}
}
}
+
+func WithChannelGenerateName(generateName string) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.ObjectMeta.GenerateName = generateName
+ }
+}
+
+func WithChannelLabels(labels map[string]string) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.ObjectMeta.Labels = labels
+ }
+}
+
+func WithChannelOwnerReferences(ownerReferences []metav1.OwnerReference) ChannelOption {
+ return func(c *v1alpha1.Channel) {
+ c.ObjectMeta.OwnerReferences = ownerReferences
+ }
+}
diff --git a/pkg/reconciler/testing/deployment.go b/pkg/reconciler/testing/deployment.go
new file mode 100644
index 00000000000..dec4cd1cd58
--- /dev/null
+++ b/pkg/reconciler/testing/deployment.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DeploymentOption enables further configuration of a Deployment.
+type DeploymentOption func(*appsv1.Deployment)
+
+// NewDeployment creates a Deployment with DeploymentOptions.
+func NewDeployment(name, namespace string, do ...DeploymentOption) *appsv1.Deployment {
+ d := &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: name,
+ },
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{{}},
+ },
+ },
+ },
+ }
+ for _, opt := range do {
+ opt(d)
+ }
+ return d
+}
+
+func WithDeploymentLabels(labels map[string]string) DeploymentOption {
+ return func(d *appsv1.Deployment) {
+ d.ObjectMeta.Labels = labels
+ d.Spec.Selector = &metav1.LabelSelector{
+ MatchLabels: labels,
+ }
+ d.Spec.Template.Labels = labels
+ }
+}
+
+func WithDeploymentOwnerReferences(ownerReferences []metav1.OwnerReference) DeploymentOption {
+ return func(d *appsv1.Deployment) {
+ d.OwnerReferences = ownerReferences
+ }
+}
+
+func WithDeploymentAnnotations(annotations map[string]string) DeploymentOption {
+ return func(d *appsv1.Deployment) {
+ d.Spec.Template.Annotations = annotations
+ }
+}
+
+func WithDeploymentServiceAccount(serviceAccountName string) DeploymentOption {
+ return func(d *appsv1.Deployment) {
+ d.Spec.Template.Spec.ServiceAccountName = serviceAccountName
+ }
+}
+
+func WithDeploymentContainer(name, image string, envVars []corev1.EnvVar, containerPorts []corev1.ContainerPort) DeploymentOption {
+ return func(d *appsv1.Deployment) {
+ d.Spec.Template.Spec.Containers[0].Name = name
+ d.Spec.Template.Spec.Containers[0].Image = image
+ d.Spec.Template.Spec.Containers[0].Env = envVars
+ d.Spec.Template.Spec.Containers[0].Ports = containerPorts
+ }
+}
diff --git a/pkg/reconciler/testing/service.go b/pkg/reconciler/testing/service.go
index b074b93a722..d38ab61d499 100644
--- a/pkg/reconciler/testing/service.go
+++ b/pkg/reconciler/testing/service.go
@@ -31,9 +31,29 @@ func NewService(name, namespace string, so ...ServiceOption) *corev1.Service {
Name: name,
Namespace: namespace,
},
+ Spec: corev1.ServiceSpec{},
}
for _, opt := range so {
opt(s)
}
return s
}
+
+func WithServiceOwnerReferences(ownerReferences []metav1.OwnerReference) ServiceOption {
+ return func(s *corev1.Service) {
+ s.OwnerReferences = ownerReferences
+ }
+}
+
+func WithServiceLabels(labels map[string]string) ServiceOption {
+ return func(s *corev1.Service) {
+ s.ObjectMeta.Labels = labels
+ s.Spec.Selector = labels
+ }
+}
+
+func WithServicePorts(ports []corev1.ServicePort) ServiceOption {
+ return func(s *corev1.Service) {
+ s.Spec.Ports = ports
+ }
+}
diff --git a/pkg/reconciler/testing/subscription.go b/pkg/reconciler/testing/subscription.go
index 60ee2a14844..91d3eb9d687 100644
--- a/pkg/reconciler/testing/subscription.go
+++ b/pkg/reconciler/testing/subscription.go
@@ -18,9 +18,10 @@ package testing
import (
"context"
- "k8s.io/apimachinery/pkg/types"
"time"
+ "k8s.io/apimachinery/pkg/types"
+
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,7 +36,6 @@ func NewSubscription(name, namespace string, so ...SubscriptionOption) *v1alpha1
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
- UID: types.UID(name + "-abc-123"),
},
}
for _, opt := range so {
@@ -59,17 +59,45 @@ func NewSubscriptionWithoutNamespace(name string, so ...SubscriptionOption) *v1a
return s
}
+func WithSubscriptionUID(uid types.UID) SubscriptionOption {
+ return func(s *v1alpha1.Subscription) {
+ s.UID = uid
+ }
+}
+
+func WithSubscriptionGenerateName(generateName string) SubscriptionOption {
+ return func(c *v1alpha1.Subscription) {
+ c.ObjectMeta.GenerateName = generateName
+ }
+}
+
// WithInitSubscriptionConditions initializes the Subscriptions's conditions.
func WithInitSubscriptionConditions(s *v1alpha1.Subscription) {
s.Status.InitializeConditions()
}
+func WithSubscriptionReady(s *v1alpha1.Subscription) {
+ s.Status = *v1alpha1.TestHelper.ReadySubscriptionStatus()
+}
+
// TODO: this can be a runtime object
func WithSubscriptionDeleted(s *v1alpha1.Subscription) {
t := metav1.NewTime(time.Unix(1e9, 0))
s.ObjectMeta.SetDeletionTimestamp(&t)
}
+func WithSubscriptionOwnerReferences(ownerReferences []metav1.OwnerReference) SubscriptionOption {
+ return func(c *v1alpha1.Subscription) {
+ c.ObjectMeta.OwnerReferences = ownerReferences
+ }
+}
+
+func WithSubscriptionLabels(labels map[string]string) SubscriptionOption {
+ return func(c *v1alpha1.Subscription) {
+ c.ObjectMeta.Labels = labels
+ }
+}
+
func WithSubscriptionChannel(gvk metav1.GroupVersionKind, name string) SubscriptionOption {
return func(s *v1alpha1.Subscription) {
s.Spec.Channel = corev1.ObjectReference{
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
index c1524b20f98..473ce7c8e4f 100644
--- a/pkg/reconciler/trigger/trigger.go
+++ b/pkg/reconciler/trigger/trigger.go
@@ -297,13 +297,13 @@ func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.Trigger
// getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an
// error.
func (r *Reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b)))
+ return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b.Name)))
}
// getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an
// error.
func (r *Reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b)))
+ return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b.Name)))
}
// getChannel returns the Broker's channel based on the provided label selector if it exists, otherwise it returns an error.
@@ -326,7 +326,7 @@ func (r *Reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labe
// getBrokerFilterService returns the K8s service for trigger 't' if exists,
// otherwise it returns an error.
func (r *Reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
- services, err := r.serviceLister.Services(b.Namespace).List(labels.SelectorFromSet(brokerresources.FilterLabels(b)))
+ services, err := r.serviceLister.Services(b.Namespace).List(labels.SelectorFromSet(brokerresources.FilterLabels(b.Name)))
if err != nil {
return nil, err
}
diff --git a/pkg/reconciler/trigger/trigger_test.go b/pkg/reconciler/trigger/trigger_test.go
index 7fa47f42b08..b88a3541df6 100644
--- a/pkg/reconciler/trigger/trigger_test.go
+++ b/pkg/reconciler/trigger/trigger_test.go
@@ -53,18 +53,6 @@ const (
var (
trueVal = true
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
-
- // Map of events to set test cases' expectations easier.
- events = map[string]corev1.Event{
- triggerReconciled: {Reason: triggerReconciled, Type: corev1.EventTypeNormal},
- triggerUpdateStatusFailed: {Reason: triggerUpdateStatusFailed, Type: corev1.EventTypeWarning},
- triggerReconcileFailed: {Reason: triggerReconcileFailed, Type: corev1.EventTypeWarning},
- subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning},
- subscriptionCreateFailed: {Reason: subscriptionCreateFailed, Type: corev1.EventTypeWarning},
- }
)
func init() {
@@ -498,26 +486,6 @@ func makeTrigger() *v1alpha1.Trigger {
}
}
-func makeReadyTrigger() *v1alpha1.Trigger {
- t := makeTrigger()
- t.Status = *v1alpha1.TestHelper.ReadyTriggerStatus()
- t.Status.SubscriberURI = fmt.Sprintf("http://%s.%s.svc.%s/", subscriberName, testNS, utils.GetClusterDomainName())
- return t
-}
-
-func makeDeletingTrigger() *v1alpha1.Trigger {
- b := makeReadyTrigger()
- b.DeletionTimestamp = &deletionTime
- return b
-}
-
-func makeTriggerWithNamespaceAndName(namespace, name string) *v1alpha1.Trigger {
- t := makeTrigger()
- t.Namespace = namespace
- t.Name = name
- return t
-}
-
func makeBroker() *v1alpha1.Broker {
return &v1alpha1.Broker{
TypeMeta: metav1.TypeMeta{
diff --git a/pkg/reconciler/v1alpha1/broker/broker.go b/pkg/reconciler/v1alpha1/broker/broker.go
index 6a1032b5189..d0751b786e1 100644
--- a/pkg/reconciler/v1alpha1/broker/broker.go
+++ b/pkg/reconciler/v1alpha1/broker/broker.go
@@ -19,32 +19,36 @@ package broker
import (
"context"
"fmt"
+ "reflect"
+ "time"
+
+ "k8s.io/client-go/tools/cache"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ eventinglisters "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
"github.com/knative/eventing/pkg/reconciler/names"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
+ "github.com/knative/pkg/controller"
"go.uber.org/zap"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
- k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/client-go/tools/record"
- "sigs.k8s.io/controller-runtime/pkg/client"
- runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/handler"
- "sigs.k8s.io/controller-runtime/pkg/manager"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
- "sigs.k8s.io/controller-runtime/pkg/source"
+ appsv1informers "k8s.io/client-go/informers/apps/v1"
+ corev1informers "k8s.io/client-go/informers/core/v1"
+ appsv1listers "k8s.io/client-go/listers/apps/v1"
+ corev1listers "k8s.io/client-go/listers/core/v1"
)
const (
+ // ReconcilerName is the name of the reconciler.
+ ReconcilerName = "Brokers"
// controllerAgentName is the string used by this controller to identify
// itself when creating events.
controllerAgentName = "broker-controller"
@@ -57,11 +61,15 @@ const (
ingressSubscriptionCreateFailed = "IngressSubscriptionCreateFailed"
)
-type reconciler struct {
- client client.Client
- recorder record.EventRecorder
+type Reconciler struct {
+ *reconciler.Base
- logger *zap.Logger
+ // listers index properties about resources
+ brokerLister eventinglisters.BrokerLister
+ channelLister eventinglisters.ChannelLister
+ serviceLister corev1listers.ServiceLister
+ deploymentLister appsv1listers.DeploymentLister
+ subscriptionLister eventinglisters.SubscriptionLister
ingressImage string
ingressServiceAccountName string
@@ -69,8 +77,8 @@ type reconciler struct {
filterServiceAccountName string
}
-// Verify the struct implements reconcile.Reconciler.
-var _ reconcile.Reconciler = &reconciler{}
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
// ReconcilerArgs are the arguments needed to create a broker.Reconciler.
type ReconcilerArgs struct {
@@ -80,94 +88,100 @@ type ReconcilerArgs struct {
FilterServiceAccountName string
}
-// ProvideController returns a function that returns a Broker controller.
-func ProvideController(args ReconcilerArgs) func(manager.Manager, *zap.Logger) (controller.Controller, error) {
- return func(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
- // Setup a new controller to Reconcile Brokers.
- c, err := controller.New(controllerAgentName, mgr, controller.Options{
- Reconciler: &reconciler{
- recorder: mgr.GetRecorder(controllerAgentName),
- logger: logger,
-
- ingressImage: args.IngressImage,
- ingressServiceAccountName: args.IngressServiceAccountName,
- filterImage: args.FilterImage,
- filterServiceAccountName: args.FilterServiceAccountName,
- },
- })
- if err != nil {
- return nil, err
- }
-
- // Watch Brokers.
- if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestForObject{}); err != nil {
- return nil, err
- }
+// NewController initializes the controller and is called by the generated code.
+// It registers event handlers to enqueue events.
+func NewController(
+ opt reconciler.Options,
+ brokerInformer eventinginformers.BrokerInformer,
+ subscriptionInformer eventinginformers.SubscriptionInformer,
+ channelInformer eventinginformers.ChannelInformer,
+ serviceInformer corev1informers.ServiceInformer,
+ deploymentInformer appsv1informers.DeploymentInformer,
+ args ReconcilerArgs,
+) *controller.Impl {
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ brokerLister: brokerInformer.Lister(),
+ channelLister: channelInformer.Lister(),
+ serviceLister: serviceInformer.Lister(),
+ deploymentLister: deploymentInformer.Lister(),
+ subscriptionLister: subscriptionInformer.Lister(),
+ ingressImage: args.IngressImage,
+ ingressServiceAccountName: args.IngressServiceAccountName,
+ filterImage: args.FilterImage,
+ filterServiceAccountName: args.FilterServiceAccountName,
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+
+ brokerInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ channelInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Broker")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
- // Watch all the resources that the Broker reconciles.
- for _, t := range []runtime.Object{&v1alpha1.Channel{}, &corev1.Service{}, &v1.Deployment{}} {
- err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Broker{}, IsController: true})
- if err != nil {
- return nil, err
- }
- }
+ serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Broker")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
- return c, nil
- }
-}
+ deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Broker")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
-// InjectClient implements controller runtime's inject.Client.
-func (r *reconciler) InjectClient(c client.Client) error {
- r.client = c
- return nil
+ return impl
}
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Broker resource
// with the current status of the resource.
-func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ctx := context.TODO()
- ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request)))
-
- broker := &v1alpha1.Broker{}
- err := r.client.Get(ctx, request.NamespacedName, broker)
-
- if k8serrors.IsNotFound(err) {
- logging.FromContext(ctx).Info("Could not find Broker")
- return reconcile.Result{}, nil
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
}
+ ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
- if err != nil {
- logging.FromContext(ctx).Error("Could not Get Broker", zap.Error(err))
- return reconcile.Result{}, err
+ // Get the Broker resource with this namespace/name
+ original, err := r.brokerLister.Brokers(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Info("broker key in work queue no longer exists")
+ return nil
+ } else if err != nil {
+ return err
}
- originalReadiness := broker.Status.IsReady()
+ // Don't modify the informers copy
+ broker := original.DeepCopy()
- // Reconcile this copy of the Broker and then write back any status updates regardless of
- // whether the reconcile error out.
- reconcileErr := r.reconcile(ctx, broker)
- if reconcileErr != nil {
- logging.FromContext(ctx).Error("Error reconciling Broker", zap.Error(reconcileErr))
- r.recorder.Event(broker, corev1.EventTypeWarning, brokerReconcileError, fmt.Sprintf("Broker reconcile error: %v", reconcileErr))
+ // Reconcile this copy of the Broker and then write back any status
+ // updates regardless of whether the reconcile errors out.
+ err = r.reconcile(ctx, broker)
+ if err != nil {
+ logging.FromContext(ctx).Warn("Error reconciling Broker", zap.Error(err))
+ r.Recorder.Eventf(broker, corev1.EventTypeWarning, brokerReconcileError, fmt.Sprintf("Broker reconcile error: %v", err))
} else {
logging.FromContext(ctx).Debug("Broker reconciled")
- if originalReadiness != broker.Status.IsReady() {
- r.recorder.Event(broker, corev1.EventTypeNormal, brokerReadinessChanged, fmt.Sprintf("Broker readiness changed to %v", broker.Status.IsReady()))
- }
}
- if _, err = r.updateStatus(broker); err != nil {
- logging.FromContext(ctx).Error("Failed to update Broker status", zap.Error(err))
- r.recorder.Eventf(broker, corev1.EventTypeWarning, brokerUpdateStatusFailed, "Failed to update Broker's status: %v", err)
- return reconcile.Result{}, err
+ if _, updateStatusErr := r.updateStatus(ctx, broker); updateStatusErr != nil {
+ logging.FromContext(ctx).Warn("Failed to update the Broker status", zap.Error(updateStatusErr))
+ r.Recorder.Eventf(broker, corev1.EventTypeWarning, brokerUpdateStatusFailed, "Failed to update Broker's status: %v", updateStatusErr)
+ return updateStatusErr
}
// Requeue if the resource is not ready:
- return reconcile.Result{}, reconcileErr
+ return err
}
-func (r *reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) error {
+func (r *Reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) error {
b.Status.InitializeConditions()
// 1. Trigger Channel is created for all events. Triggers will Subscribe to this Channel.
@@ -249,48 +263,36 @@ func (r *reconciler) reconcile(ctx context.Context, b *v1alpha1.Broker) error {
return nil
}
-// updateStatus may in fact update the broker's finalizers in addition to the status.
-func (r *reconciler) updateStatus(broker *v1alpha1.Broker) (*v1alpha1.Broker, error) {
- ctx := context.TODO()
- objectKey := client.ObjectKey{Namespace: broker.Namespace, Name: broker.Name}
- latestBroker := &v1alpha1.Broker{}
-
- if err := r.client.Get(ctx, objectKey, latestBroker); err != nil {
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.Broker) (*v1alpha1.Broker, error) {
+ broker, err := r.brokerLister.Brokers(desired.Namespace).Get(desired.Name)
+ if err != nil {
return nil, err
}
- brokerChanged := false
-
- if !equality.Semantic.DeepEqual(latestBroker.Finalizers, broker.Finalizers) {
- latestBroker.SetFinalizers(broker.ObjectMeta.Finalizers)
- if err := r.client.Update(ctx, latestBroker); err != nil {
- return nil, err
- }
- brokerChanged = true
+ // If there's nothing to update, just return.
+ if reflect.DeepEqual(broker.Status, desired.Status) {
+ return broker, nil
}
- if equality.Semantic.DeepEqual(latestBroker.Status, broker.Status) {
- return latestBroker, nil
- }
+ becomesReady := desired.Status.IsReady() && !broker.Status.IsReady()
- if brokerChanged {
- // Re-fetch.
- latestBroker = &v1alpha1.Broker{}
- if err := r.client.Get(ctx, objectKey, latestBroker); err != nil {
- return nil, err
- }
- }
+ // Don't modify the informers copy.
+ existing := broker.DeepCopy()
+ existing.Status = desired.Status
- latestBroker.Status = broker.Status
- if err := r.client.Status().Update(ctx, latestBroker); err != nil {
- return nil, err
+ b, err := r.EventingClientSet.EventingV1alpha1().Brokers(desired.Namespace).UpdateStatus(existing)
+ if err == nil && becomesReady {
+ duration := time.Since(b.ObjectMeta.CreationTimestamp.Time)
+ logging.FromContext(ctx).Sugar().Infof("Broker %q became ready after %v", broker.Name, duration)
+ r.Recorder.Event(broker, corev1.EventTypeNormal, brokerReadinessChanged, fmt.Sprintf("Broker %q became ready", broker.Name))
+ //r.StatsReporter.ReportServiceReady(broker.Namespace, broker.Name, duration) // TODO: stats
}
- return latestBroker, nil
+ return b, err
}
// reconcileFilterDeployment reconciles Broker's 'b' filter deployment.
-func (r *reconciler) reconcileFilterDeployment(ctx context.Context, b *v1alpha1.Broker) (*v1.Deployment, error) {
+func (r *Reconciler) reconcileFilterDeployment(ctx context.Context, b *v1alpha1.Broker) (*v1.Deployment, error) {
expected := resources.MakeFilterDeployment(&resources.FilterArgs{
Broker: b,
Image: r.filterImage,
@@ -300,32 +302,31 @@ func (r *reconciler) reconcileFilterDeployment(ctx context.Context, b *v1alpha1.
}
// reconcileFilterService reconciles Broker's 'b' filter service.
-func (r *reconciler) reconcileFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
+func (r *Reconciler) reconcileFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
expected := resources.MakeFilterService(b)
return r.reconcileService(ctx, expected)
}
-func (r *reconciler) reconcileTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
+func (r *Reconciler) reconcileTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
get := func() (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(TriggerChannelLabels(b)))
+ return r.getChannel(ctx, b, labels.SelectorFromSet(TriggerChannelLabels(b.Name)))
}
return r.reconcileChannel(ctx, get, newTriggerChannel(b))
}
-func (r *reconciler) reconcileIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
+func (r *Reconciler) reconcileIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) {
get := func() (*v1alpha1.Channel, error) {
- return r.getChannel(ctx, b, labels.SelectorFromSet(IngressChannelLabels(b)))
+ return r.getChannel(ctx, b, labels.SelectorFromSet(IngressChannelLabels(b.Name)))
}
return r.reconcileChannel(ctx, get, newIngressChannel(b))
}
// reconcileChannel reconciles Broker's 'b' underlying channel.
-func (r *reconciler) reconcileChannel(ctx context.Context, get func() (*v1alpha1.Channel, error), newChan *v1alpha1.Channel) (*v1alpha1.Channel, error) {
+func (r *Reconciler) reconcileChannel(ctx context.Context, get func() (*v1alpha1.Channel, error), newChan *v1alpha1.Channel) (*v1alpha1.Channel, error) {
c, err := get()
// If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- c = newChan
- err = r.client.Create(ctx, c)
+ if apierrs.IsNotFound(err) {
+ c, err = r.EventingClientSet.EventingV1alpha1().Channels(newChan.Namespace).Create(newChan)
if err != nil {
return nil, err
}
@@ -340,35 +341,26 @@ func (r *reconciler) reconcileChannel(ctx context.Context, get func() (*v1alpha1
}
// getChannel returns the Channel object for Broker 'b' if exists, otherwise it returns an error.
-func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) {
- list := &v1alpha1.ChannelList{}
- opts := &runtimeclient.ListOptions{
- Namespace: b.Namespace,
- LabelSelector: ls,
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
+func (r *Reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) {
+ channels, err := r.channelLister.Channels(b.Namespace).List(ls)
if err != nil {
return nil, err
}
- for _, c := range list.Items {
- if metav1.IsControlledBy(&c, b) {
- return &c, nil
+ for _, c := range channels {
+ if metav1.IsControlledBy(c, b) {
+ return c, nil
}
}
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
+ return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
}
func newTriggerChannel(b *v1alpha1.Broker) *v1alpha1.Channel {
- return newChannel(b, TriggerChannelLabels(b))
+ return newChannel(b, TriggerChannelLabels(b.Name))
}
func newIngressChannel(b *v1alpha1.Broker) *v1alpha1.Channel {
- return newChannel(b, IngressChannelLabels(b))
+ return newChannel(b, IngressChannelLabels(b.Name))
}
// newChannel creates a new Channel for Broker 'b'.
@@ -395,45 +387,42 @@ func newChannel(b *v1alpha1.Broker, l map[string]string) *v1alpha1.Channel {
}
}
-// TriggerChannelLabels are all the labels placed on the Trigger Channel for the given Broker. This
+// TriggerChannelLabels are all the labels placed on the Trigger Channel for the given brokerName. This
// should only be used by Broker and Trigger code.
-func TriggerChannelLabels(b *v1alpha1.Broker) map[string]string {
+func TriggerChannelLabels(brokerName string) map[string]string {
return map[string]string{
- "eventing.knative.dev/broker": b.Name,
+ "eventing.knative.dev/broker": brokerName,
"eventing.knative.dev/brokerEverything": "true",
}
}
-// IngressChannelLabels are all the labels placed on the Ingress Channel for the given Broker. This
+// IngressChannelLabels are all the labels placed on the Ingress Channel for the given brokerName. This
// should only be used by Broker and Trigger code.
-func IngressChannelLabels(b *v1alpha1.Broker) map[string]string {
+func IngressChannelLabels(brokerName string) map[string]string {
return map[string]string{
- "eventing.knative.dev/broker": b.Name,
+ "eventing.knative.dev/broker": brokerName,
"eventing.knative.dev/brokerIngress": "true",
}
}
// reconcileDeployment reconciles the K8s Deployment 'd'.
-func (r *reconciler) reconcileDeployment(ctx context.Context, d *v1.Deployment) (*v1.Deployment, error) {
- name := types.NamespacedName{
- Namespace: d.Namespace,
- Name: d.Name,
- }
- current := &v1.Deployment{}
- err := r.client.Get(ctx, name, current)
- if k8serrors.IsNotFound(err) {
- err = r.client.Create(ctx, d)
+func (r *Reconciler) reconcileDeployment(ctx context.Context, d *v1.Deployment) (*v1.Deployment, error) {
+ current, err := r.deploymentLister.Deployments(d.Namespace).Get(d.Name)
+ if apierrs.IsNotFound(err) {
+ current, err = r.KubeClientSet.AppsV1().Deployments(d.Namespace).Create(d)
if err != nil {
return nil, err
}
- return d, nil
+ return current, nil
} else if err != nil {
return nil, err
}
if !equality.Semantic.DeepDerivative(d.Spec, current.Spec) {
- current.Spec = d.Spec
- err = r.client.Update(ctx, current)
+ // Don't modify the informers copy.
+ desired := current.DeepCopy()
+ desired.Spec = d.Spec
+ current, err = r.KubeClientSet.AppsV1().Deployments(current.Namespace).Update(desired)
if err != nil {
return nil, err
}
@@ -442,19 +431,14 @@ func (r *reconciler) reconcileDeployment(ctx context.Context, d *v1.Deployment)
}
// reconcileService reconciles the K8s Service 'svc'.
-func (r *reconciler) reconcileService(ctx context.Context, svc *corev1.Service) (*corev1.Service, error) {
- name := types.NamespacedName{
- Namespace: svc.Namespace,
- Name: svc.Name,
- }
- current := &corev1.Service{}
- err := r.client.Get(ctx, name, current)
- if k8serrors.IsNotFound(err) {
- err = r.client.Create(ctx, svc)
+func (r *Reconciler) reconcileService(ctx context.Context, svc *corev1.Service) (*corev1.Service, error) {
+ current, err := r.serviceLister.Services(svc.Namespace).Get(svc.Name)
+ if apierrs.IsNotFound(err) {
+ current, err = r.KubeClientSet.CoreV1().Services(svc.Namespace).Create(svc)
if err != nil {
return nil, err
}
- return svc, nil
+ return current, nil
} else if err != nil {
return nil, err
}
@@ -463,8 +447,10 @@ func (r *reconciler) reconcileService(ctx context.Context, svc *corev1.Service)
// encounter an error while updating.
svc.Spec.ClusterIP = current.Spec.ClusterIP
if !equality.Semantic.DeepDerivative(svc.Spec, current.Spec) {
- current.Spec = svc.Spec
- err = r.client.Update(ctx, current)
+ // Don't modify the informers copy.
+ desired := current.DeepCopy()
+ desired.Spec = svc.Spec
+ current, err = r.KubeClientSet.CoreV1().Services(current.Namespace).Update(desired)
if err != nil {
return nil, err
}
@@ -473,7 +459,7 @@ func (r *reconciler) reconcileService(ctx context.Context, svc *corev1.Service)
}
// reconcileIngressDeployment reconciles the Ingress Deployment.
-func (r *reconciler) reconcileIngressDeployment(ctx context.Context, b *v1alpha1.Broker, c *v1alpha1.Channel) (*v1.Deployment, error) {
+func (r *Reconciler) reconcileIngressDeployment(ctx context.Context, b *v1alpha1.Broker, c *v1alpha1.Channel) (*v1.Deployment, error) {
expected := resources.MakeIngress(&resources.IngressArgs{
Broker: b,
Image: r.ingressImage,
@@ -484,19 +470,18 @@ func (r *reconciler) reconcileIngressDeployment(ctx context.Context, b *v1alpha1
}
// reconcileIngressService reconciles the Ingress Service.
-func (r *reconciler) reconcileIngressService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
+func (r *Reconciler) reconcileIngressService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) {
expected := resources.MakeIngressService(b)
return r.reconcileService(ctx, expected)
}
-func (r *reconciler) reconcileIngressSubscription(ctx context.Context, b *v1alpha1.Broker, c *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {
+func (r *Reconciler) reconcileIngressSubscription(ctx context.Context, b *v1alpha1.Broker, c *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) {
expected := makeSubscription(b, c, svc)
sub, err := r.getIngressSubscription(ctx, b)
// If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- sub = expected
- err = r.client.Create(ctx, sub)
+ if apierrs.IsNotFound(err) {
+ sub, err = r.EventingClientSet.EventingV1alpha1().Subscriptions(expected.Namespace).Create(expected)
if err != nil {
return nil, err
}
@@ -510,17 +495,16 @@ func (r *reconciler) reconcileIngressSubscription(ctx context.Context, b *v1alph
if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) {
// Given that spec.channel is immutable, we cannot just update the subscription. We delete
// it instead, and re-create it.
- err = r.client.Delete(ctx, sub)
+ err = r.EventingClientSet.EventingV1alpha1().Subscriptions(sub.Namespace).Delete(sub.Name, &metav1.DeleteOptions{})
if err != nil {
logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err))
- r.recorder.Eventf(b, corev1.EventTypeWarning, ingressSubscriptionDeleteFailed, "Delete Broker Ingress' subscription failed: %v", err)
+ r.Recorder.Eventf(b, corev1.EventTypeWarning, ingressSubscriptionDeleteFailed, "Delete Broker Ingress' subscription failed: %v", err)
return nil, err
}
- sub = expected
- err = r.client.Create(ctx, sub)
+ sub, err = r.EventingClientSet.EventingV1alpha1().Subscriptions(expected.Namespace).Create(expected)
if err != nil {
logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err))
- r.recorder.Eventf(b, corev1.EventTypeWarning, ingressSubscriptionCreateFailed, "Create Broker Ingress' subscription failed: %v", err)
+ r.Recorder.Eventf(b, corev1.EventTypeWarning, ingressSubscriptionCreateFailed, "Create Broker Ingress' subscription failed: %v", err)
return nil, err
}
}
@@ -529,27 +513,18 @@ func (r *reconciler) reconcileIngressSubscription(ctx context.Context, b *v1alph
// getSubscription returns the subscription of trigger 't' if exists,
// otherwise it returns an error.
-func (r *reconciler) getIngressSubscription(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Subscription, error) {
- list := &v1alpha1.SubscriptionList{}
- opts := &runtimeclient.ListOptions{
- Namespace: b.Namespace,
- LabelSelector: labels.SelectorFromSet(ingressSubscriptionLabels(b)),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := r.client.List(ctx, opts, list)
+func (r *Reconciler) getIngressSubscription(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Subscription, error) {
+ subscriptions, err := r.subscriptionLister.Subscriptions(b.Namespace).List(labels.SelectorFromSet(ingressSubscriptionLabels(b.Name)))
if err != nil {
return nil, err
}
- for _, s := range list.Items {
- if metav1.IsControlledBy(&s, b) {
- return &s, nil
+ for _, s := range subscriptions {
+ if metav1.IsControlledBy(s, b) {
+ return s, nil
}
}
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
+ return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
}
// makeSubscription returns a placeholder subscription for trigger 't', channel 'c', and service 'svc'.
@@ -565,7 +540,7 @@ func makeSubscription(b *v1alpha1.Broker, c *v1alpha1.Channel, svc *corev1.Servi
Kind: "Broker",
}),
},
- Labels: ingressSubscriptionLabels(b),
+ Labels: ingressSubscriptionLabels(b.Name),
},
Spec: v1alpha1.SubscriptionSpec{
Channel: corev1.ObjectReference{
@@ -584,9 +559,9 @@ func makeSubscription(b *v1alpha1.Broker, c *v1alpha1.Channel, svc *corev1.Servi
}
}
-func ingressSubscriptionLabels(b *v1alpha1.Broker) map[string]string {
+func ingressSubscriptionLabels(brokerName string) map[string]string {
return map[string]string{
- "eventing.knative.dev/broker": b.Name,
+ "eventing.knative.dev/broker": brokerName,
"eventing.knative.dev/brokerIngress": "true",
}
}
diff --git a/pkg/reconciler/v1alpha1/broker/broker_test.go b/pkg/reconciler/v1alpha1/broker/broker_test.go
index 615912947a7..524ce825420 100644
--- a/pkg/reconciler/v1alpha1/broker/broker_test.go
+++ b/pkg/reconciler/v1alpha1/broker/broker_test.go
@@ -17,27 +17,25 @@ limitations under the License.
package broker
import (
- "context"
- "errors"
"fmt"
- "strings"
"testing"
- "github.com/google/go-cmp/cmp"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "k8s.io/apimachinery/pkg/util/intstr"
+
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/eventing/pkg/reconciler"
+ . "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources"
"github.com/knative/eventing/pkg/utils"
- duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
- "go.uber.org/zap"
- appsv1 "k8s.io/api/apps/v1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ . "github.com/knative/pkg/reconciler/testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ clientgotesting "k8s.io/client-go/testing"
)
const (
@@ -48,33 +46,43 @@ const (
filterSA = "filter-SA"
ingressImage = "ingress-image"
ingressSA = "ingress-SA"
+
+ filterContainerName = "filter"
+ ingressContainerName = "ingress"
)
var (
trueVal = true
- channelProvisioner = &corev1.ObjectReference{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "ClusterChannelProvisioner",
- Name: "my-provisioner",
- }
+ testKey = fmt.Sprintf("%s/%s", testNS, brokerName)
+ channelGenerateName = fmt.Sprintf("%s-broker-", brokerName)
+ subscriptionChannelName = fmt.Sprintf("%s-broker", brokerName)
triggerChannelHostname = fmt.Sprintf("foo.bar.svc.%s", utils.GetClusterDomainName())
ingressChannelHostname = fmt.Sprintf("baz.qux.svc.%s", utils.GetClusterDomainName())
- ingressChannelName = "ingress-channel"
+ filterDeploymentName = fmt.Sprintf("%s-broker-filter", brokerName)
+ filterServiceName = fmt.Sprintf("%s-broker-filter", brokerName)
+ ingressDeploymentName = fmt.Sprintf("%s-broker-ingress", brokerName)
+ ingressServiceName = fmt.Sprintf("%s-broker", brokerName)
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
+ ingressSubscriptionGenerateName = fmt.Sprintf("internal-ingress-%s-", brokerName)
- // Map of events to set test cases' expectations easier.
- events = map[string]corev1.Event{
- brokerReadinessChanged: {Reason: brokerReadinessChanged, Type: corev1.EventTypeNormal},
- brokerReconcileError: {Reason: brokerReconcileError, Type: corev1.EventTypeWarning},
- brokerUpdateStatusFailed: {Reason: brokerUpdateStatusFailed, Type: corev1.EventTypeWarning},
- ingressSubscriptionDeleteFailed: {Reason: ingressSubscriptionDeleteFailed, Type: corev1.EventTypeWarning},
- ingressSubscriptionCreateFailed: {Reason: ingressSubscriptionCreateFailed, Type: corev1.EventTypeWarning},
+ channelGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "Channel",
+ }
+
+ serviceGVK = metav1.GroupVersionKind{
+ Version: "v1",
+ Kind: "Service",
+ }
+
+ provisionerGVK = metav1.GroupVersionKind{
+ Group: "eventing.knative.dev",
+ Version: "v1alpha1",
+ Kind: "ClusterChannelProvisioner",
}
)
@@ -83,971 +91,920 @@ func init() {
_ = v1alpha1.AddToScheme(scheme.Scheme)
}
-func TestProvideController(t *testing.T) {
- // TODO(grantr) This needs a mock of manager.Manager. Creating a manager
- // with a fake Config fails because the Manager tries to contact the
- // apiserver.
-
- // cfg := &rest.Config{
- // Host: "http://foo:80",
- // }
- //
- // mgr, err := manager.New(cfg, manager.Options{})
- // if err != nil {
- // t.Fatalf("Error creating manager: %v", err)
- // }
- //
- // _, err = ProvideController(mgr)
- // if err != nil {
- // t.Fatalf("Error in ProvideController: %v", err)
- // }
-}
-
-func TestInjectClient(t *testing.T) {
- r := &reconciler{}
- orig := r.client
- n := fake.NewFakeClient()
- if orig == n {
- t.Errorf("Original and new clients are identical: %v", orig)
- }
- err := r.InjectClient(n)
- if err != nil {
- t.Errorf("Unexpected error injecting the client: %v", err)
- }
- if n != r.client {
- t.Errorf("Unexpected client. Expected: '%v'. Actual: '%v'", n, r.client)
- }
-}
-
func TestReconcile(t *testing.T) {
- testCases := []controllertesting.TestCase{
- {
- Name: "Broker not found",
- },
- {
- Name: "Broker.Get fails",
- Scheme: scheme.Scheme,
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error getting the Broker")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting the Broker",
- },
- {
- Name: "Broker is being deleted",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeDeletingBroker(),
- },
- },
- {
- Name: "Trigger Channel.List error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- // Only match the Trigger Channel labels.
- ls := labels.FormatLabels(TriggerChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
-
- if _, ok := list.(*v1alpha1.ChannelList); ok && opts.LabelSelector.Matches(l) {
- return controllertesting.Handled, errors.New("test error getting Trigger Channel")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting Trigger Channel",
- },
- {
- Name: "Trigger Channel.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if c, ok := obj.(*v1alpha1.Channel); ok {
- if cmp.Equal(c.Labels, TriggerChannelLabels(makeBroker())) {
- return controllertesting.Handled, errors.New("test error creating Trigger Channel")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating Trigger Channel",
- },
- {
- Name: "Trigger Channel is different than expected",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeDifferentTriggerChannel(),
- },
- WantPresent: []runtime.Object{
- // This is special because the Channel is not updated, unlike most things that
- // differ from expected.
- // TODO uncomment the following line once our test framework supports searching for
- // GenerateName.
- // makeDifferentTriggerChannel(),
- },
- },
- {
- Name: "Trigger Channel is not yet Addressable",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeNonAddressableTriggerChannel(),
- },
- },
- {
- Name: "Filter Deployment.Get error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*appsv1.Deployment); ok {
- if strings.Contains(key.Name, "filter") {
- return controllertesting.Handled, errors.New("test error getting filter Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting filter Deployment",
- },
- {
- Name: "Filter Deployment.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if d, ok := obj.(*appsv1.Deployment); ok {
- if d.Labels["eventing.knative.dev/brokerRole"] == "filter" {
- return controllertesting.Handled, errors.New("test error creating filter Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating filter Deployment",
- },
- {
- Name: "Filter Deployment.Update error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeDifferentFilterDeployment(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if d, ok := obj.(*appsv1.Deployment); ok {
- if d.Labels["eventing.knative.dev/brokerRole"] == "filter" {
- return controllertesting.Handled, errors.New("test error updating filter Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error updating filter Deployment",
- },
- {
- Name: "Filter Service.Get error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.Service); ok {
- if strings.Contains(key.Name, "filter") {
- return controllertesting.Handled, errors.New("test error getting filter Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting filter Service",
- },
+ table := TableTest{
{
- Name: "Filter Service.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if svc, ok := obj.(*corev1.Service); ok {
- if svc.Labels["eventing.knative.dev/brokerRole"] == "filter" {
- return controllertesting.Handled, errors.New("test error creating filter Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating filter Service",
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
},
{
- Name: "Filter Service.Update error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeDifferentFilterService(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if svc, ok := obj.(*corev1.Service); ok {
- if svc.Labels["eventing.knative.dev/brokerRole"] == "filter" {
- return controllertesting.Handled, errors.New("test error updating filter Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error updating filter Service",
- },
- {
- Name: "Ingress Deployment.Get error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*appsv1.Deployment); ok {
- if strings.Contains(key.Name, "ingress") {
- return controllertesting.Handled, errors.New("test error getting ingress Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting ingress Deployment",
+ Name: "Broker not found",
+ Key: testKey,
},
{
- Name: "Ingress Deployment.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if d, ok := obj.(*appsv1.Deployment); ok {
- if d.Labels["eventing.knative.dev/brokerRole"] == "ingress" {
- return controllertesting.Handled, errors.New("test error creating ingress Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
+ Name: "Broker is being deleted",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithBrokerDeletionTimestamp),
},
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating ingress Deployment",
},
{
- Name: "Ingress Deployment.Update error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeDifferentIngressDeployment(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if d, ok := obj.(*appsv1.Deployment); ok {
- if d.Labels["eventing.knative.dev/brokerRole"] == "ingress" {
- return controllertesting.Handled, errors.New("test error updating ingress Deployment")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error updating ingress Deployment",
+ Name: "Trigger Channel.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ },
+ WantCreates: []metav1.Object{
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner")),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithInitBrokerConditions,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithTriggerChannelFailed("ChannelFailure", "inducing failure for create channels")),
+ }},
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "channels"),
+ },
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create channels"),
+ },
+ WantErr: true,
},
{
- Name: "Ingress Service.Get error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- func(_ client.Client, _ context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*corev1.Service); ok {
- if key.Name == fmt.Sprintf("%s-broker", brokerName) {
- return controllertesting.Handled, errors.New("test error getting ingress Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting ingress Service",
+ Name: "Trigger Channel is not yet Addressable",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithInitChannelConditions,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelAddress("")),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelFailed("NoAddress", "Channel does not have an address.")),
+ }},
},
{
- Name: "Ingress Service.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if svc, ok := obj.(*corev1.Service); ok {
- if svc.Labels["eventing.knative.dev/brokerRole"] == "ingress" {
- return controllertesting.Handled, errors.New("test error creating ingress Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating ingress Service",
- },
- {
- Name: "Ingress Service.Update error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeDifferentIngressService(),
- },
- Mocks: controllertesting.Mocks{
- MockUpdates: []controllertesting.MockUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if svc, ok := obj.(*corev1.Service); ok {
- if svc.Labels["eventing.knative.dev/brokerRole"] == "ingress" {
- return controllertesting.Handled, errors.New("test error updating ingress Service")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error updating ingress Service",
+ Name: "Filter Deployment.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "deployments"),
+ },
+ WantCreates: []metav1.Object{
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterFailed("DeploymentFailure", "inducing failure for create deployments")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create deployments"),
+ },
+ WantErr: true,
},
{
- Name: "Ingress Channel.List error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
-
- if _, ok := list.(*v1alpha1.ChannelList); ok && opts.LabelSelector.Matches(l) {
- return controllertesting.Handled, errors.New("test error getting Ingress Channel")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting Ingress Channel",
+ Name: "Filter Deployment.Update error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, "some-other-image", envVars(filterContainerName), nil)),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("update", "deployments"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterFailed("DeploymentFailure", "inducing failure for update deployments")),
+ }},
+ WantUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for update deployments"),
+ },
+ WantErr: true,
},
{
- Name: "Ingress Channel.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- // Controller Runtime's fake client totally ignores the opts.LabelSelector, so
- // picks up the Trigger Channel while listing the Ingress Channel. Use a mock to
- // force the correct behavior.
- func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- if _, ok := list.(*v1alpha1.ChannelList); ok {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
- if opts.LabelSelector.Matches(l) {
- return controllertesting.Handled, nil
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if c, ok := obj.(*v1alpha1.Channel); ok {
- if cmp.Equal(c.Labels, IngressChannelLabels(makeBroker())) {
- return controllertesting.Handled, errors.New("test error creating Ingress Channel")
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating Ingress Channel",
+ Name: "Filter Service.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "services"),
+ },
+ WantCreates: []metav1.Object{
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterFailed("ServiceFailure", "inducing failure for create services")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create services"),
+ },
+ WantErr: true,
},
{
- Name: "Ingress Channel is different than expected",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeDifferentIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- // Controller Runtime's fake client totally ignores the opts.LabelSelector, so
- // picks up the Trigger Channel while listing the Ingress Channel. Use a mock to
- // force the correct behavior.
- func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- if cl, ok := list.(*v1alpha1.ChannelList); ok {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
- if opts.LabelSelector.Matches(l) {
- cl.Items = append(cl.Items, *makeDifferentIngressChannel())
- return controllertesting.Handled, nil
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantPresent: []runtime.Object{
- // This is special because the Channel is not updated, unlike most things that
- // differ from expected.
- // TODO uncomment the following line once our test framework supports searching for
- // GenerateName.
- // makeDifferentIngressChannel(),
- },
+ Name: "Filter Service.Update error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 9090))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("update", "services"),
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ }},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterFailed("ServiceFailure", "inducing failure for update services")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for update services"),
+ },
+ WantErr: true,
},
{
- Name: "Ingress Channel is not yet Addressable",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeNonAddressableIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- // Controller Runtime's fake client totally ignores the opts.LabelSelector, so
- // picks up the Trigger Channel while listing the Ingress Channel. Use a mock to
- // force the correct behavior.
- func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- if cl, ok := list.(*v1alpha1.ChannelList); ok {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
- if opts.LabelSelector.Matches(l) {
- cl.Items = append(cl.Items, *makeNonAddressableIngressChannel())
- return controllertesting.Handled, nil
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
+ Name: "Ingress Deployment.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "deployments"),
+ },
+ WantCreates: []metav1.Object{
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080)),
+ ),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressFailed("DeploymentFailure", "inducing failure for create deployments")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create deployments"),
+ },
+ WantErr: true,
},
{
- Name: "Subscription.List error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- func(_ client.Client, _ context.Context, opts *client.ListOptions, list runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := list.(*v1alpha1.SubscriptionList); ok {
- return controllertesting.Handled, errors.New("test error getting Subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error getting Subscription",
+ Name: "Ingress Deployment.Update error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(9090))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("update", "deployments"),
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ }},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressFailed("DeploymentFailure", "inducing failure for update deployments")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for update deployments"),
+ },
+ WantErr: true,
},
{
- Name: "Subscription.Create error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error creating Subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[brokerReconcileError]},
- WantErrMsg: "test error creating Subscription",
+ Name: "Ingress Service.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "services"),
+ },
+ WantCreates: []metav1.Object{
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressFailed("ServiceFailure", "inducing failure for create services")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create services"),
+ },
+ WantErr: true,
},
{
- Name: "Subscription is different than expected",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- },
- WantPresent: []runtime.Object{
- // This is special because the Channel is not updated, unlike most things that
- // differ from expected.
- // TODO uncomment the following line once our test framework supports searching for
- // GenerateName.
- // makeDifferentSubscription(),
- },
+ Name: "Ingress Service.Update error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 9090))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("update", "services"),
+ },
+ WantUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ }},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressFailed("ServiceFailure", "inducing failure for update services")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for update services"),
+ },
+ WantErr: true,
},
{
- Name: "Subscription.Delete error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- makeDifferentSubscription(),
- },
- Mocks: controllertesting.Mocks{
- MockDeletes: []controllertesting.MockDelete{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error deleting Subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[ingressSubscriptionDeleteFailed], events[brokerReconcileError]},
- WantErrMsg: "test error deleting Subscription",
+ Name: "Ingress Channel.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "channels"),
+ },
+ WantCreates: []metav1.Object{
+ NewChannel("", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(IngressChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner")),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressDeploymentAvailable(),
+ WithBrokerAddress(fmt.Sprintf("%s.%s.svc.%s", ingressServiceName, testNS, utils.GetClusterDomainName())),
+ WithIngressChannelFailed("ChannelFailure", "inducing failure for create channels")),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create channels"),
+ },
+ WantErr: true,
},
{
- Name: "Subscription.Create error when recreating",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- makeDifferentSubscription(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Subscription); ok {
- return controllertesting.Handled, errors.New("test error creating Subscription")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantEvent: []corev1.Event{events[ingressSubscriptionCreateFailed], events[brokerReconcileError]},
- WantErrMsg: "test error creating Subscription",
+ Name: "Subscription.Create error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ // Use the channel name to avoid conflicting with the ingress one.
+ NewChannel("filter-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ // Use the channel name to avoid conflicting with the filter one.
+ NewChannel("ingress-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(IngressChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(ingressChannelHostname)),
+ },
+ WantCreates: []metav1.Object{
+ NewSubscription("", testNS,
+ WithSubscriptionGenerateName(ingressSubscriptionGenerateName),
+ WithSubscriptionOwnerReferences(ownerReferences()),
+ WithSubscriptionLabels(ingressSubscriptionLabels(brokerName)),
+ WithSubscriptionChannel(channelGVK, "ingress-channel"),
+ WithSubscriptionSubscriberRef(serviceGVK, ingressServiceName)),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressDeploymentAvailable(),
+ WithBrokerAddress(fmt.Sprintf("%s.%s.svc.%s", ingressServiceName, testNS, utils.GetClusterDomainName())),
+ WithBrokerIngressChannelReady(),
+ WithBrokerIngressSubscriptionFailed("SubscriptionFailure", "inducing failure for create subscriptions"),
+ ),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create subscriptions"),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "subscriptions"),
+ },
+ WantErr: true,
},
{
- Name: "Broker.Get for status update fails",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockGets: []controllertesting.MockGet{
- // The first Get works.
- func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, innerClient.Get(ctx, key, obj)
- }
- return controllertesting.Unhandled, nil
- },
- // The second Get fails.
- func(_ client.Client, _ context.Context, _ client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error getting the Broker for status update")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error getting the Broker for status update",
- WantEvent: []corev1.Event{events[brokerUpdateStatusFailed]},
+ Name: "Subscription.Delete error",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ // Use the channel name to avoid conflicting with the ingress one.
+ NewChannel("filter-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ // Use the channel name to avoid conflicting with the filter one.
+ NewChannel("ingress-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(IngressChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(ingressChannelHostname)),
+ NewSubscription("subs", testNS,
+ WithSubscriptionGenerateName(ingressSubscriptionGenerateName),
+ WithSubscriptionOwnerReferences(ownerReferences()),
+ WithSubscriptionLabels(ingressSubscriptionLabels(brokerName)),
+ WithSubscriptionChannel(channelGVK, "ingress-channel"),
+ WithSubscriptionSubscriberRef(serviceGVK, "")),
+ },
+ WantDeletes: []clientgotesting.DeleteActionImpl{{
+ Name: "subs",
+ }},
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressDeploymentAvailable(),
+ WithBrokerAddress(fmt.Sprintf("%s.%s.svc.%s", ingressServiceName, testNS, utils.GetClusterDomainName())),
+ WithBrokerIngressChannelReady(),
+ WithBrokerIngressSubscriptionFailed("SubscriptionFailure", "inducing failure for delete subscriptions"),
+ ),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, ingressSubscriptionDeleteFailed, "%v", "Delete Broker Ingress' subscription failed: inducing failure for delete subscriptions"),
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for delete subscriptions"),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("delete", "subscriptions"),
+ },
+ WantErr: true,
},
{
- Name: "Broker.Status.Update error",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- makeTriggerChannel(),
- makeIngressChannel(),
- },
- Mocks: controllertesting.Mocks{
- MockStatusUpdates: []controllertesting.MockStatusUpdate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*v1alpha1.Broker); ok {
- return controllertesting.Handled, errors.New("test error updating the Broker status")
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantErrMsg: "test error updating the Broker status",
- WantEvent: []corev1.Event{events[brokerUpdateStatusFailed]},
+ Name: "Subscription.Create error when recreating",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ // Use the channel name to avoid conflicting with the ingress one.
+ NewChannel("filter-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ // Use the channel name to avoid conflicting with the filter one.
+ NewChannel("ingress-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(IngressChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(ingressChannelHostname)),
+ NewSubscription("subs", testNS,
+ WithSubscriptionGenerateName(ingressSubscriptionGenerateName),
+ WithSubscriptionOwnerReferences(ownerReferences()),
+ WithSubscriptionLabels(ingressSubscriptionLabels(brokerName)),
+ WithSubscriptionChannel(channelGVK, "ingress-channel"),
+ WithSubscriptionSubscriberRef(serviceGVK, "")),
+ },
+ WantDeletes: []clientgotesting.DeleteActionImpl{{
+ Name: "subs",
+ }},
+ WantCreates: []metav1.Object{
+ NewSubscription("", testNS,
+ WithSubscriptionGenerateName(ingressSubscriptionGenerateName),
+ WithSubscriptionOwnerReferences(ownerReferences()),
+ WithSubscriptionLabels(ingressSubscriptionLabels(brokerName)),
+ WithSubscriptionChannel(channelGVK, "ingress-channel"),
+ WithSubscriptionSubscriberRef(serviceGVK, ingressServiceName)),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions,
+ WithTriggerChannelReady(),
+ WithFilterDeploymentAvailable(),
+ WithIngressDeploymentAvailable(),
+ WithBrokerAddress(fmt.Sprintf("%s.%s.svc.%s", ingressServiceName, testNS, utils.GetClusterDomainName())),
+ WithBrokerIngressChannelReady(),
+ WithBrokerIngressSubscriptionFailed("SubscriptionFailure", "inducing failure for create subscriptions"),
+ ),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, ingressSubscriptionCreateFailed, "%v", "Create Broker Ingress' subscription failed: inducing failure for create subscriptions"),
+ Eventf(corev1.EventTypeWarning, brokerReconcileError, "Broker reconcile error: %v", "inducing failure for create subscriptions"),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "subscriptions"),
+ },
+ WantErr: true,
},
{
- Name: "Successful reconcile",
- Scheme: scheme.Scheme,
- InitialState: []runtime.Object{
- makeBroker(),
- // The Channel needs to be addressable for the reconcile to succeed.
- makeTriggerChannel(),
- makeIngressChannel(),
- makeTestSubscription(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: []controllertesting.MockList{
- // Controller Runtime's fake client totally ignores the opts.LabelSelector, so
- // picks up the Trigger Channel while listing the Ingress Channel. Use a mock to
- // force the correct behavior.
- func(innerClient client.Client, ctx context.Context, opts *client.ListOptions, list runtime.Object) (handled controllertesting.MockHandled, e error) {
- if cl, ok := list.(*v1alpha1.ChannelList); ok {
- // Only match the Ingress Channel labels.
- ls := labels.FormatLabels(IngressChannelLabels(makeBroker()))
- l, _ := labels.ConvertSelectorToLabelsMap(ls)
- if opts.LabelSelector.Matches(l) {
- cl.Items = append(cl.Items, *makeIngressChannel())
- return controllertesting.Handled, nil
- }
- }
- return controllertesting.Unhandled, nil
- },
- },
- },
- WantPresent: []runtime.Object{
- makeReadyBroker(),
- // TODO Uncomment makeTriggerChannel() when our test framework handles generateName.
- // makeTriggerChannel(),
- makeFilterDeployment(),
- makeFilterService(),
- makeIngressDeployment(),
- makeIngressService(),
- // TODO Uncomment makeIngressChannel() when our test framework handles generateName.
- // makeIngressChannel(),
- makeTestSubscription(),
- },
- WantEvent: []corev1.Event{
- events[brokerReadinessChanged],
+ Name: "Successful Reconciliation",
+ Key: testKey,
+ Objects: []runtime.Object{
+ NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithInitBrokerConditions),
+ // Use the channel name to avoid conflicting with the ingress one.
+ NewChannel("filter-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(TriggerChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(triggerChannelHostname)),
+ NewDeployment(filterDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.FilterLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(filterSA),
+ WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
+ NewService(filterServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.FilterLabels(brokerName)),
+ WithServicePorts(servicePorts(filterContainerName, 8080))),
+ NewDeployment(ingressDeploymentName, testNS,
+ WithDeploymentOwnerReferences(ownerReferences()),
+ WithDeploymentLabels(resources.IngressLabels(brokerName)),
+ WithDeploymentAnnotations(annotations()),
+ WithDeploymentServiceAccount(ingressSA),
+ WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
+ NewService(ingressServiceName, testNS,
+ WithServiceOwnerReferences(ownerReferences()),
+ WithServiceLabels(resources.IngressLabels(brokerName)),
+ WithServicePorts(servicePorts(ingressContainerName, 8080))),
+ // Use the channel name to avoid conflicting with the filter one.
+ NewChannel("ingress-channel", testNS,
+ WithChannelGenerateName(channelGenerateName),
+ WithChannelLabels(IngressChannelLabels(brokerName)),
+ WithChannelOwnerReferences(ownerReferences()),
+ WithChannelProvisioner(provisionerGVK, "my-provisioner"),
+ WithChannelReady,
+ WithChannelAddress(ingressChannelHostname)),
+ NewSubscription("", testNS,
+ WithSubscriptionGenerateName(ingressSubscriptionGenerateName),
+ WithSubscriptionOwnerReferences(ownerReferences()),
+ WithSubscriptionLabels(ingressSubscriptionLabels(brokerName)),
+ WithSubscriptionChannel(channelGVK, "ingress-channel"),
+ WithSubscriptionSubscriberRef(serviceGVK, ingressServiceName),
+ WithSubscriptionReady),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewBroker(brokerName, testNS,
+ WithBrokerChannelProvisioner(channelProvisioner("my-provisioner")),
+ WithBrokerReady,
+ WithBrokerAddress(fmt.Sprintf("%s.%s.svc.%s", ingressServiceName, testNS, utils.GetClusterDomainName())),
+ ),
+ }},
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, brokerReadinessChanged, "Broker %q became ready", brokerName),
},
},
}
- for _, tc := range testCases {
- c := tc.GetClient()
- recorder := tc.GetEventRecorder()
-
- r := &reconciler{
- client: c,
- recorder: recorder,
- logger: zap.NewNop(),
+ defer logtesting.ClearAll()
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ subscriptionLister: listers.GetSubscriptionLister(),
+ brokerLister: listers.GetBrokerLister(),
+ channelLister: listers.GetChannelLister(),
+ serviceLister: listers.GetK8sServiceLister(),
+ deploymentLister: listers.GetDeploymentLister(),
filterImage: filterImage,
filterServiceAccountName: filterSA,
ingressImage: ingressImage,
ingressServiceAccountName: ingressSA,
}
- tc.ReconcileKey = fmt.Sprintf("%s/%s", testNS, brokerName)
- tc.IgnoreTimes = true
- t.Run(tc.Name, tc.Runner(t, r, c, recorder))
- }
+ }))
}
-func makeBroker() *v1alpha1.Broker {
- return &v1alpha1.Broker{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Broker",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- Name: brokerName,
- },
- Spec: v1alpha1.BrokerSpec{
- ChannelTemplate: &v1alpha1.ChannelSpec{
- Provisioner: channelProvisioner,
- },
- },
- }
+func ownerReferences() []metav1.OwnerReference {
+ return []metav1.OwnerReference{{
+ APIVersion: v1alpha1.SchemeGroupVersion.String(),
+ Kind: "Broker",
+ Name: brokerName,
+ Controller: &trueVal,
+ BlockOwnerDeletion: &trueVal,
+ }}
}
-func makeReadyBroker() *v1alpha1.Broker {
- b := makeBroker()
- b.Status = *v1alpha1.TestHelper.ReadyBrokerStatus()
- b.Status.SetAddress(fmt.Sprintf("%s-broker.%s.svc.%s", brokerName, testNS, utils.GetClusterDomainName()))
- return b
+func channelProvisioner(name string) *corev1.ObjectReference {
+ return &corev1.ObjectReference{
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ Kind: "ClusterChannelProvisioner",
+ Name: name,
+ }
}
-func makeDeletingBroker() *v1alpha1.Broker {
- b := makeReadyBroker()
- b.DeletionTimestamp = &deletionTime
- return b
+// TODO remove this once we get rid of istio.
+func annotations() map[string]string {
+ return map[string]string{
+ "sidecar.istio.io/inject": "true",
+ }
}
-func makeTriggerChannel() *v1alpha1.Channel {
- c := &v1alpha1.Channel{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- GenerateName: fmt.Sprintf("%s-broker-", brokerName),
- Labels: map[string]string{
- "eventing.knative.dev/broker": brokerName,
- "eventing.knative.dev/brokerEverything": "true",
+func envVars(containerName string) []corev1.EnvVar {
+ switch containerName {
+ case filterContainerName:
+ return []corev1.EnvVar{
+ {
+ Name: "NAMESPACE",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.namespace",
+ },
+ },
},
- OwnerReferences: []metav1.OwnerReference{
- getOwnerReference(),
+ }
+ case ingressContainerName:
+ return []corev1.EnvVar{
+ {
+ Name: "FILTER",
+ Value: "",
},
- },
- Spec: v1alpha1.ChannelSpec{
- Provisioner: channelProvisioner,
- },
- }
- c.Status.MarkProvisionerInstalled()
- c.Status.MarkProvisioned()
- c.Status.SetAddress(triggerChannelHostname)
- return c
-}
-
-func makeNonAddressableTriggerChannel() *v1alpha1.Channel {
- c := makeTriggerChannel()
- c.Status.Address = duckv1alpha1.Addressable{}
- return c
-}
-
-func makeDifferentTriggerChannel() *v1alpha1.Channel {
- c := makeTriggerChannel()
- c.Spec.Provisioner.Name = "some-other-provisioner"
- return c
-}
-
-func makeIngressChannel() *v1alpha1.Channel {
- c := &v1alpha1.Channel{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- GenerateName: fmt.Sprintf("%s-broker-ingress-", brokerName),
- // The Fake library doesn't understand GenerateName, so give this a name so it doesn't
- // collide with the Trigger Channel.
- Name: ingressChannelName,
- Labels: map[string]string{
- "eventing.knative.dev/broker": brokerName,
- "eventing.knative.dev/brokerIngress": "true",
+ {
+ Name: "CHANNEL",
+ Value: triggerChannelHostname,
},
- OwnerReferences: []metav1.OwnerReference{
- getOwnerReference(),
+ {
+ Name: "BROKER",
+ Value: brokerName,
},
- },
- Spec: v1alpha1.ChannelSpec{
- Provisioner: channelProvisioner,
- },
- }
- c.Status.MarkProvisionerInstalled()
- c.Status.MarkProvisioned()
- c.Status.SetAddress(ingressChannelHostname)
- return c
-}
-
-func makeNonAddressableIngressChannel() *v1alpha1.Channel {
- c := makeIngressChannel()
- c.Status.Address = duckv1alpha1.Addressable{}
- return c
-}
-
-func makeDifferentIngressChannel() *v1alpha1.Channel {
- c := makeIngressChannel()
- c.Spec.Provisioner.Name = "some-other-provisioner"
- return c
-}
-
-func makeFilterDeployment() *appsv1.Deployment {
- d := resources.MakeFilterDeployment(&resources.FilterArgs{
- Broker: makeBroker(),
- Image: filterImage,
- ServiceAccountName: filterSA,
- })
- d.TypeMeta = metav1.TypeMeta{
- APIVersion: "apps/v1",
- Kind: "Deployment",
- }
- return d
-}
-
-func makeDifferentFilterDeployment() *appsv1.Deployment {
- d := makeFilterDeployment()
- d.Spec.Template.Spec.Containers[0].Image = "some-other-image"
- return d
-}
-
-func makeFilterService() *corev1.Service {
- svc := resources.MakeFilterService(makeBroker())
- svc.TypeMeta = metav1.TypeMeta{
- APIVersion: "v1",
- Kind: "Service",
- }
- return svc
-}
-
-func makeDifferentFilterService() *corev1.Service {
- s := makeFilterService()
- s.Spec.Selector["eventing.knative.dev/broker"] = "some-other-value"
- return s
-}
-
-func makeIngressDeployment() *appsv1.Deployment {
- d := resources.MakeIngress(&resources.IngressArgs{
- Broker: makeBroker(),
- Image: ingressImage,
- ServiceAccountName: ingressSA,
- ChannelAddress: triggerChannelHostname,
- })
- d.TypeMeta = metav1.TypeMeta{
- APIVersion: "apps/v1",
- Kind: "Deployment",
- }
- return d
-}
-
-func makeDifferentIngressDeployment() *appsv1.Deployment {
- d := makeIngressDeployment()
- d.Spec.Template.Spec.Containers[0].Image = "some-other-image"
- return d
-}
-
-func makeIngressService() *corev1.Service {
- svc := resources.MakeIngressService(makeBroker())
- svc.TypeMeta = metav1.TypeMeta{
- APIVersion: "v1",
- Kind: "Service",
+ }
}
- return svc
-}
-
-func makeDifferentIngressService() *corev1.Service {
- s := makeIngressService()
- s.Spec.Selector["eventing.knative.dev/broker"] = "some-other-value"
- return s
+ return []corev1.EnvVar{}
}
-func makeTestSubscription() *v1alpha1.Subscription {
- s := &v1alpha1.Subscription{
- TypeMeta: metav1.TypeMeta{
- APIVersion: "eventing.knative.dev/v1alpha1",
- Kind: "Subscription",
- },
- ObjectMeta: metav1.ObjectMeta{
- Namespace: testNS,
- GenerateName: fmt.Sprintf("internal-ingress-%s-", brokerName),
- Labels: map[string]string{
- "eventing.knative.dev/broker": brokerName,
- "eventing.knative.dev/brokerIngress": "true",
- },
- OwnerReferences: []metav1.OwnerReference{
- getOwnerReference(),
- },
+func containerPorts(httpInternal int32) []corev1.ContainerPort {
+ return []corev1.ContainerPort{
+ {
+ Name: "http",
+ ContainerPort: httpInternal,
},
- Spec: v1alpha1.SubscriptionSpec{
- Channel: corev1.ObjectReference{
- APIVersion: v1alpha1.SchemeGroupVersion.String(),
- Kind: "Channel",
- Name: ingressChannelName,
- },
- Subscriber: &v1alpha1.SubscriberSpec{
- Ref: &corev1.ObjectReference{
- APIVersion: "v1",
- Kind: "Service",
- Name: makeIngressService().Name,
- },
- },
+ {
+ Name: "metrics",
+ ContainerPort: 9090,
},
}
- s.Status.MarkChannelReady()
- s.Status.MarkReferencesResolved()
- return s
}
-func makeDifferentSubscription() *v1alpha1.Subscription {
- s := makeTestSubscription()
- s.Spec.Subscriber.Ref = nil
- url := "http://example.com/"
- s.Spec.Subscriber.URI = &url
- return s
-}
-
-func getOwnerReference() metav1.OwnerReference {
- return metav1.OwnerReference{
- APIVersion: v1alpha1.SchemeGroupVersion.String(),
- Kind: "Broker",
- Name: brokerName,
- Controller: &trueVal,
- BlockOwnerDeletion: &trueVal,
+func servicePorts(containerName string, httpInternal int) []corev1.ServicePort {
+ svcPorts := []corev1.ServicePort{
+ {
+ Name: "http",
+ Port: 80,
+ TargetPort: intstr.FromInt(httpInternal),
+ },
+ }
+ // TODO remove this if once we add metrics to the filter container.
+ if containerName == ingressContainerName {
+ svcPorts = append(svcPorts, corev1.ServicePort{
+ Name: "metrics",
+ Port: 9090,
+ })
}
+ return svcPorts
}
diff --git a/pkg/reconciler/v1alpha1/broker/resources/filter.go b/pkg/reconciler/v1alpha1/broker/resources/filter.go
index 73903b986eb..74e641badd1 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/filter.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/filter.go
@@ -48,15 +48,16 @@ func MakeFilterDeployment(args *FilterArgs) *appsv1.Deployment {
Kind: "Broker",
}),
},
- Labels: FilterLabels(args.Broker),
+ Labels: FilterLabels(args.Broker.Name),
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
- MatchLabels: FilterLabels(args.Broker),
+ MatchLabels: FilterLabels(args.Broker.Name),
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: FilterLabels(args.Broker),
+ Labels: FilterLabels(args.Broker.Name),
+ // TODO remove this once we get rid of istio.
Annotations: map[string]string{
"sidecar.istio.io/inject": "true",
},
@@ -91,7 +92,7 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
ObjectMeta: metav1.ObjectMeta{
Namespace: b.Namespace,
Name: fmt.Sprintf("%s-broker-filter", b.Name),
- Labels: FilterLabels(b),
+ Labels: FilterLabels(b.Name),
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(b, schema.GroupVersionKind{
Group: eventingv1alpha1.SchemeGroupVersion.Group,
@@ -101,7 +102,7 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
},
},
Spec: corev1.ServiceSpec{
- Selector: FilterLabels(b),
+ Selector: FilterLabels(b.Name),
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -115,9 +116,9 @@ func MakeFilterService(b *eventingv1alpha1.Broker) *corev1.Service {
// FilterLabels generates the labels present on all resources representing the filter of the given
// Broker.
-func FilterLabels(b *eventingv1alpha1.Broker) map[string]string {
+func FilterLabels(brokerName string) map[string]string {
return map[string]string{
- "eventing.knative.dev/broker": b.Name,
+ "eventing.knative.dev/broker": brokerName,
"eventing.knative.dev/brokerRole": "filter",
}
}
diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
index 3bde11755e9..5721d5654a2 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
@@ -49,15 +49,15 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment {
Kind: "Broker",
}),
},
- Labels: ingressLabels(args.Broker),
+ Labels: IngressLabels(args.Broker.Name),
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
- MatchLabels: ingressLabels(args.Broker),
+ MatchLabels: IngressLabels(args.Broker.Name),
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: ingressLabels(args.Broker),
+ Labels: IngressLabels(args.Broker.Name),
// TODO: Remove this annotation once all channels stop using istio virtual service
// https://github.com/knative/eventing/issues/294
Annotations: map[string]string{
@@ -107,8 +107,9 @@ func MakeIngressService(b *eventingv1alpha1.Broker) *corev1.Service {
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: b.Namespace,
- Name: fmt.Sprintf("%s-broker", b.Name),
- Labels: ingressLabels(b),
+ // TODO add -ingress to the name to be consistent with the filter service naming.
+ Name: fmt.Sprintf("%s-broker", b.Name),
+ Labels: IngressLabels(b.Name),
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(b, schema.GroupVersionKind{
Group: eventingv1alpha1.SchemeGroupVersion.Group,
@@ -118,7 +119,7 @@ func MakeIngressService(b *eventingv1alpha1.Broker) *corev1.Service {
},
},
Spec: corev1.ServiceSpec{
- Selector: ingressLabels(b),
+ Selector: IngressLabels(b.Name),
Ports: []corev1.ServicePort{
{
Name: "http",
@@ -134,9 +135,11 @@ func MakeIngressService(b *eventingv1alpha1.Broker) *corev1.Service {
}
}
-func ingressLabels(b *eventingv1alpha1.Broker) map[string]string {
+// IngressLabels generates the labels present on all resources representing the ingress of the given
+// Broker.
+func IngressLabels(brokerName string) map[string]string {
return map[string]string{
- "eventing.knative.dev/broker": b.Name,
+ "eventing.knative.dev/broker": brokerName,
"eventing.knative.dev/brokerRole": "ingress",
}
}
From 6c549cc79433bcd1cce5bc4155a85583e59449bd Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Thu, 25 Apr 2019 10:45:29 -0700
Subject: [PATCH 59/76] Add OWNERS for the Channel implementations, based on
those who volunteered during the WG call. (#1033)
---
OWNERS_ALIASES | 21 +++++++++++++++++++++
contrib/gcppubsub/OWNERS | 9 +++++++++
contrib/kafka/OWNERS | 9 +++++++++
contrib/natss/OWNERS | 9 +++++++++
4 files changed, 48 insertions(+)
create mode 100644 OWNERS_ALIASES
create mode 100644 contrib/gcppubsub/OWNERS
create mode 100644 contrib/kafka/OWNERS
create mode 100644 contrib/natss/OWNERS
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
new file mode 100644
index 00000000000..f73c14d87e1
--- /dev/null
+++ b/OWNERS_ALIASES
@@ -0,0 +1,21 @@
+aliases:
+ # These aliases are for OWNERS of the various Channel implementations. These
+ # are in addition to the repo level OWNERS.
+
+ gcp-pubsub-approvers:
+ - Harwayne
+ gcp-pubsub-reviewers:
+ - Harwayne
+
+ kafka-approvers:
+ - bbrowning
+ - matzew
+ kafka-reviewers:
+ - bbrowning
+ - matzew
+
+ natss-approvers:
+ - Abd4llA
+ natss-reviewers:
+ - Abd4llA
+
diff --git a/contrib/gcppubsub/OWNERS b/contrib/gcppubsub/OWNERS
new file mode 100644
index 00000000000..44ca4ed92eb
--- /dev/null
+++ b/contrib/gcppubsub/OWNERS
@@ -0,0 +1,9 @@
+approvers:
+ - gcp-pubsub-approvers
+
+reviewers:
+ - gcp-pubsub-reviewers
+
+labels:
+ - area/GCP-PubSub
+
diff --git a/contrib/kafka/OWNERS b/contrib/kafka/OWNERS
new file mode 100644
index 00000000000..1a54e054c85
--- /dev/null
+++ b/contrib/kafka/OWNERS
@@ -0,0 +1,9 @@
+approvers:
+ - kafka-approvers
+
+reviewers:
+ - kafka-reviewers
+
+labels:
+ - area/Kafka
+
diff --git a/contrib/natss/OWNERS b/contrib/natss/OWNERS
new file mode 100644
index 00000000000..8cb9fe32d91
--- /dev/null
+++ b/contrib/natss/OWNERS
@@ -0,0 +1,9 @@
+approvers:
+ - natss-approvers
+
+reviewers:
+ - natss-reviewers
+
+labels:
+ - area/NATSS
+
From a5a24f50c21a77d2c7e0c68c1512ad63f1b23d26 Mon Sep 17 00:00:00 2001
From: Scott Nichols <32305648+n3wscott@users.noreply.github.com>
Date: Thu, 25 Apr 2019 13:48:30 -0700
Subject: [PATCH 60/76] Move Container source to Eventing, migrate to
pkg/controller (#1099)
* Move ContainerSource API.
* stage this.
* Move ContainerSource API.
* ported rec.
* stash.
* working cron job source.
* add rbac
* update deps.
* update codegen.
* add deepcopy.
* clean copyright.
* updating to have a source controller.
* fix yaml.
* use sources.
* 2019
* use the real word, not core.
* source -> sources.
* fix yaml.
* api for container source start.
* rec done first pass. tests next.
* staging for test writing.
* first test.
* more tests.
* working tests.
* rbac.
---
cmd/sources-controller/main.go | 16 +-
config/200-controller-clusterrole.yaml | 3 +
config/300-containersource.yaml | 89 +++
.../v1alpha1/containersource_lifecycle.go | 92 ++++
.../containersource_lifecycle_test.go | 448 +++++++++++++++
.../sources/v1alpha1/containersource_types.go | 97 ++++
pkg/apis/sources/v1alpha1/register.go | 2 +
pkg/apis/sources/v1alpha1/register_test.go | 2 +
.../sources/v1alpha1/zz_generated.deepcopy.go | 111 ++++
.../typed/sources/v1alpha1/containersource.go | 174 ++++++
.../v1alpha1/fake/fake_containersource.go | 140 +++++
.../v1alpha1/fake/fake_sources_client.go | 4 +
.../sources/v1alpha1/generated_expansion.go | 2 +
.../typed/sources/v1alpha1/sources_client.go | 5 +
.../informers/externalversions/generic.go | 2 +
.../sources/v1alpha1/containersource.go | 89 +++
.../sources/v1alpha1/interface.go | 7 +
.../sources/v1alpha1/containersource.go | 94 ++++
.../sources/v1alpha1/expansion_generated.go | 8 +
.../containersource/containersource.go | 316 +++++++++++
.../containersource/containersource_test.go | 509 ++++++++++++++++++
pkg/reconciler/containersource/doc.go | 18 +
.../containersource/resources/arguments.go | 36 ++
.../containersource/resources/deployment.go | 117 ++++
.../resources/deployment_test.go | 329 +++++++++++
pkg/reconciler/cronjobsource/cronjobsource.go | 3 +-
.../cronjobsource/cronjobsource_test.go | 38 +-
pkg/reconciler/testing/containersource.go | 112 ++++
pkg/reconciler/testing/listers.go | 4 +
29 files changed, 2835 insertions(+), 32 deletions(-)
create mode 100644 config/300-containersource.yaml
create mode 100644 pkg/apis/sources/v1alpha1/containersource_lifecycle.go
create mode 100644 pkg/apis/sources/v1alpha1/containersource_lifecycle_test.go
create mode 100644 pkg/apis/sources/v1alpha1/containersource_types.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/containersource.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_containersource.go
create mode 100644 pkg/client/informers/externalversions/sources/v1alpha1/containersource.go
create mode 100644 pkg/client/listers/sources/v1alpha1/containersource.go
create mode 100644 pkg/reconciler/containersource/containersource.go
create mode 100644 pkg/reconciler/containersource/containersource_test.go
create mode 100644 pkg/reconciler/containersource/doc.go
create mode 100644 pkg/reconciler/containersource/resources/arguments.go
create mode 100644 pkg/reconciler/containersource/resources/deployment.go
create mode 100644 pkg/reconciler/containersource/resources/deployment_test.go
create mode 100644 pkg/reconciler/testing/containersource.go
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
index 6abe075e348..000cbf04c4a 100644
--- a/cmd/sources-controller/main.go
+++ b/cmd/sources-controller/main.go
@@ -18,6 +18,7 @@ package main
import (
"flag"
+ "github.com/knative/eventing/pkg/reconciler/containersource"
"k8s.io/client-go/tools/clientcmd"
"log"
@@ -63,7 +64,7 @@ func main() {
logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 1
+ const numControllers = 2
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -72,7 +73,8 @@ func main() {
eventingInformerFactory := informers.NewSharedInformerFactory(opt.EventingClientSet, opt.ResyncPeriod)
// Eventing
- cronjobsourceInformer := eventingInformerFactory.Sources().V1alpha1().CronJobSources()
+ cronJobSourceInformer := eventingInformerFactory.Sources().V1alpha1().CronJobSources()
+ containerSourceInformer := eventingInformerFactory.Sources().V1alpha1().ContainerSources()
// Kube
deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
@@ -83,7 +85,12 @@ func main() {
controllers := []*kncontroller.Impl{
cronjobsource.NewController(
opt,
- cronjobsourceInformer,
+ cronJobSourceInformer,
+ deploymentInformer,
+ ),
+ containersource.NewController(
+ opt,
+ containerSourceInformer,
deploymentInformer,
),
}
@@ -104,7 +111,8 @@ func main() {
if err := kncontroller.StartInformers(
stopCh,
// Eventing
- cronjobsourceInformer.Informer(),
+ cronJobSourceInformer.Informer(),
+ containerSourceInformer.Informer(),
// Kube
deploymentInformer.Informer(),
); err != nil {
diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml
index 44a8d3f1408..5be03648e78 100644
--- a/config/200-controller-clusterrole.yaml
+++ b/config/200-controller-clusterrole.yaml
@@ -79,4 +79,7 @@ rules:
- "cronjobsources"
- "cronjobsources/status"
- "cronjobsources/finalizers"
+ - "containersources"
+ - "containersources/status"
+ - "containersources/finalizers"
verbs: *everything
diff --git a/config/300-containersource.yaml b/config/300-containersource.yaml
new file mode 100644
index 00000000000..ecf50df75ce
--- /dev/null
+++ b/config/300-containersource.yaml
@@ -0,0 +1,89 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ labels:
+ eventing.knative.dev/source: "true"
+ knative.dev/crd-install: "true"
+ name: containersources.sources.eventing.knative.dev
+spec:
+ group: sources.eventing.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - eventing
+ - sources
+ kind: ContainerSource
+ plural: containersources
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ args:
+ items:
+ type: string
+ type: array
+ env:
+ items:
+ type: object
+ type: array
+ image:
+ minLength: 1
+ type: string
+ serviceAccountName:
+ type: string
+ sink:
+ type: object
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ # we use a string in the stored object but a wrapper object
+ # at runtime.
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ - status
+ type: object
+ type: array
+ sinkUri:
+ type: string
+ type: object
+ version: v1alpha1
diff --git a/pkg/apis/sources/v1alpha1/containersource_lifecycle.go b/pkg/apis/sources/v1alpha1/containersource_lifecycle.go
new file mode 100644
index 00000000000..5848f9511ba
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/containersource_lifecycle.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+const (
+ // ContainerConditionReady has status True when the ContainerSource is ready to send events.
+ ContainerConditionReady = duckv1alpha1.ConditionReady
+
+ // ContainerConditionSinkProvided has status True when the ContainerSource has been configured with a sink target.
+ ContainerConditionSinkProvided duckv1alpha1.ConditionType = "SinkProvided"
+
+ // ContainerConditionDeployed has status True when the ContainerSource has had its deployment created.
+ ContainerConditionDeployed duckv1alpha1.ConditionType = "Deployed"
+)
+
+var containerCondSet = duckv1alpha1.NewLivingConditionSet(
+ ContainerConditionSinkProvided,
+ ContainerConditionDeployed,
+)
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *ContainerSourceStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+ return containerCondSet.Manage(s).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *ContainerSourceStatus) IsReady() bool {
+ return containerCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *ContainerSourceStatus) InitializeConditions() {
+ containerCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *ContainerSourceStatus) MarkSink(uri string) {
+ s.SinkURI = uri
+ if len(uri) > 0 {
+ containerCondSet.Manage(s).MarkTrue(ContainerConditionSinkProvided)
+ } else {
+ containerCondSet.Manage(s).MarkUnknown(ContainerConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *ContainerSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ containerCondSet.Manage(s).MarkFalse(ContainerConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// IsDeployed returns true if the Deployed condition has status true, otherwise
+// false.
+func (s *ContainerSourceStatus) IsDeployed() bool {
+ c := containerCondSet.Manage(s).GetCondition(ContainerConditionDeployed)
+ if c != nil {
+ return c.IsTrue()
+ }
+ return false
+}
+
+// MarkDeployed sets the condition that the source has been deployed.
+func (s *ContainerSourceStatus) MarkDeployed() {
+ containerCondSet.Manage(s).MarkTrue(ContainerConditionDeployed)
+}
+
+// MarkDeploying sets the condition that the source is deploying.
+func (s *ContainerSourceStatus) MarkDeploying(reason, messageFormat string, messageA ...interface{}) {
+ containerCondSet.Manage(s).MarkUnknown(ContainerConditionDeployed, reason, messageFormat, messageA...)
+}
+
+// MarkNotDeployed sets the condition that the source has not been deployed.
+func (s *ContainerSourceStatus) MarkNotDeployed(reason, messageFormat string, messageA ...interface{}) {
+ containerCondSet.Manage(s).MarkFalse(ContainerConditionDeployed, reason, messageFormat, messageA...)
+}
diff --git a/pkg/apis/sources/v1alpha1/containersource_lifecycle_test.go b/pkg/apis/sources/v1alpha1/containersource_lifecycle_test.go
new file mode 100644
index 00000000000..cde7b66f270
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/containersource_lifecycle_test.go
@@ -0,0 +1,448 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+
+ "github.com/google/go-cmp/cmp"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+func TestContainerSourceStatusIsReady(t *testing.T) {
+ tests := []struct {
+ name string
+ s *ContainerSourceStatus
+ want bool
+ }{{
+ name: "uninitialized",
+ s: &ContainerSourceStatus{},
+ want: false,
+ }, {
+ name: "initialized",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink and deployed then no sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNoSink("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed then deploying",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkDeploying("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed then not deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNotDeployed("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and not deployed then deploying then deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkNotDeployed("MarkNotDeployed", "")
+ s.MarkDeploying("MarkDeploying", "")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink empty and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink empty and deployed then sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: true,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.IsReady()
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Errorf("%s: unexpected condition (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
+
+func TestContainerSourceStatusGetCondition(t *testing.T) {
+ tests := []struct {
+ name string
+ s *ContainerSourceStatus
+ condQuery duckv1alpha1.ConditionType
+ want *duckv1alpha1.Condition
+ }{{
+ name: "uninitialized",
+ s: &ContainerSourceStatus{},
+ condQuery: ContainerConditionReady,
+ want: nil,
+ }, {
+ name: "initialized",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark sink and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }, {
+ name: "mark sink and deployed then no sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNoSink("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionFalse,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark sink and deployed then deploying",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkDeploying("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark sink and deployed then not deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNotDeployed("Testing", "hi%s", "")
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionFalse,
+ Reason: "Testing",
+ Message: "hi",
+ },
+ }, {
+ name: "mark sink and not deployed then deploying then deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkNotDeployed("MarkNotDeployed", "%s", "")
+ s.MarkDeploying("MarkDeploying", "%s", "")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }, {
+ name: "mark sink empty and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "SinkEmpty",
+ Message: "Sink has resolved to empty.",
+ },
+ }, {
+ name: "mark sink empty and deployed then sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ condQuery: ContainerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ContainerConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.GetCondition(test.condQuery)
+ ignoreTime := cmpopts.IgnoreFields(duckv1alpha1.Condition{},
+ "LastTransitionTime", "Severity")
+ if diff := cmp.Diff(test.want, got, ignoreTime); diff != "" {
+ t.Errorf("unexpected condition (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
+
+func TestContainerSourceStatusIsDeployed(t *testing.T) {
+ tests := []struct {
+ name string
+ s *ContainerSourceStatus
+ want bool
+ }{{
+ name: "uninitialized",
+ s: &ContainerSourceStatus{},
+ want: false,
+ }, {
+ name: "initialized",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink and deployed then no sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNoSink("Testing", "")
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink and deployed then deploying",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkDeploying("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed then not deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ s.MarkNotDeployed("Testing", "")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and not deployed then deploying then deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkNotDeployed("MarkNotDeployed", "")
+ s.MarkDeploying("MarkDeploying", "")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink empty and deployed",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }, {
+ name: "mark sink empty and deployed then sink",
+ s: func() *ContainerSourceStatus {
+ s := &ContainerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("")
+ s.MarkDeployed()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: true,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.IsDeployed()
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Errorf("%s: unexpected condition (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/sources/v1alpha1/containersource_types.go b/pkg/apis/sources/v1alpha1/containersource_types.go
new file mode 100644
index 00000000000..bad9c1b2639
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/containersource_types.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/knative/pkg/apis/duck"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// ContainerSource is the Schema for the containersources API
+type ContainerSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ContainerSourceSpec `json:"spec,omitempty"`
+ Status ContainerSourceStatus `json:"status,omitempty"`
+}
+
+// Check that ContainerSource can be validated and can be defaulted.
+var _ runtime.Object = (*ContainerSource)(nil)
+
+// Check that ContainerSource implements the Conditions duck type.
+var _ = duck.VerifyType(&ContainerSource{}, &duckv1alpha1.Conditions{})
+
+// ContainerSourceSpec defines the desired state of ContainerSource
+type ContainerSourceSpec struct {
+ // Image is the image to run inside of the container.
+ // +kubebuilder:validation:MinLength=1
+ Image string `json:"image,omitempty"`
+
+ // Args are passed to the ContainerSpec as they are.
+ Args []string `json:"args,omitempty"`
+
+ // Env is the list of environment variables to set in the container.
+ // Cannot be updated.
+ // +optional
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this
+ // source.
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // Sink is a reference to an object that will resolve to a domain name to use as the sink.
+ // +optional
+ Sink *corev1.ObjectReference `json:"sink,omitempty"`
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (s *ContainerSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("ContainerSource")
+}
+
+// ContainerSourceStatus defines the observed state of ContainerSource
+type ContainerSourceStatus struct {
+ // inherits duck/v1alpha1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1alpha1.Status `json:",inline"`
+
+ // SinkURI is the current active sink URI that has been configured for the ContainerSource.
+ // +optional
+ SinkURI string `json:"sinkUri,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerSourceList contains a list of ContainerSource
+type ContainerSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ContainerSource `json:"items"`
+}
diff --git a/pkg/apis/sources/v1alpha1/register.go b/pkg/apis/sources/v1alpha1/register.go
index a170bc79c54..83556830ff3 100644
--- a/pkg/apis/sources/v1alpha1/register.go
+++ b/pkg/apis/sources/v1alpha1/register.go
@@ -47,6 +47,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&CronJobSource{},
&CronJobSourceList{},
+ &ContainerSource{},
+ &ContainerSourceList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/pkg/apis/sources/v1alpha1/register_test.go b/pkg/apis/sources/v1alpha1/register_test.go
index 2c4e17d8b5e..c111fda185c 100644
--- a/pkg/apis/sources/v1alpha1/register_test.go
+++ b/pkg/apis/sources/v1alpha1/register_test.go
@@ -62,6 +62,8 @@ func TestKnownTypes(t *testing.T) {
for _, name := range []string{
"CronJobSource",
"CronJobSourceList",
+ "ContainerSource",
+ "ContainerSourceList",
} {
if _, ok := types[name]; !ok {
t.Errorf("Did not find %q as registered type", name)
diff --git a/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
index 686c603a5e3..29ee5bbe035 100644
--- a/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,117 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSource) DeepCopyInto(out *ContainerSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSource.
+func (in *ContainerSource) DeepCopy() *ContainerSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceList) DeepCopyInto(out *ContainerSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ContainerSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceList.
+func (in *ContainerSourceList) DeepCopy() *ContainerSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceSpec) DeepCopyInto(out *ContainerSourceSpec) {
+ *out = *in
+ if in.Args != nil {
+ in, out := &in.Args, &out.Args
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Sink != nil {
+ in, out := &in.Sink, &out.Sink
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceSpec.
+func (in *ContainerSourceSpec) DeepCopy() *ContainerSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceStatus) DeepCopyInto(out *ContainerSourceStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceStatus.
+func (in *ContainerSourceStatus) DeepCopy() *ContainerSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CronJobSource) DeepCopyInto(out *CronJobSource) {
*out = *in
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/containersource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/containersource.go
new file mode 100644
index 00000000000..d0f935e2a85
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/containersource.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ scheme "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ContainerSourcesGetter has a method to return a ContainerSourceInterface.
+// A group's client should implement this interface.
+type ContainerSourcesGetter interface {
+ ContainerSources(namespace string) ContainerSourceInterface
+}
+
+// ContainerSourceInterface has methods to work with ContainerSource resources.
+type ContainerSourceInterface interface {
+ Create(*v1alpha1.ContainerSource) (*v1alpha1.ContainerSource, error)
+ Update(*v1alpha1.ContainerSource) (*v1alpha1.ContainerSource, error)
+ UpdateStatus(*v1alpha1.ContainerSource) (*v1alpha1.ContainerSource, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.ContainerSource, error)
+ List(opts v1.ListOptions) (*v1alpha1.ContainerSourceList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ContainerSource, err error)
+ ContainerSourceExpansion
+}
+
+// containerSources implements ContainerSourceInterface
+type containerSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newContainerSources returns a ContainerSources
+func newContainerSources(c *SourcesV1alpha1Client, namespace string) *containerSources {
+ return &containerSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the containerSource, and returns the corresponding containerSource object, and an error if there is any.
+func (c *containerSources) Get(name string, options v1.GetOptions) (result *v1alpha1.ContainerSource, err error) {
+ result = &v1alpha1.ContainerSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ContainerSources that match those selectors.
+func (c *containerSources) List(opts v1.ListOptions) (result *v1alpha1.ContainerSourceList, err error) {
+ result = &v1alpha1.ContainerSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested containerSources.
+func (c *containerSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a containerSource and creates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *containerSources) Create(containerSource *v1alpha1.ContainerSource) (result *v1alpha1.ContainerSource, err error) {
+ result = &v1alpha1.ContainerSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("containersources").
+ Body(containerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a containerSource and updates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *containerSources) Update(containerSource *v1alpha1.ContainerSource) (result *v1alpha1.ContainerSource, err error) {
+ result = &v1alpha1.ContainerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(containerSource.Name).
+ Body(containerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *containerSources) UpdateStatus(containerSource *v1alpha1.ContainerSource) (result *v1alpha1.ContainerSource, err error) {
+ result = &v1alpha1.ContainerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(containerSource.Name).
+ SubResource("status").
+ Body(containerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the containerSource and deletes it. Returns an error if one occurs.
+func (c *containerSources) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *containerSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched containerSource.
+func (c *containerSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ContainerSource, err error) {
+ result = &v1alpha1.ContainerSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("containersources").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_containersource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_containersource.go
new file mode 100644
index 00000000000..18ca2d1555e
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_containersource.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeContainerSources implements ContainerSourceInterface
+type FakeContainerSources struct {
+ Fake *FakeSourcesV1alpha1
+ ns string
+}
+
+var containersourcesResource = schema.GroupVersionResource{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Resource: "containersources"}
+
+var containersourcesKind = schema.GroupVersionKind{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Kind: "ContainerSource"}
+
+// Get takes name of the containerSource, and returns the corresponding containerSource object, and an error if there is any.
+func (c *FakeContainerSources) Get(name string, options v1.GetOptions) (result *v1alpha1.ContainerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(containersourcesResource, c.ns, name), &v1alpha1.ContainerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ContainerSource), err
+}
+
+// List takes label and field selectors, and returns the list of ContainerSources that match those selectors.
+func (c *FakeContainerSources) List(opts v1.ListOptions) (result *v1alpha1.ContainerSourceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(containersourcesResource, containersourcesKind, c.ns, opts), &v1alpha1.ContainerSourceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ContainerSourceList{ListMeta: obj.(*v1alpha1.ContainerSourceList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ContainerSourceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested containerSources.
+func (c *FakeContainerSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(containersourcesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a containerSource and creates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *FakeContainerSources) Create(containerSource *v1alpha1.ContainerSource) (result *v1alpha1.ContainerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(containersourcesResource, c.ns, containerSource), &v1alpha1.ContainerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ContainerSource), err
+}
+
+// Update takes the representation of a containerSource and updates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *FakeContainerSources) Update(containerSource *v1alpha1.ContainerSource) (result *v1alpha1.ContainerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(containersourcesResource, c.ns, containerSource), &v1alpha1.ContainerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ContainerSource), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeContainerSources) UpdateStatus(containerSource *v1alpha1.ContainerSource) (*v1alpha1.ContainerSource, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(containersourcesResource, "status", c.ns, containerSource), &v1alpha1.ContainerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ContainerSource), err
+}
+
+// Delete takes name of the containerSource and deletes it. Returns an error if one occurs.
+func (c *FakeContainerSources) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(containersourcesResource, c.ns, name), &v1alpha1.ContainerSource{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeContainerSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(containersourcesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ContainerSourceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched containerSource.
+func (c *FakeContainerSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ContainerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(containersourcesResource, c.ns, name, data, subresources...), &v1alpha1.ContainerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ContainerSource), err
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
index c2d9faeb94a..2742a8e7195 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
@@ -28,6 +28,10 @@ type FakeSourcesV1alpha1 struct {
*testing.Fake
}
+func (c *FakeSourcesV1alpha1) ContainerSources(namespace string) v1alpha1.ContainerSourceInterface {
+ return &FakeContainerSources{c, namespace}
+}
+
func (c *FakeSourcesV1alpha1) CronJobSources(namespace string) v1alpha1.CronJobSourceInterface {
return &FakeCronJobSources{c, namespace}
}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
index e3e7bf27492..b250cd0c5e3 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
@@ -18,4 +18,6 @@ limitations under the License.
package v1alpha1
+type ContainerSourceExpansion interface{}
+
type CronJobSourceExpansion interface{}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
index 31733ad3ea4..dbbdbdf0392 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
@@ -27,6 +27,7 @@ import (
type SourcesV1alpha1Interface interface {
RESTClient() rest.Interface
+ ContainerSourcesGetter
CronJobSourcesGetter
}
@@ -35,6 +36,10 @@ type SourcesV1alpha1Client struct {
restClient rest.Interface
}
+func (c *SourcesV1alpha1Client) ContainerSources(namespace string) ContainerSourceInterface {
+ return newContainerSources(c, namespace)
+}
+
func (c *SourcesV1alpha1Client) CronJobSources(namespace string) CronJobSourceInterface {
return newCronJobSources(c, namespace)
}
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 013b0b1e9fc..48066085062 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -66,6 +66,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Triggers().Informer()}, nil
// Group=sources.eventing.knative.dev, Version=v1alpha1
+ case sourcesv1alpha1.SchemeGroupVersion.WithResource("containersources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().ContainerSources().Informer()}, nil
case sourcesv1alpha1.SchemeGroupVersion.WithResource("cronjobsources"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().CronJobSources().Informer()}, nil
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/containersource.go b/pkg/client/informers/externalversions/sources/v1alpha1/containersource.go
new file mode 100644
index 00000000000..e5b9f70653b
--- /dev/null
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/containersource.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ time "time"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ versioned "github.com/knative/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ContainerSourceInformer provides access to a shared informer and lister for
+// ContainerSources.
+type ContainerSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.ContainerSourceLister
+}
+
+type containerSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewContainerSourceInformer constructs a new informer for ContainerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewContainerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredContainerSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredContainerSourceInformer constructs a new informer for ContainerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredContainerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().ContainerSources(namespace).List(options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().ContainerSources(namespace).Watch(options)
+ },
+ },
+ &sourcesv1alpha1.ContainerSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *containerSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredContainerSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *containerSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1alpha1.ContainerSource{}, f.defaultInformer)
+}
+
+func (f *containerSourceInformer) Lister() v1alpha1.ContainerSourceLister {
+ return v1alpha1.NewContainerSourceLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/interface.go b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
index 244340025a8..9647603c828 100644
--- a/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
@@ -24,6 +24,8 @@ import (
// Interface provides access to all the informers in this group version.
type Interface interface {
+ // ContainerSources returns a ContainerSourceInformer.
+ ContainerSources() ContainerSourceInformer
// CronJobSources returns a CronJobSourceInformer.
CronJobSources() CronJobSourceInformer
}
@@ -39,6 +41,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
+// ContainerSources returns a ContainerSourceInformer.
+func (v *version) ContainerSources() ContainerSourceInformer {
+ return &containerSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
// CronJobSources returns a CronJobSourceInformer.
func (v *version) CronJobSources() CronJobSourceInformer {
return &cronJobSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
diff --git a/pkg/client/listers/sources/v1alpha1/containersource.go b/pkg/client/listers/sources/v1alpha1/containersource.go
new file mode 100644
index 00000000000..de44136ec26
--- /dev/null
+++ b/pkg/client/listers/sources/v1alpha1/containersource.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ContainerSourceLister helps list ContainerSources.
+type ContainerSourceLister interface {
+ // List lists all ContainerSources in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha1.ContainerSource, err error)
+ // ContainerSources returns an object that can list and get ContainerSources.
+ ContainerSources(namespace string) ContainerSourceNamespaceLister
+ ContainerSourceListerExpansion
+}
+
+// containerSourceLister implements the ContainerSourceLister interface.
+type containerSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewContainerSourceLister returns a new ContainerSourceLister.
+func NewContainerSourceLister(indexer cache.Indexer) ContainerSourceLister {
+ return &containerSourceLister{indexer: indexer}
+}
+
+// List lists all ContainerSources in the indexer.
+func (s *containerSourceLister) List(selector labels.Selector) (ret []*v1alpha1.ContainerSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ContainerSource))
+ })
+ return ret, err
+}
+
+// ContainerSources returns an object that can list and get ContainerSources.
+func (s *containerSourceLister) ContainerSources(namespace string) ContainerSourceNamespaceLister {
+ return containerSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ContainerSourceNamespaceLister helps list and get ContainerSources.
+type ContainerSourceNamespaceLister interface {
+ // List lists all ContainerSources in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha1.ContainerSource, err error)
+ // Get retrieves the ContainerSource from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha1.ContainerSource, error)
+ ContainerSourceNamespaceListerExpansion
+}
+
+// containerSourceNamespaceLister implements the ContainerSourceNamespaceLister
+// interface.
+type containerSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ContainerSources in the indexer for a given namespace.
+func (s containerSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ContainerSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ContainerSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the ContainerSource from the indexer for a given namespace and name.
+func (s containerSourceNamespaceLister) Get(name string) (*v1alpha1.ContainerSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("containersource"), name)
+ }
+ return obj.(*v1alpha1.ContainerSource), nil
+}
diff --git a/pkg/client/listers/sources/v1alpha1/expansion_generated.go b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
index 444c53f7032..49fe2bab0ae 100644
--- a/pkg/client/listers/sources/v1alpha1/expansion_generated.go
+++ b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
@@ -18,6 +18,14 @@ limitations under the License.
package v1alpha1
+// ContainerSourceListerExpansion allows custom methods to be added to
+// ContainerSourceLister.
+type ContainerSourceListerExpansion interface{}
+
+// ContainerSourceNamespaceListerExpansion allows custom methods to be added to
+// ContainerSourceNamespaceLister.
+type ContainerSourceNamespaceListerExpansion interface{}
+
// CronJobSourceListerExpansion allows custom methods to be added to
// CronJobSourceLister.
type CronJobSourceListerExpansion interface{}
diff --git a/pkg/reconciler/containersource/containersource.go b/pkg/reconciler/containersource/containersource.go
new file mode 100644
index 00000000000..9535349ee7a
--- /dev/null
+++ b/pkg/reconciler/containersource/containersource.go
@@ -0,0 +1,316 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package containersource
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/errors"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ appsv1informers "k8s.io/client-go/informers/apps/v1"
+ appsv1listers "k8s.io/client-go/listers/apps/v1"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ sourceinformers "github.com/knative/eventing/pkg/client/informers/externalversions/sources/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ "github.com/knative/eventing/pkg/duck"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/containersource/resources"
+ "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/logging"
+ "go.uber.org/zap"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "ContainerSources"
+ // controllerAgentName is the string used by this controller to identify
+ // itself when creating events.
+ controllerAgentName = "container-source-controller"
+
+ // Name of the corev1.Events emitted from the reconciliation process
+ sourceReconciled = "ContainerSourceReconciled"
+ sourceUpdateStatusFailed = "ContainerSourceUpdateStatusFailed"
+)
+
+// Reconciler reconciles ContainerSource objects: it ensures a Deployment
+// running the user-specified container exists and records progress in the
+// source's status conditions.
+type Reconciler struct {
+ *reconciler.Base
+
+ // listers index properties about resources
+ containerSourceLister listers.ContainerSourceLister
+ deploymentLister appsv1listers.DeploymentLister
+}
+
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ containerSourceInformer sourceinformers.ContainerSourceInformer,
+ deploymentInformer appsv1informers.DeploymentInformer,
+) *controller.Impl {
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ containerSourceLister: containerSourceInformer.Lister(),
+ deploymentLister: deploymentInformer.Lister(),
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+ // Any change to a ContainerSource enqueues that source directly.
+ containerSourceInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+ // Deployment changes enqueue the owning ContainerSource, filtered so only
+ // Deployments controlled by a ContainerSource are considered.
+ deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+ FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("ContainerSource")),
+ Handler: reconciler.Handler(impl.EnqueueControllerOf),
+ })
+
+ return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the ContainerSource
+// resource with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the ContainerSource resource with this namespace/name
+ original, err := r.containerSourceLister.ContainerSources(namespace).Get(name)
+ if apierrors.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ logging.FromContext(ctx).Error("ContainerSource key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ // Don't modify the informers copy
+ source := original.DeepCopy()
+
+ // Reconcile this copy of the ContainerSource and then write back any status
+ // updates regardless of whether the reconcile errored out.
+ err = r.reconcile(ctx, source)
+ if err != nil {
+ logging.FromContext(ctx).Warn("Error reconciling ContainerSource", zap.Error(err))
+ } else {
+ logging.FromContext(ctx).Debug("ContainerSource reconciled")
+ r.Recorder.Eventf(source, corev1.EventTypeNormal, sourceReconciled, `ContainerSource reconciled: "%s/%s"`, source.Namespace, source.Name)
+ }
+
+ if _, updateStatusErr := r.updateStatus(ctx, source.DeepCopy()); updateStatusErr != nil {
+ // Log and record the status-update failure itself, not the (possibly
+ // nil) reconcile error captured above.
+ logging.FromContext(ctx).Warn("Failed to update the ContainerSource", zap.Error(updateStatusErr))
+ r.Recorder.Eventf(source, corev1.EventTypeWarning, sourceUpdateStatusFailed, "Failed to update ContainerSource's status: %v", updateStatusErr)
+ return updateStatusErr
+ }
+
+ // Requeue if the resource is not ready:
+ return err
+}
+
+// reconcile converges a single ContainerSource: it resolves the sink URI,
+// creates or updates the owned Deployment, and marks the source's status
+// conditions to reflect progress. The caller persists status afterwards.
+func (r *Reconciler) reconcile(ctx context.Context, source *v1alpha1.ContainerSource) error {
+ // No need to reconcile if the source has been marked for deletion.
+ if source.DeletionTimestamp != nil {
+ return nil
+ }
+
+ source.Status.InitializeConditions()
+
+ annotations := make(map[string]string)
+ // Then wire through any annotations / labels from the Source
+ if source.ObjectMeta.Annotations != nil {
+ for k, v := range source.ObjectMeta.Annotations {
+ annotations[k] = v
+ }
+ }
+ labels := make(map[string]string)
+ if source.ObjectMeta.Labels != nil {
+ for k, v := range source.ObjectMeta.Labels {
+ labels[k] = v
+ }
+ }
+
+ args := resources.ContainerArguments{
+ Source: source,
+ Name: source.Name,
+ Namespace: source.Namespace,
+ Image: source.Spec.Image,
+ Args: source.Spec.Args,
+ Env: source.Spec.Env,
+ ServiceAccountName: source.Spec.ServiceAccountName,
+ Annotations: annotations,
+ Labels: labels,
+ }
+
+ // Resolve the sink URI (from args or the sink reference) before touching
+ // the Deployment; on failure the Sink condition is already marked.
+ err := r.setSinkURIArg(ctx, source, &args)
+ if err != nil {
+ r.Recorder.Eventf(source, corev1.EventTypeWarning, "SetSinkURIFailed", "Failed to set Sink URI: %v", err)
+ return err
+ }
+
+ deploy, err := r.getDeployment(ctx, source)
+ if err != nil {
+ if errors.IsNotFound(err) {
+ deploy, err = r.createDeployment(ctx, source, nil, args)
+ if err != nil {
+ r.markNotDeployedRecordEvent(source, corev1.EventTypeWarning, "DeploymentCreateFailed", "Could not create deployment: %v", err)
+ return err
+ }
+ r.markDeployingAndRecordEvent(source, corev1.EventTypeNormal, "DeploymentCreated", "Created deployment %q", deploy.Name)
+ // Since the Deployment has just been created, there's nothing more
+ // to do until it gets a status. This ContainerSource will be reconciled
+ // again when the Deployment is updated.
+ return nil
+ }
+ // Something unexpected happened getting the deployment.
+ r.markDeployingAndRecordEvent(source, corev1.EventTypeWarning, "DeploymentGetFailed", "Error getting deployment: %v", err)
+ return err
+ }
+
+ // Update Deployment spec if it's changed
+ expected := resources.MakeDeployment(args)
+ // Since the Deployment spec has fields defaulted by the webhook, it won't
+ // be equal to expected. Use DeepDerivative to compare only the fields that
+ // are set in expected.
+ if !equality.Semantic.DeepDerivative(expected.Spec, deploy.Spec) {
+ deploy.Spec = expected.Spec
+ // NOTE(review): `deploy, err :=` shadows both outer variables here; the
+ // `return err` below intentionally returns the Update error.
+ deploy, err := r.KubeClientSet.AppsV1().Deployments(deploy.Namespace).Update(deploy)
+ if err != nil {
+ r.markDeployingAndRecordEvent(source, corev1.EventTypeWarning, "DeploymentUpdateFailed", "Failed to update deployment %q: %v", deploy.Name, err)
+ } else {
+ r.markDeployingAndRecordEvent(source, corev1.EventTypeNormal, "DeploymentUpdated", "Updated deployment %q", deploy.Name)
+ }
+ // Return after this update or error and reconcile again
+ return err
+ }
+
+ // Update source status
+ if deploy.Status.ReadyReplicas > 0 && !source.Status.IsDeployed() {
+ source.Status.MarkDeployed()
+ r.Recorder.Eventf(source, corev1.EventTypeNormal, "DeploymentReady", "Deployment %q has %d ready replicas", deploy.Name, deploy.Status.ReadyReplicas)
+ }
+
+ return nil
+}
+
+// setSinkURIArg attempts to get the sink URI from the sink reference and
+// set it in the source status. On failure, the source's Sink condition is
+// updated to reflect the error.
+// If an error is returned from this function, the caller should also record
+// an Event containing the error string.
+func (r *Reconciler) setSinkURIArg(ctx context.Context, source *v1alpha1.ContainerSource, args *resources.ContainerArguments) error {
+ // A "--sink=" flag supplied directly in the container args wins over the
+ // spec's sink reference.
+ if uri, ok := sinkArg(source); ok {
+ args.SinkInArgs = true
+ source.Status.MarkSink(uri)
+ return nil
+ }
+
+ if source.Spec.Sink == nil {
+ source.Status.MarkNoSink("Missing", "Sink missing from spec")
+ return fmt.Errorf("Sink missing from spec")
+ }
+
+ // Resolve the referenced Addressable's URI via the dynamic client.
+ uri, err := duck.GetSinkURI(ctx, r.DynamicClientSet, source.Spec.Sink, source.Namespace)
+ if err != nil {
+ source.Status.MarkNoSink("NotFound", `Couldn't get Sink URI from "%s/%s": %v"`, source.Spec.Sink.Namespace, source.Spec.Sink.Name, err)
+ return err
+ }
+ source.Status.MarkSink(uri)
+ args.Sink = uri
+
+ return nil
+}
+
+// sinkArg scans the user-provided container args for an explicit "--sink="
+// flag. If present it returns the flag's value and true; otherwise "" and
+// false.
+func sinkArg(source *v1alpha1.ContainerSource) (string, bool) {
+ for _, a := range source.Spec.Args {
+ if strings.HasPrefix(a, "--sink=") {
+ // TrimPrefix strips only the leading flag name; Replace(-1) would
+ // also delete any later occurrence of "--sink=" embedded in the
+ // value itself, corrupting it.
+ return strings.TrimPrefix(a, "--sink="), true
+ }
+ }
+ return "", false
+}
+
+// getDeployment returns the Deployment controlled by the given
+// ContainerSource, or a Kubernetes NotFound error if none exists.
+//
+// NOTE(review): this lists every Deployment in the namespace on each call;
+// ownership is then checked explicitly via controller references. A label
+// selector could narrow the list — confirm the created Deployment's labels
+// before adding one.
+func (r *Reconciler) getDeployment(ctx context.Context, source *v1alpha1.ContainerSource) (*appsv1.Deployment, error) {
+ dl, err := r.KubeClientSet.AppsV1().Deployments(source.Namespace).List(metav1.ListOptions{})
+ if err != nil {
+ // Pass the error value itself: zap.Error is a structured-logging
+ // field and renders incorrectly through the printf-style Errorf.
+ r.Logger.Errorf("Unable to list deployments: %v", err)
+ return nil, err
+ }
+ // Index into the slice instead of taking the address of the range
+ // variable, which is reused across iterations.
+ for i := range dl.Items {
+ if metav1.IsControlledBy(&dl.Items[i], source) {
+ return &dl.Items[i], nil
+ }
+ }
+ return nil, errors.NewNotFound(schema.GroupResource{}, "")
+}
+
+// createDeployment builds the desired Deployment from args and creates it in
+// the source's namespace.
+// NOTE(review): the ctx and org parameters are currently unused.
+func (r *Reconciler) createDeployment(ctx context.Context, source *v1alpha1.ContainerSource, org *appsv1.Deployment, args resources.ContainerArguments) (*appsv1.Deployment, error) {
+ deployment := resources.MakeDeployment(args)
+ return r.KubeClientSet.AppsV1().Deployments(source.Namespace).Create(deployment)
+}
+
+// markDeployingAndRecordEvent records a Kubernetes Event and marks the
+// source's Deployed condition as in-progress with the same reason/message.
+func (r *Reconciler) markDeployingAndRecordEvent(source *v1alpha1.ContainerSource, evType string, reason string, messageFmt string, args ...interface{}) {
+ r.Recorder.Eventf(source, evType, reason, messageFmt, args...)
+ source.Status.MarkDeploying(reason, messageFmt, args...)
+}
+
+// markNotDeployedRecordEvent records a Kubernetes Event and marks the
+// source's Deployed condition false with the same reason/message.
+func (r *Reconciler) markNotDeployedRecordEvent(source *v1alpha1.ContainerSource, evType string, reason string, messageFmt string, args ...interface{}) {
+ r.Recorder.Eventf(source, evType, reason, messageFmt, args...)
+ source.Status.MarkNotDeployed(reason, messageFmt, args...)
+}
+
+// updateStatus writes desired.Status back to the API server if it differs
+// from the cached copy, and logs the time-to-ready the first time the source
+// becomes Ready. Returns the updated object (or the cached one if unchanged).
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.ContainerSource) (*v1alpha1.ContainerSource, error) {
+ source, err := r.containerSourceLister.ContainerSources(desired.Namespace).Get(desired.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ // If there's nothing to update, just return.
+ if reflect.DeepEqual(source.Status, desired.Status) {
+ return source, nil
+ }
+
+ // Ready transition detected by comparing cached vs. desired status.
+ becomesReady := desired.Status.IsReady() && !source.Status.IsReady()
+
+ // Don't modify the informers copy.
+ existing := source.DeepCopy()
+ existing.Status = desired.Status
+
+ cj, err := r.EventingClientSet.SourcesV1alpha1().ContainerSources(desired.Namespace).UpdateStatus(existing)
+ if err == nil && becomesReady {
+ duration := time.Since(cj.ObjectMeta.CreationTimestamp.Time)
+ r.Logger.Infof("ContainerSource %q became ready after %v", source.Name, duration)
+ //r.StatsReporter.ReportServiceReady(subscription.Namespace, subscription.Name, duration) // TODO: stats
+ }
+
+ return cj, err
+}
diff --git a/pkg/reconciler/containersource/containersource_test.go b/pkg/reconciler/containersource/containersource_test.go
new file mode 100644
index 00000000000..eb5a9d8b02b
--- /dev/null
+++ b/pkg/reconciler/containersource/containersource_test.go
@@ -0,0 +1,509 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package containersource
+
+import (
+ "fmt"
+ "testing"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/utils"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+)
+
+const (
+ image = "github.com/knative/test/image"
+ sourceName = "test-container-source"
+ sourceUID = "1234-5678-90"
+ testNS = "testnamespace"
+ sinkName = "testsink"
+)
+
+var (
+ trueVal = true
+
+ sinkRef = corev1.ObjectReference{
+ Name: sinkName,
+ Kind: "Channel",
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ }
+ nonsinkRef = corev1.ObjectReference{
+ Name: sinkName,
+ Kind: "Trigger",
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ }
+ sinkDNS = "sink.mynamespace.svc." + utils.GetClusterDomainName()
+ sinkURI = "http://" + sinkDNS + "/"
+
+ // TODO: k8s service does not work, fix.
+ //serviceRef = corev1.ObjectReference{
+ // Name: sinkName,
+ // Kind: "Service",
+ // APIVersion: "v1",
+ //}
+ //serviceURI = "http://service.sink.svc.cluster.local/"
+)
+
+// init registers the API types used by the table tests with the global
+// client-go scheme so the fake clients can round-trip them.
+func init() {
+ // Add types to scheme
+ _ = appsv1.AddToScheme(scheme.Scheme)
+ _ = corev1.AddToScheme(scheme.Scheme)
+ _ = duckv1alpha1.AddToScheme(scheme.Scheme)
+}
+
+// TestNew is a smoke test: constructing the controller with fake clients and
+// informers must return a non-nil controller.Impl.
+func TestNew(t *testing.T) {
+ defer logtesting.ClearAll()
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+ eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0)
+ kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+
+ containerSourceInformer := eventingInformer.Sources().V1alpha1().ContainerSources()
+ deploymentInformer := kubeInformer.Apps().V1().Deployments()
+
+ c := NewController(reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ },
+ containerSourceInformer,
+ deploymentInformer,
+ )
+
+ if c == nil {
+ t.Fatal("Expected NewController to return a non-nil value")
+ }
+}
+
+func TestAllCases(t *testing.T) {
+ table := TableTest{
+ {
+ Name: "bad workqueue key",
+ // Make sure Reconcile handles bad keys.
+ Key: "too/many/parts",
+ }, {
+ Name: "key not found",
+ // Make sure Reconcile handles good keys that don't exist.
+ Key: "foo/not-found",
+ }, {
+ Name: "missing sink",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SetSinkURIFailed", `Failed to set Sink URI: channels.eventing.knative.dev "testsink" not found`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSinkNotFound(`Couldn't get Sink URI from "/testsink": channels.eventing.knative.dev "testsink" not found"`),
+ ),
+ }},
+ }, {
+ Name: "sink not addressable",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &nonsinkRef,
+ }),
+ ),
+ NewTrigger(sinkName, testNS, ""),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SetSinkURIFailed", `Failed to set Sink URI: sink "testnamespace/testsink" (eventing.knative.dev/v1alpha1, Kind=Trigger) does not contain address`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &nonsinkRef,
+ }),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSinkNotFound(`Couldn't get Sink URI from "/testsink": sink "testnamespace/testsink" (eventing.knative.dev/v1alpha1, Kind=Trigger) does not contain address"`),
+ ),
+ }},
+ }, {
+ Name: "sink not ready",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ ),
+ NewChannel(sinkName, testNS),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SetSinkURIFailed", `Failed to set Sink URI: sink "testnamespace/testsink" (eventing.knative.dev/v1alpha1, Kind=Channel) contains an empty hostname`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSinkNotFound(`Couldn't get Sink URI from "/testsink": sink "testnamespace/testsink" (eventing.knative.dev/v1alpha1, Kind=Channel) contains an empty hostname"`),
+ ),
+ }},
+ }, {
+ Name: "sink is nil",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "SetSinkURIFailed", `Failed to set Sink URI: Sink missing from spec`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSinkMissing("Sink missing from spec"),
+ ),
+ }},
+ }, {
+ Name: "valid first pass",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ),
+ NewChannel(sinkName, testNS,
+ WithChannelAddress(sinkDNS),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "DeploymentCreated", `Created deployment ""`), // TODO on noes
+ Eventf(corev1.EventTypeNormal, "ContainerSourceReconciled", `ContainerSource reconciled: "testnamespace/test-container-source"`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSink(sinkURI),
+ WithContainerSourceDeploying(`Created deployment ""`),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeDeployment(NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ), 0, nil, nil),
+ },
+ }, {
+ Name: "valid, with ready deployment",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ WithInitContainerSourceConditions,
+ WithContainerSourceSink(sinkURI),
+ WithContainerSourceDeploying(`Created deployment ""`),
+ ),
+ NewChannel(sinkName, testNS,
+ WithChannelAddress(sinkDNS),
+ ),
+ makeDeployment(NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ), 1, nil, nil),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "DeploymentReady", `Deployment "" has 1 ready replicas`),
+ Eventf(corev1.EventTypeNormal, "ContainerSourceReconciled", `ContainerSource reconciled: "testnamespace/test-container-source"`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ WithInitContainerSourceConditions,
+ WithContainerSourceSink(sinkURI),
+ // Status Update:
+ WithContainerSourceDeployed,
+ ),
+ }},
+ }, {
+ Name: "valid first pass, with annotations and labels",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ WithContainerSourceLabels(map[string]string{"label": "labeled"}),
+ WithContainerSourceAnnotations(map[string]string{"annotation": "annotated"}),
+ ),
+ NewChannel(sinkName, testNS,
+ WithChannelAddress(sinkDNS),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeNormal, "DeploymentCreated", `Created deployment ""`), // TODO on noes
+ Eventf(corev1.EventTypeNormal, "ContainerSourceReconciled", `ContainerSource reconciled: "testnamespace/test-container-source"`),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ WithContainerSourceLabels(map[string]string{"label": "labeled"}),
+ WithContainerSourceAnnotations(map[string]string{"annotation": "annotated"}),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSink(sinkURI),
+ WithContainerSourceDeploying(`Created deployment ""`),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeDeployment(NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ), 0, map[string]string{"label": "labeled"}, map[string]string{"annotation": "annotated"}),
+ },
+ }, {
+ Name: "error for create deployment",
+ Objects: []runtime.Object{
+ NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ),
+ NewChannel(sinkName, testNS,
+ WithChannelAddress(sinkDNS),
+ ),
+ },
+ Key: testNS + "/" + sourceName,
+ WantErr: true,
+ WantEvents: []string{
+ Eventf(corev1.EventTypeWarning, "DeploymentCreateFailed", "Could not create deployment: inducing failure for create deployments"),
+ },
+ WithReactors: []clientgotesting.ReactionFunc{
+ InduceFailure("create", "deployments"),
+ },
+ WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ Object: NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ Sink: &sinkRef,
+ }),
+ WithContainerSourceUID(sourceUID),
+ // Status Update:
+ WithInitContainerSourceConditions,
+ WithContainerSourceSink(sinkURI),
+ WithContainerSourceDeployFailed(`Could not create deployment: inducing failure for create deployments`),
+ ),
+ }},
+ WantCreates: []metav1.Object{
+ makeDeployment(NewContainerSource(sourceName, testNS,
+ WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ Image: image,
+ }),
+ WithContainerSourceUID(sourceUID),
+ ), 0, nil, nil),
+ },
+ },
+ //{ // TODO: k8s service does not work, fix.
+ // Name: "valid, with sink as service",
+ // Objects: []runtime.Object{
+ // NewContainerSource(sourceName, testNS,
+ // WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ // Image: image,
+ // Sink: &serviceRef,
+ // }),
+ // WithContainerSourceUID(sourceUID),
+ // ),
+ // NewService(sinkName, testNS),
+ // },
+ // Key: testNS + "/" + sourceName,
+ // WantEvents: []string{
+ // Eventf(corev1.EventTypeNormal, "ContainerSourceReconciled", `ContainerSource reconciled: "testnamespace/test-container-source"`),
+ // },
+ // WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+ // Object: NewContainerSource(sourceName, testNS,
+ // WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ // Image: image,
+ // Sink: &serviceRef,
+ // }),
+ // WithContainerSourceUID(sourceUID),
+ // // Status Update:
+ // WithInitContainerSourceConditions,
+ // WithContainerSourceSink(serviceURI),
+ // WithContainerSourceDeploying(`Created deployment ""`),
+ // ),
+ // }},
+ // WantCreates: []metav1.Object{
+ // makeDeployment(NewContainerSource(sourceName, testNS,
+ // WithContainerSourceSpec(sourcesv1alpha1.ContainerSourceSpec{
+ // Image: image,
+ // }),
+ // WithContainerSourceUID(sourceUID),
+ // ), 0),
+ // },
+ //},
+ }
+
+ defer logtesting.ClearAll()
+ table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+ return &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ containerSourceLister: listers.GetContainerSourceLister(),
+ deploymentLister: listers.GetDeploymentLister(),
+ }
+ }))
+
+}
+
+// makeDeployment builds the Deployment the reconciler is expected to create
+// for source, with the given ready-replica count and optional extra
+// labels/annotations merged over the defaults. Used by the table tests to
+// state expected creates and existing objects.
+func makeDeployment(source *sourcesv1alpha1.ContainerSource, replicas int32, labels map[string]string, annotations map[string]string) *appsv1.Deployment {
+ args := append(source.Spec.Args, fmt.Sprintf("--sink=%s", sinkURI))
+ env := append(source.Spec.Env, corev1.EnvVar{Name: "SINK", Value: sinkURI})
+
+ // Defaults mirror resources.MakeDeployment; caller-supplied entries win.
+ annos := map[string]string{
+ "sidecar.istio.io/inject": "true",
+ }
+ for k, v := range annotations {
+ annos[k] = v
+ }
+
+ labs := map[string]string{
+ "eventing.knative.dev/source": source.Name,
+ }
+ for k, v := range labels {
+ labs[k] = v
+ }
+
+ return &appsv1.Deployment{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: appsv1.SchemeGroupVersion.String(),
+ Kind: "Deployment",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: fmt.Sprintf("%s-", source.Name),
+ Namespace: source.Namespace,
+ OwnerReferences: getOwnerReferences(),
+ },
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "eventing.knative.dev/source": source.Name,
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: annos,
+ Labels: labs,
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{{
+ Name: "source",
+ Image: source.Spec.Image,
+ Args: args,
+ Env: env,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ }},
+ ServiceAccountName: source.Spec.ServiceAccountName,
+ },
+ },
+ },
+ Status: appsv1.DeploymentStatus{
+ ReadyReplicas: replicas,
+ },
+ }
+}
+
+// getOwnerReferences returns the controller owner reference the test
+// ContainerSource is expected to stamp on its Deployment.
+func getOwnerReferences() []metav1.OwnerReference {
+ return []metav1.OwnerReference{{
+ APIVersion: sourcesv1alpha1.SchemeGroupVersion.String(),
+ Kind: "ContainerSource",
+ Name: sourceName,
+ Controller: &trueVal,
+ BlockOwnerDeletion: &trueVal,
+ UID: sourceUID,
+ }}
+}
diff --git a/pkg/reconciler/containersource/doc.go b/pkg/reconciler/containersource/doc.go
new file mode 100644
index 00000000000..fa81f4b7843
--- /dev/null
+++ b/pkg/reconciler/containersource/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package containersource implements the ContainerSource controller.
+package containersource
diff --git a/pkg/reconciler/containersource/resources/arguments.go b/pkg/reconciler/containersource/resources/arguments.go
new file mode 100644
index 00000000000..37fc1aef7ef
--- /dev/null
+++ b/pkg/reconciler/containersource/resources/arguments.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// ContainerArguments carries everything needed to build the Deployment for a
+// ContainerSource.
+type ContainerArguments struct {
+ // Source is the owning ContainerSource (used for the controller owner ref).
+ Source *v1alpha1.ContainerSource
+ Name string
+ Namespace string
+ Image string
+ Args []string
+ Env []corev1.EnvVar
+ ServiceAccountName string
+ // SinkInArgs is true when Args already contains a "--sink=" flag, in which
+ // case the resolved Sink URI is not appended again.
+ SinkInArgs bool
+ Sink string
+ Annotations map[string]string
+ Labels map[string]string
+}
diff --git a/pkg/reconciler/containersource/resources/deployment.go b/pkg/reconciler/containersource/resources/deployment.go
new file mode 100644
index 00000000000..5664a54dcfb
--- /dev/null
+++ b/pkg/reconciler/containersource/resources/deployment.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "fmt"
+ "github.com/knative/pkg/kmeta"
+ "strings"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const sourceLabelKey = "eventing.knative.dev/source"
+
+func MakeDeployment(args ContainerArguments) *appsv1.Deployment {
+
+ containerArgs := args.Args
+
+	// If the sink is already present in the provided args.Args, don't append it again.
+ if !args.SinkInArgs {
+ remote := fmt.Sprintf("--sink=%s", args.Sink)
+ containerArgs = append(containerArgs, remote)
+ }
+
+ env := append(args.Env, corev1.EnvVar{Name: "SINK", Value: sinkArg(args)})
+
+ deploy := &appsv1.Deployment{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: args.Name + "-",
+ Namespace: args.Namespace,
+ OwnerReferences: []metav1.OwnerReference{
+ *kmeta.NewControllerRef(args.Source),
+ },
+ },
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ sourceLabelKey: args.Name,
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "true",
+ },
+ Labels: map[string]string{
+ sourceLabelKey: args.Name,
+ },
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: args.ServiceAccountName,
+ Containers: []corev1.Container{
+ {
+ Name: "source",
+ Image: args.Image,
+ Args: containerArgs,
+ Env: env,
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ },
+ },
+ },
+ },
+ },
+ }
+
+	// Then wire through any annotations from the source. Allowing the
+	// container to override Istio injection is intentional, not a bug.
+ if args.Annotations != nil {
+ for k, v := range args.Annotations {
+ deploy.Spec.Template.ObjectMeta.Annotations[k] = v
+ }
+ }
+
+	// Then wire through any labels from the source. Do not allow overriding
+	// our source label: that would be error-prone, since the matchLabels could
+	// then fail to match (or we'd have to force them to match), so we simply
+	// disallow it.
+ if args.Labels != nil {
+ for k, v := range args.Labels {
+ if k != sourceLabelKey {
+ deploy.Spec.Template.ObjectMeta.Labels[k] = v
+ }
+ }
+ }
+ return deploy
+}
+
+func sinkArg(args ContainerArguments) string {
+ if args.SinkInArgs {
+ for _, a := range args.Args {
+ if strings.HasPrefix(a, "--sink=") {
+ return strings.Replace(a, "--sink=", "", -1)
+ }
+ }
+ }
+ return args.Sink
+}
diff --git a/pkg/reconciler/containersource/resources/deployment_test.go b/pkg/reconciler/containersource/resources/deployment_test.go
new file mode 100644
index 00000000000..0ac4a82d2fe
--- /dev/null
+++ b/pkg/reconciler/containersource/resources/deployment_test.go
@@ -0,0 +1,329 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestMakeDeployment_sinkoverrideannotationlabelnotallowed(t *testing.T) {
+ yes := true
+ got := MakeDeployment(ContainerArguments{
+ Source: &v1alpha1.ContainerSource{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-name", UID: "TEST_UID"},
+ },
+ Name: "test-name",
+ Namespace: "test-namespace",
+ Image: "test-image",
+ Args: []string{"--test1=args1", "--test2=args2"},
+ Env: []corev1.EnvVar{{
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }},
+ ServiceAccountName: "test-service-account",
+ SinkInArgs: false,
+ Sink: "test-sink",
+ Labels: map[string]string{
+ "eventing.knative.dev/source": "not-allowed",
+ "anotherlabel": "extra-label",
+ },
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "false",
+ "anotherannotation": "extra-annotation",
+ },
+ })
+
+ want := &appsv1.Deployment{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "test-name-",
+ Namespace: "test-namespace",
+ OwnerReferences: []metav1.OwnerReference{{
+ APIVersion: "sources.eventing.knative.dev/v1alpha1",
+ Kind: "ContainerSource",
+ Name: "test-name",
+ UID: "TEST_UID",
+ Controller: &yes,
+ BlockOwnerDeletion: &yes,
+ }},
+ },
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "false",
+ "anotherannotation": "extra-annotation",
+ },
+ Labels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ "anotherlabel": "extra-label",
+ },
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: "test-service-account",
+ Containers: []corev1.Container{
+ {
+ Name: "source",
+ Image: "test-image",
+ Args: []string{
+ "--test1=args1",
+ "--test2=args2",
+ "--sink=test-sink",
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }, {
+ Name: "SINK",
+ Value: "test-sink",
+ }},
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected deploy (-want, +got) = %v", diff)
+ }
+}
+
+func TestMakeDeployment_sink(t *testing.T) {
+ yes := true
+ got := MakeDeployment(ContainerArguments{
+ Source: &v1alpha1.ContainerSource{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-name", UID: "TEST_UID"},
+ },
+ Name: "test-name",
+ Namespace: "test-namespace",
+ Image: "test-image",
+ Args: []string{"--test1=args1", "--test2=args2"},
+ Env: []corev1.EnvVar{{
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }},
+ ServiceAccountName: "test-service-account",
+ SinkInArgs: false,
+ Sink: "test-sink",
+ })
+
+ want := &appsv1.Deployment{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "test-name-",
+ Namespace: "test-namespace",
+ OwnerReferences: []metav1.OwnerReference{{
+ APIVersion: "sources.eventing.knative.dev/v1alpha1",
+ Kind: "ContainerSource",
+ Name: "test-name",
+ UID: "TEST_UID",
+ Controller: &yes,
+ BlockOwnerDeletion: &yes,
+ }},
+ },
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "true",
+ },
+ Labels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ },
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: "test-service-account",
+ Containers: []corev1.Container{
+ {
+ Name: "source",
+ Image: "test-image",
+ Args: []string{
+ "--test1=args1",
+ "--test2=args2",
+ "--sink=test-sink",
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }, {
+ Name: "SINK",
+ Value: "test-sink",
+ }},
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected deploy (-want, +got) = %v", diff)
+ }
+}
+
+func TestMakeDeployment_sinkinargs(t *testing.T) {
+ yes := true
+ got := MakeDeployment(ContainerArguments{
+ Source: &v1alpha1.ContainerSource{
+ ObjectMeta: metav1.ObjectMeta{Name: "test-name", UID: "TEST_UID"},
+ },
+ Name: "test-name",
+ Namespace: "test-namespace",
+ Image: "test-image",
+ Args: []string{"--test1=args1", "--test2=args2", "--sink=test-sink"},
+ Env: []corev1.EnvVar{{
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }},
+ ServiceAccountName: "test-service-account",
+ SinkInArgs: true,
+ Labels: map[string]string{"eventing.knative.dev/source": "test-name"},
+ Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ })
+
+ want := &appsv1.Deployment{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "apps/v1",
+ Kind: "Deployment",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "test-name-",
+ Namespace: "test-namespace",
+ OwnerReferences: []metav1.OwnerReference{{
+ APIVersion: "sources.eventing.knative.dev/v1alpha1",
+ Kind: "ContainerSource",
+ Name: "test-name",
+ UID: "TEST_UID",
+ Controller: &yes,
+ BlockOwnerDeletion: &yes,
+ }},
+ },
+ Spec: appsv1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ },
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "sidecar.istio.io/inject": "true",
+ },
+ Labels: map[string]string{
+ "eventing.knative.dev/source": "test-name",
+ },
+ },
+ Spec: corev1.PodSpec{
+ ServiceAccountName: "test-service-account",
+ Containers: []corev1.Container{
+ {
+ Name: "source",
+ Image: "test-image",
+ Args: []string{
+ "--test1=args1",
+ "--test2=args2",
+ "--sink=test-sink",
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: "test1",
+ Value: "arg1",
+ }, {
+ Name: "test2",
+ ValueFrom: &corev1.EnvVarSource{
+ SecretKeyRef: &corev1.SecretKeySelector{
+ Key: "test2-secret",
+ },
+ },
+ }, {
+ Name: "SINK",
+ Value: "test-sink",
+ }},
+ ImagePullPolicy: corev1.PullIfNotPresent,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("unexpected deploy (-want, +got) = %v", diff)
+ }
+}
diff --git a/pkg/reconciler/cronjobsource/cronjobsource.go b/pkg/reconciler/cronjobsource/cronjobsource.go
index 488f144ca5b..9c678d26c04 100644
--- a/pkg/reconciler/cronjobsource/cronjobsource.go
+++ b/pkg/reconciler/cronjobsource/cronjobsource.go
@@ -28,7 +28,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
- apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -116,7 +115,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
// Get the CronJobSource resource with this namespace/name
original, err := r.cronjobLister.CronJobSources(namespace).Get(name)
- if apierrs.IsNotFound(err) {
+ if apierrors.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
logging.FromContext(ctx).Error("CronJobSource key in work queue no longer exists", zap.Any("key", key))
return nil
diff --git a/pkg/reconciler/cronjobsource/cronjobsource_test.go b/pkg/reconciler/cronjobsource/cronjobsource_test.go
index a971cb1e4e9..ce3ed0ca5fd 100644
--- a/pkg/reconciler/cronjobsource/cronjobsource_test.go
+++ b/pkg/reconciler/cronjobsource/cronjobsource_test.go
@@ -17,46 +17,33 @@ limitations under the License.
package cronjobsource
import (
- "github.com/knative/eventing/pkg/reconciler/cronjobsource/resources"
- "github.com/knative/eventing/pkg/utils"
- "k8s.io/apimachinery/pkg/runtime"
"os"
"testing"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
clientgotesting "k8s.io/client-go/testing"
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
informers "github.com/knative/eventing/pkg/client/informers/externalversions"
"github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/cronjobsource/resources"
+ "github.com/knative/eventing/pkg/utils"
duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
"github.com/knative/pkg/controller"
logtesting "github.com/knative/pkg/logging/testing"
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- //"k8s.io/apimachinery/pkg/runtime"
- kubeinformers "k8s.io/client-go/informers"
- fakekubeclientset "k8s.io/client-go/kubernetes/fake"
- "k8s.io/client-go/kubernetes/scheme"
. "github.com/knative/eventing/pkg/reconciler/testing"
. "github.com/knative/pkg/reconciler/testing"
-
- sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
- v1 "k8s.io/api/apps/v1"
)
var (
- // deletionTime is used when objects are marked as deleted. Rfc3339Copy()
- // truncates to seconds to match the loss of precision during serialization.
- deletionTime = metav1.Now().Rfc3339Copy()
-
- trueVal = true
-
- sinkGVK = metav1.GroupVersionKind{
- Group: "eventing.knative.dev",
- Version: "v1alpha1",
- Kind: "Channel",
- }
sinkRef = corev1.ObjectReference{
Name: sinkName,
Kind: "Channel",
@@ -69,7 +56,6 @@ var (
const (
image = "github.com/knative/test/image"
sourceName = "test-cronjob-source"
- sourceUID = "1234-5678-90"
testNS = "testnamespace"
testSchedule = "*/2 * * * *"
testData = "data"
@@ -79,7 +65,7 @@ const (
func init() {
// Add types to scheme
- _ = v1.AddToScheme(scheme.Scheme)
+ _ = appsv1.AddToScheme(scheme.Scheme)
_ = corev1.AddToScheme(scheme.Scheme)
_ = duckv1alpha1.AddToScheme(scheme.Scheme)
@@ -282,7 +268,7 @@ func TestNew(t *testing.T) {
}
}
-func makeReceiveAdapter() *v1.Deployment {
+func makeReceiveAdapter() *appsv1.Deployment {
source := NewCronSourceJob(sourceName, testNS,
WithCronJobSourceSpec(sourcesv1alpha1.CronJobSourceSpec{
Schedule: testSchedule,
diff --git a/pkg/reconciler/testing/containersource.go b/pkg/reconciler/testing/containersource.go
new file mode 100644
index 00000000000..bc43e909f90
--- /dev/null
+++ b/pkg/reconciler/testing/containersource.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "k8s.io/apimachinery/pkg/types"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+)
+
+// ContainerSourceOption enables further configuration of a ContainerSource.
+type ContainerSourceOption func(*v1alpha1.ContainerSource)
+
+// NewContainerSource creates a ContainerSource with ContainerSourceOptions.
+func NewContainerSource(name, namespace string, o ...ContainerSourceOption) *v1alpha1.ContainerSource {
+ c := &v1alpha1.ContainerSource{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ }
+ for _, opt := range o {
+ opt(c)
+ }
+ //c.SetDefaults(context.Background()) // TODO: We should add defaults and validation.
+ return c
+}
+
+func WithContainerSourceUID(uid types.UID) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.UID = uid
+ }
+}
+
+// WithInitContainerSourceConditions initializes the ContainerSource's conditions.
+func WithInitContainerSourceConditions(s *v1alpha1.ContainerSource) {
+ s.Status.InitializeConditions()
+}
+
+func WithContainerSourceSinkNotFound(msg string) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.Status.MarkNoSink("NotFound", msg)
+ }
+}
+
+func WithContainerSourceSinkMissing(msg string) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.Status.MarkNoSink("Missing", msg)
+ }
+}
+
+func WithContainerSourceSink(uri string) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.Status.MarkSink(uri)
+ }
+}
+
+func WithContainerSourceDeploying(msg string) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.Status.MarkDeploying("DeploymentCreated", msg)
+ }
+}
+
+func WithContainerSourceDeployFailed(msg string) ContainerSourceOption {
+ return func(s *v1alpha1.ContainerSource) {
+ s.Status.MarkNotDeployed("DeploymentCreateFailed", msg)
+ }
+}
+
+func WithContainerSourceDeployed(s *v1alpha1.ContainerSource) {
+ s.Status.MarkDeployed()
+}
+
+func WithContainerSourceDeleted(c *v1alpha1.ContainerSource) {
+ t := metav1.NewTime(time.Unix(1e9, 0))
+ c.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+func WithContainerSourceSpec(spec v1alpha1.ContainerSourceSpec) ContainerSourceOption {
+ return func(c *v1alpha1.ContainerSource) {
+ c.Spec = spec
+ }
+}
+
+func WithContainerSourceLabels(labels map[string]string) ContainerSourceOption {
+ return func(c *v1alpha1.ContainerSource) {
+ c.Labels = labels
+ }
+}
+
+func WithContainerSourceAnnotations(annotations map[string]string) ContainerSourceOption {
+ return func(c *v1alpha1.ContainerSource) {
+ c.Annotations = annotations
+ }
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 8d634d3a91c..bf17f6dd67f 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -120,6 +120,10 @@ func (l *Listers) GetCronJobSourceLister() sourcelisters.CronJobSourceLister {
return sourcelisters.NewCronJobSourceLister(l.indexerFor(&sourcesv1alpha1.CronJobSource{}))
}
+func (l *Listers) GetContainerSourceLister() sourcelisters.ContainerSourceLister {
+ return sourcelisters.NewContainerSourceLister(l.indexerFor(&sourcesv1alpha1.ContainerSource{}))
+}
+
// GetGatewayLister gets lister for Istio Gateway resource.
func (l *Listers) GetGatewayLister() istiolisters.GatewayLister {
return istiolisters.NewGatewayLister(l.indexerFor(&istiov1alpha3.Gateway{}))
From 88dcdf03ba8c7fc665f0fd21b3c4643f253ddb6b Mon Sep 17 00:00:00 2001
From: Ville Aikas
Date: Thu, 25 Apr 2019 14:12:29 -0700
Subject: [PATCH 61/76] make sure NewController doesn't return nil (#1103)
---
pkg/reconciler/trigger/trigger_test.go | 39 ++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/pkg/reconciler/trigger/trigger_test.go b/pkg/reconciler/trigger/trigger_test.go
index b88a3541df6..5328f3325b7 100644
--- a/pkg/reconciler/trigger/trigger_test.go
+++ b/pkg/reconciler/trigger/trigger_test.go
@@ -22,6 +22,8 @@ import (
"testing"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
"github.com/knative/eventing/pkg/reconciler"
reconciletesting "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/reconciler/trigger/resources"
@@ -36,6 +38,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
clientgotesting "k8s.io/client-go/testing"
)
@@ -61,6 +65,41 @@ func init() {
_ = duckv1alpha1.AddToScheme(scheme.Scheme)
}
+func TestNewController(t *testing.T) {
+ kubeClient := fakekubeclientset.NewSimpleClientset()
+ eventingClient := fakeclientset.NewSimpleClientset()
+
+ // Create informer factories with fake clients. The second parameter sets the
+ // resync period to zero, disabling it.
+ kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+ eventingInformerFactory := informers.NewSharedInformerFactory(eventingClient, 0)
+
+ // Eventing
+ triggerInformer := eventingInformerFactory.Eventing().V1alpha1().Triggers()
+ channelInformer := eventingInformerFactory.Eventing().V1alpha1().Channels()
+ subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
+ brokerInformer := eventingInformerFactory.Eventing().V1alpha1().Brokers()
+
+ // Kube
+ serviceInformer := kubeInformerFactory.Core().V1().Services()
+
+ c := NewController(
+ reconciler.Options{
+ KubeClientSet: kubeClient,
+ EventingClientSet: eventingClient,
+ Logger: logtesting.TestLogger(t),
+ },
+ triggerInformer,
+ channelInformer,
+ subscriptionInformer,
+ brokerInformer,
+ serviceInformer)
+
+ if c == nil {
+ t.Fatalf("Failed to create with NewController")
+ }
+}
+
func TestAllCases(t *testing.T) {
triggerKey := testNS + "/" + triggerName
table := TableTest{
From f707bfdc1b81148a64cc8367ed706a4a89701e4c Mon Sep 17 00:00:00 2001
From: Matthias Wessendorf
Date: Thu, 25 Apr 2019 23:33:30 +0200
Subject: [PATCH 62/76] Consistency is king, use "" for the filter (#1102)
---
config/200-broker-clusterrole.yaml | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/config/200-broker-clusterrole.yaml b/config/200-broker-clusterrole.yaml
index fc430c0dc8f..b3de29dc20c 100644
--- a/config/200-broker-clusterrole.yaml
+++ b/config/200-broker-clusterrole.yaml
@@ -18,11 +18,11 @@ metadata:
name: eventing-broker-filter
rules:
- apiGroups:
- - eventing.knative.dev
+ - "eventing.knative.dev"
resources:
- - triggers
- - triggers/status
+ - "triggers"
+ - "triggers/status"
verbs:
- - get
- - list
- - watch
+ - "get"
+ - "list"
+ - "watch"
From c4324b0ec64d27782eefd7343a0850a9c7147be8 Mon Sep 17 00:00:00 2001
From: Shashwathi
Date: Fri, 26 Apr 2019 08:46:30 -0700
Subject: [PATCH 63/76] Add data validation on subscription reconciler (#1100)
Fixes https://github.com/knative/eventing/issues/1081
- Update subscription error when reply strategy does not contain address
---
pkg/reconciler/subscription/subscription.go | 6 +++++-
pkg/reconciler/subscription/subscription_test.go | 8 +++-----
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go
index 9c0e9058469..4444539cc8a 100644
--- a/pkg/reconciler/subscription/subscription.go
+++ b/pkg/reconciler/subscription/subscription.go
@@ -139,6 +139,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
func (r *Reconciler) reconcile(ctx context.Context, subscription *v1alpha1.Subscription) error {
subscription.Status.InitializeConditions()
+ // Verify subscription is valid.
+ if err := subscription.Validate(ctx); err != nil {
+ return err
+ }
// See if the subscription has been deleted
if subscription.DeletionTimestamp != nil {
// If the subscription is Ready, then we have to remove it
@@ -282,7 +286,7 @@ func (r *Reconciler) resolveResult(ctx context.Context, namespace string, replyS
if s.Status.Address != nil {
return eventingduck.DomainToURL(s.Status.Address.Hostname), nil
}
- return "", fmt.Errorf("status does not contain address")
+ return "", fmt.Errorf("reply.status does not contain address")
}
func (r *Reconciler) syncPhysicalChannel(ctx context.Context, sub *v1alpha1.Subscription, isDeleted bool) error {
diff --git a/pkg/reconciler/subscription/subscription_test.go b/pkg/reconciler/subscription/subscription_test.go
index 1db1a001d8e..54ddc4e1f5f 100644
--- a/pkg/reconciler/subscription/subscription_test.go
+++ b/pkg/reconciler/subscription/subscription_test.go
@@ -70,7 +70,7 @@ var (
serviceURI = "http://" + serviceDNS + "/"
subscriberGVK = metav1.GroupVersionKind{
- Group: "testing.eventing.knative.dev",
+ Group: "eventing.knative.dev",
Version: "v1alpha1",
Kind: "Subscriber",
}
@@ -157,7 +157,7 @@ func TestAllCases(t *testing.T) {
Key: testNS + "/" + subscriptionName,
WantErr: true,
WantEvents: []string{
- Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: subscribers.testing.eventing.knative.dev %q not found", subscriberName),
+ Eventf(corev1.EventTypeWarning, "SubscriberResolveFailed", "Failed to resolve spec.subscriber: subscribers.eventing.knative.dev %q not found", subscriberName),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
@@ -221,8 +221,7 @@ func TestAllCases(t *testing.T) {
Key: testNS + "/" + subscriptionName,
WantErr: true,
WantEvents: []string{
- Eventf(corev1.EventTypeWarning, "ResultResolveFailed", "Failed to resolve spec.reply: status does not contain address"),
- Eventf(corev1.EventTypeWarning, "SubscriptionUpdateStatusFailed", "Failed to update Subscription's status: status does not contain address"), // TODO: BUGBUG THIS IS WEIRD
+ Eventf(corev1.EventTypeWarning, "SubscriptionUpdateStatusFailed", "Failed to update Subscription's status: invalid value: Subscriber: spec.reply.kind\nonly 'Channel' kind is allowed"),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: NewSubscription(subscriptionName, testNS,
@@ -232,7 +231,6 @@ func TestAllCases(t *testing.T) {
WithSubscriptionReply(subscriberGVK, replyName),
// The first reconciliation will initialize the status conditions.
WithInitSubscriptionConditions,
- WithSubscriptionPhysicalSubscriptionSubscriber(subscriberURI),
),
}},
}, {
From 54500a2fe4872243364acb81841fedf3b796267f Mon Sep 17 00:00:00 2001
From: Ignacio Cano
Date: Fri, 26 Apr 2019 09:24:30 -0700
Subject: [PATCH 64/76] Event Registry (#1105)
* Adding some tests to trigger
* More UTs
* More UTs
* More UTs
* Namespace reconciler automatically creates the Broker Filter's ServiceAccount and RBAC.
Sadly this doesn't work well because we have such an old version of controller-runtime that the Filter ends up trying to watch _all_ Triggers, not just those in its namespace. And it only gets permission for the Triggers in its own namespace.
* Remove no longer needed label.
* Broker and trigger types UTs
* WIP early E2E test
* Changes after code review. Adding trigger defaults and validation tests
* Cleaner trigger validation
* Adding dummy tests for broker validation...
Should be implemented
* Compiling and moving things around
* Updating test
* More updates
* Waiting for potentially multiple contents.
Removing check for corev1.Service ready.
Removing grant's great design. Just making it simpler for now.
* Compiling
* Fixing compilation
* Fixing compilation errors. Adding AnnotateNamespace function.
* Adding ns
* Adding logs. Changing to lowercase, as otherwise the pod name is invalid
* Removing namespace when creating trigger subscriber spec.
Adding wait time constant for default broker creation.
* Checking if all triggers are ready
* Updated logs
* Working
* Adding logs... Still not receiving the events.
* More logs
* Adding build constraint
* Removing unnecessary stuff
* Removing ugly structs
* More logs
* Removing quotes
* More logs
* Adding delay
* Listing triggers in receiver when we create it, so that we don't miss
any message because the client couldn't find the existing trigger.
This is a problem in the in-memory-channel as it doesn't do retries.
Maybe the right solution is to add that functionality there.
* Adding delay to sender pod
* Removing withDelay method and just sleep for a while
* Improve log...
* Updates after code review.
* Adding some more logs and trailing dots.
* Switch import order.
* Updating comments.
* Updating comments.
* Replace the bad errgroup usage with the runnableServer.
* Namespace scoped the Broker Filter's client.
* Fix unit tests.
* Fix yaml
* Setting source to source not type.
Updating comment.
* Switch from annotating the namespace to labeling it, to match Istio.
* Adding EventType CRD
* Update to type
* Updates
* updating eventtypes to event-types
* Ingress policy
* Updating environment variables
* yamls to give event-type permissions
* General clean up.
* Adding namespace to ingress and reconciling service accounts in ns.
* More changes
* Service account in namespace
* Updating events registered
* Updating origin to source.
* to newer version
* moving the ingress policy to the broker itself...
should be an object instead of a string.
* making header a constant. Should use the SDK instead.
* Adding dummy change to trigger event
* Using cloudevents sdk but still needs to be updated.
We should change our APIs to receive Events instead of Messages.
* Adding cloudevents-sdk library
* Invalid eventtype
* Initial docs for the Broker.
* Fill some of the usage section.
* Add instructions for installing the Broker without using Namespace annotation.
* Create example_{brokers,triggers}.yaml to document how they can be used.
* Fix MD linter issues.
* Fix MD linter issues.
* Fix MD linter issues.
* Minor clean up.
* adding type as authoritative
* Should actually compare to lowercase
* listing all event types... we should use some sort of selectors.
label selectors won't work on spec.type as they may not conform to the
label format.
field selectors doesn't seem to work for CRDs beyond metadata.name and
namespace.
* Might not need origin after all, but adding it here
* adding origin to yaml as well
* eventtypes instead of event-types to maintain consistency.
* Clean up some spots the merge didn't catch.
* updating example
* Removing source
* cluster-scoped registry to simplify things.
* updating example
* removing source
* updating broker policy to create event types...
updating cloud event SDK version.
* adding a TODO to update to a cluster-level role binding for the ingress.
If we are planning on using cluster-scoped EventTypes
* namespaced EventTypes, i.e., registry per namespace
* adding create permission to ingress
* registry per namespace. Ingress policies working!
* Fix the bad merge by replacing logger.BaseLogger with logger.FormatLogger.
* Add extra columns when using kubectl get.
* MarkBrokerDoesNotExist
* Rename extra columns.
* cosmetic change
* Replace the Trigger reconciler's in-memory map with a simple list, utilizing the fact that Controller Runtime already has the results of the List cached.
* Accept v0.1 and v0.2 cloud events. Adding UTs.
Updating initClient as well, removing unnecessary paging.
* Change to resolve.SubscriberSpec().
* Remove restClient as it wasn't actually used.
* Only reconcile the Namespace if the specific resource we care about changes.
* changing origin to from
* lowercase
* Merge remote-tracking branch 'upstream/master' into registry-namespace
# Conflicts:
# cmd/broker/ingress/main.go
# cmd/webhook/main.go
# config/200-broker-clusterrole.yaml
# config/500-controller.yaml
# pkg/apis/eventing/v1alpha1/broker_types.go
# pkg/apis/eventing/v1alpha1/register.go
# pkg/reconciler/v1alpha1/broker/broker.go
# pkg/reconciler/v1alpha1/broker/resources/ingress.go
# pkg/reconciler/v1alpha1/namespace/namespace.go
# pkg/reconciler/v1alpha1/trigger/trigger.go
* updating event type to include broker
* additional columns
* just TODOs
* adding some TODOs
* Adding TODOs, updating cloudevents after my change
* Adding observability
* validation
* broker default
* event type controller... checks for broker existence and readiness
* adding extra columns
* message in columns
* Passing broker. Need to refactor this module
* broker immutable to avoid problem with label selector
* listening for broker changes
* removing broker label. Making broker mutable again.
* fixing dns subdomain
* ingress policy as an object.
adding some UTs.
still not convinced.
* removing DeprecatedGeneration
* Adding README
* yaml
* updating readme
* updating trigger
* all fields immutable in eventType.
this makes our life easier when reconciling them from sources.
* adding UTs
* Updating README for source
* moving ToDNSSubdomain to utils
* removing ommitempty
* Using github.com/kelseyhightower/envconfig for env variables.
* changing Triggers to also look for an extension attribute as part of its
source. If there, then we use that as source for exact matching.
* Updating ingress to read From custom extension, if present, and use that
Otherwise it uses source
* Updating comments
* It seems to complain if I add an invalid URL in the markup.
Changing schema.
* typo
* lowercase from
* using new cloud event API
* addressing Grant's comment.
Debug instead of Info
* Bumping cloud-sdk
* Migrate to correct sdk usage.
* update README with other branch
* updating e2e tests
* Updating deepcopy
* naming changes
* disabling registry test to see if this is causing problems
* re-enabling these guys again
* Missing files
* adding description
* removing autoAdd
* some updates, still broken
* some updates, still broken
* removing broker ingress policies related stuff
* adding eventType controller
* eventType controller new serving structure
* bug
* removing commented code
* adding Gopkg.lock
* rollback source getter
* permissions!!! damn...
* permissions.
* removing formatting of imports in goland
* tracking changes to broker as trigger does.
---
cmd/controller/main.go | 11 +-
cmd/webhook/main.go | 1 +
config/200-controller-clusterrole.yaml | 2 +
config/200-webhook-clusterrole.yaml | 2 +
config/300-eventtype.yaml | 53 ++++
docs/registry/example_eventtype.yaml | 49 +++
.../eventing/v1alpha1/eventtype_defaults.go | 29 ++
.../v1alpha1/eventtype_defaults_test.go | 83 ++++++
.../eventing/v1alpha1/eventtype_lifecycle.go | 58 ++++
.../v1alpha1/eventtype_lifecycle_test.go | 247 ++++++++++++++++
pkg/apis/eventing/v1alpha1/eventtype_types.go | 83 ++++++
.../eventing/v1alpha1/eventtype_validation.go | 77 +++++
.../v1alpha1/eventtype_validation_test.go | 279 ++++++++++++++++++
pkg/apis/eventing/v1alpha1/register.go | 2 +
.../v1alpha1/zz_generated.deepcopy.go | 94 ++++++
.../eventing/v1alpha1/eventing_client.go | 5 +
.../typed/eventing/v1alpha1/eventtype.go | 174 +++++++++++
.../v1alpha1/fake/fake_eventing_client.go | 4 +
.../eventing/v1alpha1/fake/fake_eventtype.go | 140 +++++++++
.../eventing/v1alpha1/generated_expansion.go | 2 +
.../eventing/v1alpha1/eventtype.go | 89 ++++++
.../eventing/v1alpha1/interface.go | 7 +
.../informers/externalversions/generic.go | 2 +
.../listers/eventing/v1alpha1/eventtype.go | 94 ++++++
.../eventing/v1alpha1/expansion_generated.go | 8 +
pkg/reconciler/eventtype/eventtype.go | 211 +++++++++++++
pkg/reconciler/eventtype/eventtype_test.go | 165 +++++++++++
pkg/reconciler/testing/eventtype.go | 91 ++++++
pkg/reconciler/testing/listers.go | 4 +
pkg/reconciler/trigger/trigger.go | 24 +-
pkg/utils/utils.go | 17 ++
31 files changed, 2085 insertions(+), 22 deletions(-)
create mode 100644 config/300-eventtype.yaml
create mode 100644 docs/registry/example_eventtype.yaml
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_defaults.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_defaults_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_lifecycle.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_lifecycle_test.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_types.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_validation.go
create mode 100644 pkg/apis/eventing/v1alpha1/eventtype_validation_test.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventtype.go
create mode 100644 pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventtype.go
create mode 100644 pkg/client/informers/externalversions/eventing/v1alpha1/eventtype.go
create mode 100644 pkg/client/listers/eventing/v1alpha1/eventtype.go
create mode 100644 pkg/reconciler/eventtype/eventtype.go
create mode 100644 pkg/reconciler/eventtype/eventtype_test.go
create mode 100644 pkg/reconciler/testing/eventtype.go
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 8bbdc8d81c3..1addab462cd 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -21,6 +21,8 @@ import (
"log"
"os"
+ "github.com/knative/eventing/pkg/reconciler/eventtype"
+
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/rest"
@@ -67,7 +69,7 @@ func main() {
logger.Info("Starting the controller")
- const numControllers = 5
+ const numControllers = 6
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -80,6 +82,7 @@ func main() {
channelInformer := eventingInformerFactory.Eventing().V1alpha1().Channels()
subscriptionInformer := eventingInformerFactory.Eventing().V1alpha1().Subscriptions()
brokerInformer := eventingInformerFactory.Eventing().V1alpha1().Brokers()
+ eventTypeInformer := eventingInformerFactory.Eventing().V1alpha1().EventTypes()
// Kube
serviceInformer := kubeInformerFactory.Core().V1().Services()
@@ -125,6 +128,11 @@ func main() {
FilterServiceAccountName: getRequiredEnv("BROKER_FILTER_SERVICE_ACCOUNT"),
},
),
+ eventtype.NewController(
+ opt,
+ eventTypeInformer,
+ brokerInformer,
+ ),
}
if len(controllers) != numControllers {
logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
@@ -147,6 +155,7 @@ func main() {
channelInformer.Informer(),
subscriptionInformer.Informer(),
triggerInformer.Informer(),
+ eventTypeInformer.Informer(),
// Kube
configMapInformer.Informer(),
serviceInformer.Informer(),
diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index b746e9ca24a..429ca099c03 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -101,6 +101,7 @@ func main() {
eventingv1alpha1.SchemeGroupVersion.WithKind("ClusterChannelProvisioner"): &eventingv1alpha1.ClusterChannelProvisioner{},
eventingv1alpha1.SchemeGroupVersion.WithKind("Subscription"): &eventingv1alpha1.Subscription{},
eventingv1alpha1.SchemeGroupVersion.WithKind("Trigger"): &eventingv1alpha1.Trigger{},
+ eventingv1alpha1.SchemeGroupVersion.WithKind("EventType"): &eventingv1alpha1.EventType{},
},
Logger: logger,
}
diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml
index 5be03648e78..60afacc783d 100644
--- a/config/200-controller-clusterrole.yaml
+++ b/config/200-controller-clusterrole.yaml
@@ -70,6 +70,8 @@ rules:
- "subscriptions/status"
- "triggers"
- "triggers/status"
+ - "eventtypes"
+ - "eventtypes/status"
verbs: *everything
# Source resources and statuses we care about.
diff --git a/config/200-webhook-clusterrole.yaml b/config/200-webhook-clusterrole.yaml
index 89823ca6cb1..53176c1013a 100644
--- a/config/200-webhook-clusterrole.yaml
+++ b/config/200-webhook-clusterrole.yaml
@@ -79,6 +79,8 @@ rules:
- "subscriptions/status"
- "triggers"
- "triggers/status"
+ - "eventtypes"
+ - "eventtypes/status"
verbs:
- "get"
- "list"
diff --git a/config/300-eventtype.yaml b/config/300-eventtype.yaml
new file mode 100644
index 00000000000..1b83fd8cd11
--- /dev/null
+++ b/config/300-eventtype.yaml
@@ -0,0 +1,53 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: eventtypes.eventing.knative.dev
+spec:
+ group: eventing.knative.dev
+ version: v1alpha1
+ names:
+ kind: EventType
+ plural: eventtypes
+ singular: eventtype
+ categories:
+ - all
+ - knative
+ - eventing
+ scope: Namespaced
+ subresources:
+ status: {}
+ additionalPrinterColumns:
+ - name: Type
+ type: string
+ JSONPath: ".spec.type"
+ - name: Source
+ type: string
+ JSONPath: ".spec.source"
+ - name: Schema
+ type: string
+ JSONPath: ".spec.schema"
+ - name: Broker
+ type: string
+ JSONPath: ".spec.broker"
+ - name: Description
+ type: string
+ JSONPath: ".spec.description"
+ - name: Ready
+ type: string
+ JSONPath: ".status.conditions[?(@.type==\"Ready\")].status"
+ - name: Reason
+ type: string
+ JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason"
diff --git a/docs/registry/example_eventtype.yaml b/docs/registry/example_eventtype.yaml
new file mode 100644
index 00000000000..17b12a44545
--- /dev/null
+++ b/docs/registry/example_eventtype.yaml
@@ -0,0 +1,49 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+
+# This EventType creates an event type with type 'com.github.pull_request', source
+# 'github.com', for the 'default' Broker.
+
+apiVersion: eventing.knative.dev/v1alpha1
+kind: EventType
+metadata:
+ name: com.github.pullrequest
+spec:
+ type: com.github.pull_request
+ source: github.com
+ broker: default
+ description: "GitHub Pull Request"
+
+---
+
+# This Trigger matches all events of type 'com.github.pull_request' and source
+# 'github.com', that are sent to the 'default' Broker.
+
+apiVersion: eventing.knative.dev/v1alpha1
+kind: Trigger
+metadata:
+ name: filtering-event-type
+spec:
+ filter:
+ sourceAndType:
+ type: com.github.pull_request
+ source: github.com
+ broker: default
+ subscriber:
+ ref:
+ apiVersion: serving.knative.dev/v1alpha1
+ kind: Service
+ name: message-dumper
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_defaults.go b/pkg/apis/eventing/v1alpha1/eventtype_defaults.go
new file mode 100644
index 00000000000..eee2cec8c46
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_defaults.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "context"
+
+func (et *EventType) SetDefaults(ctx context.Context) {
+ et.Spec.SetDefaults(ctx)
+}
+
+func (ets *EventTypeSpec) SetDefaults(ctx context.Context) {
+ if ets.Broker == "" {
+ ets.Broker = "default"
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_defaults_test.go b/pkg/apis/eventing/v1alpha1/eventtype_defaults_test.go
new file mode 100644
index 00000000000..6ac447be495
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_defaults_test.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func TestEventTypeDefaults(t *testing.T) {
+ testCases := map[string]struct {
+ initial EventType
+ expected EventType
+ }{
+ "nil spec": {
+ initial: EventType{},
+ expected: EventType{
+ Spec: EventTypeSpec{
+ Broker: "default",
+ },
+ },
+ },
+ "broker empty": {
+ initial: EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "",
+ Schema: "test-schema",
+ },
+ },
+ expected: EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "default",
+ Schema: "test-schema",
+ },
+ },
+ },
+ "broker not set": {
+ initial: EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Schema: "test-schema",
+ },
+ },
+ expected: EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "default",
+ Schema: "test-schema",
+ },
+ },
+ },
+ }
+ for n, tc := range testCases {
+ t.Run(n, func(t *testing.T) {
+ tc.initial.SetDefaults(context.TODO())
+ if diff := cmp.Diff(tc.expected, tc.initial); diff != "" {
+ t.Fatalf("Unexpected defaults (-want, +got): %s", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_lifecycle.go b/pkg/apis/eventing/v1alpha1/eventtype_lifecycle.go
new file mode 100644
index 00000000000..5cef6862a6c
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_lifecycle.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+
+var eventTypeCondSet = duckv1alpha1.NewLivingConditionSet(EventTypeConditionBrokerExists, EventTypeConditionBrokerReady)
+
+const (
+ EventTypeConditionReady = duckv1alpha1.ConditionReady
+ EventTypeConditionBrokerExists duckv1alpha1.ConditionType = "BrokerExists"
+ EventTypeConditionBrokerReady duckv1alpha1.ConditionType = "BrokerReady"
+)
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (et *EventTypeStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+ return eventTypeCondSet.Manage(et).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (et *EventTypeStatus) IsReady() bool {
+ return eventTypeCondSet.Manage(et).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (et *EventTypeStatus) InitializeConditions() {
+ eventTypeCondSet.Manage(et).InitializeConditions()
+}
+
+func (et *EventTypeStatus) MarkBrokerExists() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionBrokerExists)
+}
+
+func (et *EventTypeStatus) MarkBrokerDoesNotExist() {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionBrokerExists, "BrokerDoesNotExist", "Broker does not exist")
+}
+
+func (et *EventTypeStatus) MarkBrokerReady() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionBrokerReady)
+}
+
+func (et *EventTypeStatus) MarkBrokerNotReady() {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionBrokerReady, "BrokerNotReady", "Broker is not ready")
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_lifecycle_test.go b/pkg/apis/eventing/v1alpha1/eventtype_lifecycle_test.go
new file mode 100644
index 00000000000..d88c3fcb308
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_lifecycle_test.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+var (
+ trueValue = true
+ falseValue = false
+)
+
+var (
+ eventTypeConditionReady = duckv1alpha1.Condition{
+ Type: EventTypeConditionReady,
+ Status: corev1.ConditionTrue,
+ }
+
+ eventTypeConditionBrokerExists = duckv1alpha1.Condition{
+ Type: EventTypeConditionBrokerExists,
+ Status: corev1.ConditionTrue,
+ }
+
+ eventTypeConditionBrokerReady = duckv1alpha1.Condition{
+ Type: EventTypeConditionBrokerReady,
+ Status: corev1.ConditionTrue,
+ }
+)
+
+func TestEventTypeGetCondition(t *testing.T) {
+ tests := []struct {
+ name string
+ ets *EventTypeStatus
+ condQuery duckv1alpha1.ConditionType
+ want *duckv1alpha1.Condition
+ }{{
+ name: "single condition",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{
+ eventTypeConditionReady,
+ },
+ },
+ },
+ condQuery: duckv1alpha1.ConditionReady,
+ want: &eventTypeConditionReady,
+ }, {
+ name: "broker exists condition",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{
+ eventTypeConditionBrokerExists,
+ },
+ },
+ },
+ condQuery: EventTypeConditionBrokerExists,
+ want: &eventTypeConditionBrokerExists,
+ }, {
+ name: "multiple conditions, condition true",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{
+ eventTypeConditionBrokerExists,
+ eventTypeConditionBrokerReady,
+ },
+ },
+ },
+ condQuery: EventTypeConditionBrokerReady,
+ want: &eventTypeConditionBrokerReady,
+ }, {
+ name: "unknown condition",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{
+ eventTypeConditionBrokerReady,
+ eventTypeConditionReady,
+ },
+ },
+ },
+ condQuery: duckv1alpha1.ConditionType("foo"),
+ want: nil,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.ets.GetCondition(test.condQuery)
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Errorf("unexpected condition (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
+
+func TestEventTypeInitializeConditions(t *testing.T) {
+ tests := []struct {
+ name string
+ ets *EventTypeStatus
+ want *EventTypeStatus
+ }{{
+ name: "empty",
+ ets: &EventTypeStatus{},
+ want: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{{
+ Type: EventTypeConditionBrokerExists,
+ Status: corev1.ConditionUnknown,
+ }, {
+ Type: EventTypeConditionBrokerReady,
+ Status: corev1.ConditionUnknown,
+ }, {
+ Type: EventTypeConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ },
+ },
+ },
+ }, {
+ name: "one false",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{{
+ Type: EventTypeConditionBrokerExists,
+ Status: corev1.ConditionFalse,
+ }},
+ },
+ },
+ want: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{{
+ Type: EventTypeConditionBrokerExists,
+ Status: corev1.ConditionFalse,
+ }, {
+ Type: EventTypeConditionBrokerReady,
+ Status: corev1.ConditionUnknown,
+ }, {
+ Type: EventTypeConditionReady,
+ Status: corev1.ConditionUnknown,
+ }},
+ },
+ },
+ }, {
+ name: "one true",
+ ets: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{{
+ Type: EventTypeConditionBrokerReady,
+ Status: corev1.ConditionTrue,
+ }},
+ },
+ },
+ want: &EventTypeStatus{
+ Status: duckv1alpha1.Status{
+ Conditions: []duckv1alpha1.Condition{{
+ Type: EventTypeConditionBrokerExists,
+ Status: corev1.ConditionUnknown,
+ }, {
+ Type: EventTypeConditionBrokerReady,
+ Status: corev1.ConditionTrue,
+ }, {
+ Type: EventTypeConditionReady,
+ Status: corev1.ConditionUnknown,
+ }},
+ },
+ }},
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ test.ets.InitializeConditions()
+ if diff := cmp.Diff(test.want, test.ets, ignoreAllButTypeAndStatus); diff != "" {
+ t.Errorf("unexpected conditions (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
+
+func TestEventTypeIsReady(t *testing.T) {
+ tests := []struct {
+ name string
+ markBrokerExists *bool
+ markBrokerReady *bool
+ wantReady bool
+ }{{
+ name: "all happy",
+ markBrokerExists: &trueValue,
+ markBrokerReady: &trueValue,
+ wantReady: true,
+ }, {
+ name: "broker exist sad",
+ markBrokerExists: &falseValue,
+ markBrokerReady: &trueValue,
+ wantReady: false,
+ }, {
+ name: "broker ready sad",
+ markBrokerExists: &trueValue,
+ markBrokerReady: &falseValue,
+ wantReady: false,
+ }, {
+ name: "all sad",
+ markBrokerExists: &falseValue,
+ markBrokerReady: &falseValue,
+ wantReady: false,
+ }}
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ets := &EventTypeStatus{}
+ if test.markBrokerExists != nil {
+ if *test.markBrokerExists {
+ ets.MarkBrokerExists()
+ } else {
+ ets.MarkBrokerDoesNotExist()
+ }
+ }
+ if test.markBrokerReady != nil {
+ if *test.markBrokerReady {
+ ets.MarkBrokerReady()
+ } else {
+ ets.MarkBrokerNotReady()
+ }
+ }
+
+ got := ets.IsReady()
+ if test.wantReady != got {
+ t.Errorf("unexpected readiness: want %v, got %v", test.wantReady, got)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_types.go b/pkg/apis/eventing/v1alpha1/eventtype_types.go
new file mode 100644
index 00000000000..51e3ac5c4fd
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_types.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2019 The Knative Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha1
+
+import (
+ "github.com/knative/pkg/apis"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/webhook"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type EventType struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the EventType.
+ Spec EventTypeSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the EventType.
+ // This data may be out of date.
+ // +optional
+ Status EventTypeStatus `json:"status,omitempty"`
+}
+
+// Check that EventType can be validated, can be defaulted, and has immutable fields.
+var _ apis.Validatable = (*EventType)(nil)
+var _ apis.Defaultable = (*EventType)(nil)
+var _ apis.Immutable = (*EventType)(nil)
+var _ runtime.Object = (*EventType)(nil)
+var _ webhook.GenericCRD = (*EventType)(nil)
+
+type EventTypeSpec struct {
+ // Type represents the CloudEvents type. It is authoritative.
+ Type string `json:"type"`
+ // Source is a URI, it represents the CloudEvents source.
+ Source string `json:"source"`
+ // Schema is a URI, it represents the CloudEvents schemaurl extension attribute.
+ // It may be a JSON schema, a protobuf schema, etc. It is optional.
+ // +optional
+ Schema string `json:"schema,omitempty"`
+ // Broker refers to the Broker that can provide the EventType.
+ Broker string `json:"broker"`
+ // Description is an optional field used to describe the EventType, in any meaningful way.
+ // +optional
+ Description string `json:"description,omitempty"`
+}
+
+// EventTypeStatus represents the current state of an EventType.
+type EventTypeStatus struct {
+ // inherits duck/v1alpha1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1alpha1.Status `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventTypeList is a collection of EventTypes.
+type EventTypeList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EventType `json:"items"`
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_validation.go b/pkg/apis/eventing/v1alpha1/eventtype_validation.go
new file mode 100644
index 00000000000..a162f7dc920
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_validation.go
@@ -0,0 +1,77 @@
+/*
Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+
+ "github.com/knative/pkg/apis"
+ "github.com/knative/pkg/kmp"
+)
+
+func (et *EventType) Validate(ctx context.Context) *apis.FieldError {
+ return et.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ets *EventTypeSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ if ets.Type == "" {
+ fe := apis.ErrMissingField("type")
+ errs = errs.Also(fe)
+ }
+ if ets.Source == "" {
+ // TODO validate is a valid URI.
+ fe := apis.ErrMissingField("source")
+ errs = errs.Also(fe)
+ }
+ if ets.Broker == "" {
+ fe := apis.ErrMissingField("broker")
+ errs = errs.Also(fe)
+ }
+ // TODO validate Schema is a valid URI.
+ return errs
+}
+
+func (et *EventType) CheckImmutableFields(ctx context.Context, og apis.Immutable) *apis.FieldError {
+ if og == nil {
+ return nil
+ }
+
+ original, ok := og.(*EventType)
+ if !ok {
+ return &apis.FieldError{Message: "The provided original was not an EventType"}
+ }
+
+ // All but Description field immutable.
+ ignoreArguments := cmpopts.IgnoreFields(EventTypeSpec{}, "Description")
+ if diff, err := kmp.ShortDiff(original.Spec, et.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff EventType",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
diff --git a/pkg/apis/eventing/v1alpha1/eventtype_validation_test.go b/pkg/apis/eventing/v1alpha1/eventtype_validation_test.go
new file mode 100644
index 00000000000..142429fc826
--- /dev/null
+++ b/pkg/apis/eventing/v1alpha1/eventtype_validation_test.go
@@ -0,0 +1,279 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/knative/pkg/apis"
+)
+
+func TestEventTypeValidation(t *testing.T) {
+ name := "invalid type and source and broker"
+ eventType := &EventType{Spec: EventTypeSpec{}}
+
+ want := &apis.FieldError{
+ Paths: []string{"spec.type", "spec.source", "spec.broker"},
+ Message: "missing field(s)",
+ }
+
+ t.Run(name, func(t *testing.T) {
+ got := eventType.Validate(context.TODO())
+ if diff := cmp.Diff(want.Error(), got.Error()); diff != "" {
+ t.Errorf("EventType.Validate (-want, +got) = %v", diff)
+ }
+ })
+}
+
+func TestEventTypeSpecValidation(t *testing.T) {
+ tests := []struct {
+ name string
+ ets *EventTypeSpec
+ want *apis.FieldError
+ }{{
+ name: "invalid eventtype spec",
+ ets: &EventTypeSpec{},
+ want: func() *apis.FieldError {
+ fe := apis.ErrMissingField("type", "source", "broker")
+ return fe
+ }(),
+ }, {
+ name: "invalid eventtype type",
+ ets: &EventTypeSpec{
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ want: func() *apis.FieldError {
+ fe := apis.ErrMissingField("type")
+ return fe
+ }(),
+ }, {
+ name: "invalid eventtype source",
+ ets: &EventTypeSpec{
+ Type: "test-type",
+ Broker: "test-broker",
+ },
+ want: func() *apis.FieldError {
+ fe := apis.ErrMissingField("source")
+ return fe
+ }(),
+ }, {
+ name: "invalid eventtype broker",
+ ets: &EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ },
+ want: func() *apis.FieldError {
+ fe := apis.ErrMissingField("broker")
+ return fe
+ }(),
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.ets.Validate(context.TODO())
+ if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" {
+ t.Errorf("%s: Validate EventTypeSpec (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
+
+func TestEventTypeImmutableFields(t *testing.T) {
+ tests := []struct {
+ name string
+ current apis.Immutable
+ original apis.Immutable
+ want *apis.FieldError
+ }{{
+ name: "good (no change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ },
+ },
+ want: nil,
+ }, {
+ name: "new nil is ok",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ },
+ },
+ original: nil,
+ want: nil,
+ }, {
+ name: "invalid type",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ },
+ original: &Trigger{},
+ want: &apis.FieldError{
+ Message: "The provided original was not an EventType",
+ },
+ }, {
+ name: "bad (broker change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "original-broker",
+ },
+ },
+ want: &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: `{v1alpha1.EventTypeSpec}.Broker:
+ -: "original-broker"
+ +: "test-broker"
+`,
+ },
+ }, {
+ name: "bad (type change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "original-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ },
+ want: &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: `{v1alpha1.EventTypeSpec}.Type:
+ -: "original-type"
+ +: "test-type"
+`,
+ },
+ }, {
+ name: "bad (source change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "original-source",
+ Broker: "test-broker",
+ },
+ },
+ want: &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: `{v1alpha1.EventTypeSpec}.Source:
+ -: "original-source"
+ +: "test-source"
+`,
+ },
+ }, {
+ name: "bad (schema change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "original-schema",
+ },
+ },
+ want: &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: `{v1alpha1.EventTypeSpec}.Schema:
+ -: "original-schema"
+ +: "test-schema"
+`,
+ },
+ }, {
+ name: "good (description change)",
+ current: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ Description: "test-description",
+ },
+ },
+ original: &EventType{
+ Spec: EventTypeSpec{
+ Type: "test-type",
+ Source: "test-source",
+ Broker: "test-broker",
+ Schema: "test-schema",
+ Description: "original-description",
+ },
+ },
+ want: nil,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.current.CheckImmutableFields(context.TODO(), test.original)
+ if diff := cmp.Diff(test.want.Error(), got.Error()); diff != "" {
+ t.Errorf("CheckImmutableFields (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/eventing/v1alpha1/register.go b/pkg/apis/eventing/v1alpha1/register.go
index fb3a5292623..977cad56aaf 100644
--- a/pkg/apis/eventing/v1alpha1/register.go
+++ b/pkg/apis/eventing/v1alpha1/register.go
@@ -55,6 +55,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&SubscriptionList{},
&Trigger{},
&TriggerList{},
+ &EventType{},
+ &EventTypeList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
index 9807e6caf2a..efddd131c82 100644
--- a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go
@@ -342,6 +342,100 @@ func (in *ClusterChannelProvisionerStatus) DeepCopy() *ClusterChannelProvisioner
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventType) DeepCopyInto(out *EventType) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventType.
+func (in *EventType) DeepCopy() *EventType {
+ if in == nil {
+ return nil
+ }
+ out := new(EventType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventType) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeList) DeepCopyInto(out *EventTypeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeList.
+func (in *EventTypeList) DeepCopy() *EventTypeList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventTypeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeSpec) DeepCopyInto(out *EventTypeSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeSpec.
+func (in *EventTypeSpec) DeepCopy() *EventTypeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeStatus) DeepCopyInto(out *EventTypeStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeStatus.
+func (in *EventTypeStatus) DeepCopy() *EventTypeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplyStrategy) DeepCopyInto(out *ReplyStrategy) {
*out = *in
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
index 68722de4481..4af33411504 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
@@ -30,6 +30,7 @@ type EventingV1alpha1Interface interface {
BrokersGetter
ChannelsGetter
ClusterChannelProvisionersGetter
+ EventTypesGetter
SubscriptionsGetter
TriggersGetter
}
@@ -51,6 +52,10 @@ func (c *EventingV1alpha1Client) ClusterChannelProvisioners() ClusterChannelProv
return newClusterChannelProvisioners(c)
}
+func (c *EventingV1alpha1Client) EventTypes(namespace string) EventTypeInterface {
+ return newEventTypes(c, namespace)
+}
+
func (c *EventingV1alpha1Client) Subscriptions(namespace string) SubscriptionInterface {
return newSubscriptions(c, namespace)
}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventtype.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventtype.go
new file mode 100644
index 00000000000..ff71fb35749
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventtype.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ scheme "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EventTypesGetter has a method to return a EventTypeInterface.
+// A group's client should implement this interface.
+type EventTypesGetter interface {
+ EventTypes(namespace string) EventTypeInterface
+}
+
+// EventTypeInterface has methods to work with EventType resources.
+type EventTypeInterface interface {
+ Create(*v1alpha1.EventType) (*v1alpha1.EventType, error)
+ Update(*v1alpha1.EventType) (*v1alpha1.EventType, error)
+ UpdateStatus(*v1alpha1.EventType) (*v1alpha1.EventType, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.EventType, error)
+ List(opts v1.ListOptions) (*v1alpha1.EventTypeList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventType, err error)
+ EventTypeExpansion
+}
+
+// eventTypes implements EventTypeInterface
+type eventTypes struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventTypes returns a EventTypes
+func newEventTypes(c *EventingV1alpha1Client, namespace string) *eventTypes {
+ return &eventTypes{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventType, and returns the corresponding eventType object, and an error if there is any.
+func (c *eventTypes) Get(name string, options v1.GetOptions) (result *v1alpha1.EventType, err error) {
+ result = &v1alpha1.EventType{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventTypes that match those selectors.
+func (c *eventTypes) List(opts v1.ListOptions) (result *v1alpha1.EventTypeList, err error) {
+ result = &v1alpha1.EventTypeList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventTypes.
+func (c *eventTypes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a eventType and creates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Create(eventType *v1alpha1.EventType) (result *v1alpha1.EventType, err error) {
+ result = &v1alpha1.EventType{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Body(eventType).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventType and updates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Update(eventType *v1alpha1.EventType) (result *v1alpha1.EventType, err error) {
+ result = &v1alpha1.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ Body(eventType).
+ Do().
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *eventTypes) UpdateStatus(eventType *v1alpha1.EventType) (result *v1alpha1.EventType, err error) {
+ result = &v1alpha1.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ SubResource("status").
+ Body(eventType).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventType and deletes it. Returns an error if one occurs.
+func (c *eventTypes) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventType.
+func (c *eventTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventType, err error) {
+ result = &v1alpha1.EventType{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventtypes").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
index 52759cda929..75da888a5e6 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventing_client.go
@@ -40,6 +40,10 @@ func (c *FakeEventingV1alpha1) ClusterChannelProvisioners() v1alpha1.ClusterChan
return &FakeClusterChannelProvisioners{c}
}
+func (c *FakeEventingV1alpha1) EventTypes(namespace string) v1alpha1.EventTypeInterface {
+ return &FakeEventTypes{c, namespace}
+}
+
func (c *FakeEventingV1alpha1) Subscriptions(namespace string) v1alpha1.SubscriptionInterface {
return &FakeSubscriptions{c, namespace}
}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventtype.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventtype.go
new file mode 100644
index 00000000000..4958426561e
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/fake/fake_eventtype.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEventTypes implements EventTypeInterface
+type FakeEventTypes struct {
+ Fake *FakeEventingV1alpha1
+ ns string
+}
+
+var eventtypesResource = schema.GroupVersionResource{Group: "eventing.knative.dev", Version: "v1alpha1", Resource: "eventtypes"}
+
+var eventtypesKind = schema.GroupVersionKind{Group: "eventing.knative.dev", Version: "v1alpha1", Kind: "EventType"}
+
+// Get takes name of the eventType, and returns the corresponding eventType object, and an error if there is any.
+func (c *FakeEventTypes) Get(name string, options v1.GetOptions) (result *v1alpha1.EventType, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(eventtypesResource, c.ns, name), &v1alpha1.EventType{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventType), err
+}
+
+// List takes label and field selectors, and returns the list of EventTypes that match those selectors.
+func (c *FakeEventTypes) List(opts v1.ListOptions) (result *v1alpha1.EventTypeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(eventtypesResource, eventtypesKind, c.ns, opts), &v1alpha1.EventTypeList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.EventTypeList{ListMeta: obj.(*v1alpha1.EventTypeList).ListMeta}
+ for _, item := range obj.(*v1alpha1.EventTypeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested eventTypes.
+func (c *FakeEventTypes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(eventtypesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a eventType and creates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *FakeEventTypes) Create(eventType *v1alpha1.EventType) (result *v1alpha1.EventType, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(eventtypesResource, c.ns, eventType), &v1alpha1.EventType{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventType), err
+}
+
+// Update takes the representation of a eventType and updates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *FakeEventTypes) Update(eventType *v1alpha1.EventType) (result *v1alpha1.EventType, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(eventtypesResource, c.ns, eventType), &v1alpha1.EventType{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventType), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeEventTypes) UpdateStatus(eventType *v1alpha1.EventType) (*v1alpha1.EventType, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(eventtypesResource, "status", c.ns, eventType), &v1alpha1.EventType{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventType), err
+}
+
+// Delete takes name of the eventType and deletes it. Returns an error if one occurs.
+func (c *FakeEventTypes) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(eventtypesResource, c.ns, name), &v1alpha1.EventType{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEventTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(eventtypesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.EventTypeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched eventType.
+func (c *FakeEventTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EventType, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(eventtypesResource, c.ns, name, data, subresources...), &v1alpha1.EventType{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.EventType), err
+}
diff --git a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
index 00ba65313fd..c33c3d484f4 100644
--- a/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
@@ -24,6 +24,8 @@ type ChannelExpansion interface{}
type ClusterChannelProvisionerExpansion interface{}
+type EventTypeExpansion interface{}
+
type SubscriptionExpansion interface{}
type TriggerExpansion interface{}
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/eventtype.go b/pkg/client/informers/externalversions/eventing/v1alpha1/eventtype.go
new file mode 100644
index 00000000000..52b41ce918e
--- /dev/null
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/eventtype.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ time "time"
+
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ versioned "github.com/knative/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// EventTypeInformer provides access to a shared informer and lister for
+// EventTypes.
+type EventTypeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.EventTypeLister
+}
+
+type eventTypeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventTypes(namespace).List(options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventTypes(namespace).Watch(options)
+ },
+ },
+ &eventingv1alpha1.EventType{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventTypeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1alpha1.EventType{}, f.defaultInformer)
+}
+
+func (f *eventTypeInformer) Lister() v1alpha1.EventTypeLister {
+ return v1alpha1.NewEventTypeLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
index 004212a1b29..614c6d2e90a 100644
--- a/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
+++ b/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
@@ -30,6 +30,8 @@ type Interface interface {
Channels() ChannelInformer
// ClusterChannelProvisioners returns a ClusterChannelProvisionerInformer.
ClusterChannelProvisioners() ClusterChannelProvisionerInformer
+ // EventTypes returns a EventTypeInformer.
+ EventTypes() EventTypeInformer
// Subscriptions returns a SubscriptionInformer.
Subscriptions() SubscriptionInformer
// Triggers returns a TriggerInformer.
@@ -62,6 +64,11 @@ func (v *version) ClusterChannelProvisioners() ClusterChannelProvisionerInformer
return &clusterChannelProvisionerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
+// EventTypes returns a EventTypeInformer.
+func (v *version) EventTypes() EventTypeInformer {
+ return &eventTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
// Subscriptions returns a SubscriptionInformer.
func (v *version) Subscriptions() SubscriptionInformer {
return &subscriptionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 48066085062..fbcb3b3bcba 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -60,6 +60,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Channels().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("clusterchannelprovisioners"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().ClusterChannelProvisioners().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("eventtypes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().EventTypes().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("subscriptions"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Subscriptions().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("triggers"):
diff --git a/pkg/client/listers/eventing/v1alpha1/eventtype.go b/pkg/client/listers/eventing/v1alpha1/eventtype.go
new file mode 100644
index 00000000000..ad549bd981b
--- /dev/null
+++ b/pkg/client/listers/eventing/v1alpha1/eventtype.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// EventTypeLister helps list EventTypes.
+type EventTypeLister interface {
+ // List lists all EventTypes in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha1.EventType, err error)
+ // EventTypes returns an object that can list and get EventTypes.
+ EventTypes(namespace string) EventTypeNamespaceLister
+ EventTypeListerExpansion
+}
+
+// eventTypeLister implements the EventTypeLister interface.
+type eventTypeLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventTypeLister returns a new EventTypeLister.
+func NewEventTypeLister(indexer cache.Indexer) EventTypeLister {
+ return &eventTypeLister{indexer: indexer}
+}
+
+// List lists all EventTypes in the indexer.
+func (s *eventTypeLister) List(selector labels.Selector) (ret []*v1alpha1.EventType, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventType))
+ })
+ return ret, err
+}
+
+// EventTypes returns an object that can list and get EventTypes.
+func (s *eventTypeLister) EventTypes(namespace string) EventTypeNamespaceLister {
+ return eventTypeNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventTypeNamespaceLister helps list and get EventTypes.
+type EventTypeNamespaceLister interface {
+ // List lists all EventTypes in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha1.EventType, err error)
+ // Get retrieves the EventType from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha1.EventType, error)
+ EventTypeNamespaceListerExpansion
+}
+
+// eventTypeNamespaceLister implements the EventTypeNamespaceLister
+// interface.
+type eventTypeNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventTypes in the indexer for a given namespace.
+func (s eventTypeNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EventType, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.EventType))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventType from the indexer for a given namespace and name.
+func (s eventTypeNamespaceLister) Get(name string) (*v1alpha1.EventType, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("eventtype"), name)
+ }
+ return obj.(*v1alpha1.EventType), nil
+}
diff --git a/pkg/client/listers/eventing/v1alpha1/expansion_generated.go b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
index 4bd2b3c78eb..5f34cbf332d 100644
--- a/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
+++ b/pkg/client/listers/eventing/v1alpha1/expansion_generated.go
@@ -38,6 +38,14 @@ type ChannelNamespaceListerExpansion interface{}
// ClusterChannelProvisionerLister.
type ClusterChannelProvisionerListerExpansion interface{}
+// EventTypeListerExpansion allows custom methods to be added to
+// EventTypeLister.
+type EventTypeListerExpansion interface{}
+
+// EventTypeNamespaceListerExpansion allows custom methods to be added to
+// EventTypeNamespaceLister.
+type EventTypeNamespaceListerExpansion interface{}
+
// SubscriptionListerExpansion allows custom methods to be added to
// SubscriptionLister.
type SubscriptionListerExpansion interface{}
diff --git a/pkg/reconciler/eventtype/eventtype.go b/pkg/reconciler/eventtype/eventtype.go
new file mode 100644
index 00000000000..a1aa28113a8
--- /dev/null
+++ b/pkg/reconciler/eventtype/eventtype.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eventtype
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/knative/eventing/pkg/utils"
+ "github.com/knative/pkg/tracker"
+
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/logging"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/controller"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+)
+
+const (
+	// ReconcilerName is the name of the reconciler.
+	ReconcilerName = "EventTypes"
+	// controllerAgentName is the string used by this controller to identify
+	// itself when creating events.
+	controllerAgentName = "eventtype-controller"
+
+	// Names of the corev1.Events emitted from the reconciliation process.
+	eventTypeReadinessChanged   = "EventTypeReadinessChanged"
+	eventTypeReconcileFailed    = "EventTypeReconcileFailed"
+	eventTypeUpdateStatusFailed = "EventTypeUpdateStatusFailed"
+)
+
+// Reconciler keeps EventType resources' status in sync with the state of the
+// Broker each one references.
+type Reconciler struct {
+	*reconciler.Base
+
+	// listers index properties about resources
+	eventTypeLister listers.EventTypeLister
+	brokerLister    listers.BrokerLister
+	// tracker re-enqueues an EventType whenever its Broker changes.
+	tracker tracker.Interface
+}
+
+// brokerGVK is the GroupVersionKind used when building references to Brokers.
+var brokerGVK = v1alpha1.SchemeGroupVersion.WithKind("Broker")
+
+// Check that our Reconciler implements controller.Reconciler
+var _ controller.Reconciler = (*Reconciler)(nil)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+// NewController initializes the controller and is called by the generated code.
+// It registers event handlers to enqueue events for EventTypes and for the
+// Brokers they reference.
+func NewController(
+	opt reconciler.Options,
+	eventTypeInformer eventinginformers.EventTypeInformer,
+	brokerInformer eventinginformers.BrokerInformer,
+) *controller.Impl {
+
+	r := &Reconciler{
+		Base:            reconciler.NewBase(opt, controllerAgentName),
+		eventTypeLister: eventTypeInformer.Lister(),
+		brokerLister:    brokerInformer.Lister(),
+	}
+	impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+	r.Logger.Info("Setting up event handlers")
+	eventTypeInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+	// Tracker is used to notify us that an EventType's Broker has changed so that
+	// we can reconcile.
+	r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease())
+	brokerInformer.Informer().AddEventHandler(reconciler.Handler(
+		controller.EnsureTypeMeta(
+			r.tracker.OnChanged,
+			// Reuse the package-level brokerGVK instead of re-deriving the same
+			// GroupVersionKind inline.
+			brokerGVK,
+		),
+	))
+
+	return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the EventType resource
+// with the current status of the resource.
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the EventType resource
+// with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+	// Convert the namespace/name string into a distinct namespace and name.
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		// A malformed key cannot be retried successfully; log and drop it.
+		r.Logger.Errorf("invalid resource key: %s", key)
+		return nil
+	}
+	ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
+
+	// Get the EventType resource with this namespace/name.
+	original, err := r.eventTypeLister.EventTypes(namespace).Get(name)
+	if apierrs.IsNotFound(err) {
+		// The resource may no longer exist, in which case we stop processing.
+		logging.FromContext(ctx).Info("eventType key in work queue no longer exists")
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Don't modify the informer's copy.
+	eventType := original.DeepCopy()
+
+	// Reconcile this copy of the EventType and then write back any status
+	// updates regardless of whether the reconcile errored out.
+	reconcileErr := r.reconcile(ctx, eventType)
+	if reconcileErr != nil {
+		// Log the reconcile error itself; `err` is nil at this point (the
+		// lister Get above succeeded), so logging it would record nothing.
+		logging.FromContext(ctx).Warn("Error reconciling EventType", zap.Error(reconcileErr))
+		r.Recorder.Eventf(eventType, corev1.EventTypeWarning, eventTypeReconcileFailed, fmt.Sprintf("EventType reconcile error: %v", reconcileErr))
+	} else {
+		logging.FromContext(ctx).Debug("EventType reconciled")
+	}
+
+	if _, err = r.updateStatus(ctx, eventType); err != nil {
+		logging.FromContext(ctx).Warn("Failed to update the EventType status", zap.Error(err))
+		r.Recorder.Eventf(eventType, corev1.EventTypeWarning, eventTypeUpdateStatusFailed, "Failed to update EventType's status: %v", err)
+		return err
+	}
+
+	// Requeue if the resource is not ready.
+	return reconcileErr
+}
+
+// reconcile performs the actual work for a single EventType:
+//  1. Verify the Broker exists.
+//  2. Verify the Broker is ready.
+// It only mutates et.Status; the caller is responsible for persisting it.
+func (r *Reconciler) reconcile(ctx context.Context, et *v1alpha1.EventType) error {
+	et.Status.InitializeConditions()
+
+	if et.DeletionTimestamp != nil {
+		// Everything is cleaned up by the garbage collector; nothing to do here.
+		return nil
+	}
+
+	b, err := r.getBroker(ctx, et)
+	if err != nil {
+		logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err))
+		et.Status.MarkBrokerDoesNotExist()
+		return err
+	}
+	et.Status.MarkBrokerExists()
+
+	// Tell tracker to reconcile this EventType whenever the Broker changes.
+	if err = r.tracker.Track(utils.ObjectRef(b, brokerGVK), et); err != nil {
+		logging.FromContext(ctx).Error("Unable to track changes to Broker", zap.Error(err))
+		return err
+	}
+
+	if !b.Status.IsReady() {
+		logging.FromContext(ctx).Error("Broker is not ready", zap.String("broker", b.Name))
+		et.Status.MarkBrokerNotReady()
+		// Not an error: the tracker above will re-enqueue this EventType when
+		// the Broker changes.
+		return nil
+	}
+	et.Status.MarkBrokerReady()
+
+	return nil
+}
+
+// updateStatus updates the EventType's status subresource, if it differs from
+// the desired status. It returns the updated (or unchanged) EventType.
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.EventType) (*v1alpha1.EventType, error) {
+	eventType, err := r.eventTypeLister.EventTypes(desired.Namespace).Get(desired.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	// If there's nothing to update, just return.
+	if reflect.DeepEqual(eventType.Status, desired.Status) {
+		return eventType, nil
+	}
+
+	// Detect the not-ready -> ready transition so we can emit an event below.
+	becomesReady := desired.Status.IsReady() && !eventType.Status.IsReady()
+
+	// Don't modify the informers copy.
+	existing := eventType.DeepCopy()
+	existing.Status = desired.Status
+
+	et, err := r.EventingClientSet.EventingV1alpha1().EventTypes(desired.Namespace).UpdateStatus(existing)
+	if err == nil && becomesReady {
+		// Time from creation to readiness, computed from the server's returned object.
+		duration := time.Since(et.ObjectMeta.CreationTimestamp.Time)
+		logging.FromContext(ctx).Sugar().Infof("EventType %q became ready after %v", eventType.Name, duration)
+		r.Recorder.Event(eventType, corev1.EventTypeNormal, eventTypeReadinessChanged, fmt.Sprintf("EventType %q became ready", eventType.Name))
+		//r.StatsReporter.ReportServiceReady(eventType.Namespace, eventType.Name, duration) // TODO: stats
+	}
+
+	return et, err
+}
+
+// getBroker returns the Broker for EventType 'et' if it exists, otherwise it returns an error.
+// The lookup is against the lister cache; ctx is currently unused and kept for
+// signature symmetry with the other reconcile helpers.
+func (r *Reconciler) getBroker(ctx context.Context, et *v1alpha1.EventType) (*v1alpha1.Broker, error) {
+	return r.brokerLister.Brokers(et.Namespace).Get(et.Spec.Broker)
+}
diff --git a/pkg/reconciler/eventtype/eventtype_test.go b/pkg/reconciler/eventtype/eventtype_test.go
new file mode 100644
index 00000000000..1ea039e539f
--- /dev/null
+++ b/pkg/reconciler/eventtype/eventtype_test.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package eventtype
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/knative/pkg/tracker"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/reconciler"
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+)
+
+const (
+	// Fixture names shared by every table-test case below.
+	testNS          = "test-namespace"
+	eventTypeName   = "test-eventtype"
+	eventTypeType   = "test-type"
+	eventTypeBroker = "test-broker"
+	eventTypeSource = "/test-source"
+)
+
+var (
+	// NOTE(review): trueVal appears unused in this file — confirm before removing.
+	trueVal = true
+
+	// testKey is the namespace/name workqueue key for the test EventType.
+	testKey = fmt.Sprintf("%s/%s", testNS, eventTypeName)
+)
+
+func init() {
+	// Add types to scheme so the fake clients can round-trip eventing objects.
+	// The error is deliberately discarded in test setup.
+	_ = v1alpha1.AddToScheme(scheme.Scheme)
+}
+
+// TestReconcile drives the EventType reconciler through a table of cases:
+// bad keys, missing objects, deletion, missing/not-ready/ready Brokers,
+// asserting the resulting status updates and emitted events.
+func TestReconcile(t *testing.T) {
+	table := TableTest{
+		{
+			Name: "bad workqueue key",
+			// Make sure Reconcile handles bad keys.
+			Key: "too/many/parts",
+		}, {
+			Name: "key not found",
+			// Make sure Reconcile handles good keys that don't exist.
+			Key: "foo/not-found",
+		},
+		{
+			Name: "EventType not found",
+			Key:  testKey,
+		},
+		{
+			Name: "EventType being deleted",
+			Key:  testKey,
+			Objects: []runtime.Object{
+				NewEventType(eventTypeName, testNS,
+					WithInitEventTypeConditions,
+					WithEventTypeDeletionTimestamp),
+			},
+		},
+		{
+			Name: "Broker not found",
+			Key:  testKey,
+			Objects: []runtime.Object{
+				NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+				),
+			},
+			WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+				Object: NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+					WithInitEventTypeConditions,
+					WithEventTypeBrokerDoesNotExist,
+				),
+			}},
+			// The missing Broker is surfaced as a reconcile error and event.
+			WantErr: true,
+			WantEvents: []string{
+				Eventf(corev1.EventTypeWarning, eventTypeReconcileFailed, "EventType reconcile error: broker.eventing.knative.dev %q not found", eventTypeBroker),
+			},
+		},
+		{
+			Name: "Broker not ready",
+			Key:  testKey,
+			Objects: []runtime.Object{
+				NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+				),
+				NewBroker(eventTypeBroker, testNS,
+					WithInitBrokerConditions,
+				),
+			},
+			WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+				Object: NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+					WithEventTypeBrokerExists,
+					WithEventTypeBrokerNotReady,
+				),
+			}},
+		},
+		{
+			Name: "Successful reconcile, became ready",
+			Key:  testKey,
+			Objects: []runtime.Object{
+				NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+				),
+				NewBroker(eventTypeBroker, testNS,
+					WithBrokerReady,
+				),
+			},
+			WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+				Object: NewEventType(eventTypeName, testNS,
+					WithEventTypeType(eventTypeType),
+					WithEventTypeSource(eventTypeSource),
+					WithEventTypeBroker(eventTypeBroker),
+					WithEventTypeBrokerExists,
+					WithEventTypeBrokerReady,
+				),
+			}},
+			WantEvents: []string{
+				Eventf(corev1.EventTypeNormal, eventTypeReadinessChanged, "EventType %q became ready", eventTypeName),
+			},
+		},
+	}
+
+	defer logtesting.ClearAll()
+	// Wire a real Reconciler against the table's fake listers; the no-op
+	// tracker keeps the test free of informer machinery.
+	table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+		return &Reconciler{
+			Base:            reconciler.NewBase(opt, controllerAgentName),
+			eventTypeLister: listers.GetEventTypeLister(),
+			brokerLister:    listers.GetBrokerLister(),
+			tracker:         tracker.New(func(string) {}, 0),
+		}
+	}))
+
+}
diff --git a/pkg/reconciler/testing/eventtype.go b/pkg/reconciler/testing/eventtype.go
new file mode 100644
index 00000000000..828659684f6
--- /dev/null
+++ b/pkg/reconciler/testing/eventtype.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "context"
+ "time"
+
+ "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EventTypeOption enables further configuration of an EventType.
+type EventTypeOption func(*v1alpha1.EventType)
+
+// NewEventType creates a EventType with EventTypeOptions applied in order,
+// then fills in defaults via SetDefaults.
+func NewEventType(name, namespace string, o ...EventTypeOption) *v1alpha1.EventType {
+	et := &v1alpha1.EventType{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+		},
+	}
+	for _, opt := range o {
+		opt(et)
+	}
+	et.SetDefaults(context.Background())
+	return et
+}
+
+// WithInitEventTypeConditions initializes the EventType's conditions.
+func WithInitEventTypeConditions(et *v1alpha1.EventType) {
+	et.Status.InitializeConditions()
+}
+
+// WithEventTypeSource sets the EventType's spec.source.
+func WithEventTypeSource(source string) EventTypeOption {
+	return func(et *v1alpha1.EventType) {
+		et.Spec.Source = source
+	}
+}
+
+// WithEventTypeType sets the EventType's spec.type.
+func WithEventTypeType(t string) EventTypeOption {
+	return func(et *v1alpha1.EventType) {
+		et.Spec.Type = t
+	}
+}
+
+// WithEventTypeBroker sets the EventType's spec.broker.
+func WithEventTypeBroker(broker string) EventTypeOption {
+	return func(et *v1alpha1.EventType) {
+		et.Spec.Broker = broker
+	}
+}
+
+// WithEventTypeDeletionTimestamp sets a fixed, arbitrary deletion timestamp
+// (1e9 seconds after the epoch) so reconcilers treat the EventType as deleted.
+func WithEventTypeDeletionTimestamp(et *v1alpha1.EventType) {
+	t := metav1.NewTime(time.Unix(1e9, 0))
+	et.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+// WithEventTypeBrokerDoesNotExist calls .Status.MarkBrokerDoesNotExist on the
+// EventType.
+func WithEventTypeBrokerDoesNotExist(et *v1alpha1.EventType) {
+	et.Status.MarkBrokerDoesNotExist()
+}
+
+// WithEventTypeBrokerExists calls .Status.MarkBrokerExists on the EventType.
+func WithEventTypeBrokerExists(et *v1alpha1.EventType) {
+	et.Status.MarkBrokerExists()
+}
+
+// WithEventTypeBrokerNotReady calls .Status.MarkBrokerNotReady on the EventType.
+func WithEventTypeBrokerNotReady(et *v1alpha1.EventType) {
+	et.Status.MarkBrokerNotReady()
+}
+
+// WithEventTypeBrokerReady calls .Status.MarkBrokerReady on the EventType.
+func WithEventTypeBrokerReady(et *v1alpha1.EventType) {
+	et.Status.MarkBrokerReady()
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index bf17f6dd67f..d84b97755ba 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -108,6 +108,10 @@ func (l *Listers) GetBrokerLister() eventinglisters.BrokerLister {
return eventinglisters.NewBrokerLister(l.indexerFor(&eventingv1alpha1.Broker{}))
}
+// GetEventTypeLister returns an EventTypeLister backed by the fake indexer.
+func (l *Listers) GetEventTypeLister() eventinglisters.EventTypeLister {
+	return eventinglisters.NewEventTypeLister(l.indexerFor(&eventingv1alpha1.EventType{}))
+}
+
func (l *Listers) GetChannelLister() eventinglisters.ChannelLister {
return eventinglisters.NewChannelLister(l.indexerFor(&eventingv1alpha1.Channel{}))
}
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
index 473ce7c8e4f..d046ce10a9f 100644
--- a/pkg/reconciler/trigger/trigger.go
+++ b/pkg/reconciler/trigger/trigger.go
@@ -23,6 +23,8 @@ import (
"reflect"
"time"
+ "github.com/knative/eventing/pkg/utils"
+
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
listers "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
@@ -206,7 +208,7 @@ func (r *Reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error {
t.Status.PropagateBrokerStatus(&b.Status)
// Tell tracker to reconcile this Trigger whenever the Broker changes.
- if err = r.tracker.Track(objectRef(b, brokerGVK), t); err != nil {
+ if err = r.tracker.Track(utils.ObjectRef(b, brokerGVK), t); err != nil {
logging.FromContext(ctx).Error("Unable to track changes to Broker", zap.Error(err))
return err
}
@@ -406,23 +408,3 @@ func (r *Reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (
return nil, apierrs.NewNotFound(schema.GroupResource{}, "")
}
-
-type accessor interface {
- GroupVersionKind() schema.GroupVersionKind
- GetNamespace() string
- GetName() string
-}
-
-func objectRef(a accessor, gvk schema.GroupVersionKind) corev1.ObjectReference {
- // We can't always rely on the TypeMeta being populated.
- // See: https://github.com/knative/serving/issues/2372
- // Also: https://github.com/kubernetes/apiextensions-apiserver/issues/29
- // gvk := a.GroupVersionKind()
- apiVersion, kind := gvk.ToAPIVersionAndKind()
- return corev1.ObjectReference{
- APIVersion: apiVersion,
- Kind: kind,
- Namespace: a.GetNamespace(),
- Name: a.GetName(),
- }
-}
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 33482fc0523..1f9bc2ec1e8 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -22,6 +22,10 @@ import (
"os"
"strings"
"sync"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
)
const (
@@ -67,3 +71,16 @@ func getClusterDomainName(r io.Reader) string {
// For all abnormal cases return default domain name
return defaultDomainName
}
+
+// ObjectRef builds a corev1.ObjectReference for obj using the caller-supplied
+// GroupVersionKind rather than obj's TypeMeta.
+func ObjectRef(obj metav1.Object, gvk schema.GroupVersionKind) corev1.ObjectReference {
+	// We can't always rely on the TypeMeta being populated.
+	// See: https://github.com/knative/serving/issues/2372
+	// Also: https://github.com/kubernetes/apiextensions-apiserver/issues/29
+	apiVersion, kind := gvk.ToAPIVersionAndKind()
+	return corev1.ObjectReference{
+		APIVersion: apiVersion,
+		Kind:       kind,
+		Namespace:  obj.GetNamespace(),
+		Name:       obj.GetName(),
+	}
+}
From 285b11a6366c0a745b3c7cf6528b63b372237fa4 Mon Sep 17 00:00:00 2001
From: Akash Verenkar <45154452+akashrv@users.noreply.github.com>
Date: Mon, 29 Apr 2019 10:06:31 -0700
Subject: [PATCH 65/76] Remove Istio dependency from Eventing (Part - 4) -
gcp-pubsub (#1107)
* WIP
* WIP - In-memory working with E2E tests
* WIP - remove istio dependency from in-memory channel
* UTs pass, E2E tests pass with in-memory as well as kafka
* fixed uts that failed due to last K8s service change
* Removed unnecessary space from a line
* Adding istio annotation to test POD. This will be needed when running E2E
tests against channels other than in-memory
* Bug fix to set clusterIp of K8s service only when it is not of type ExternalName
* WIP kafka channel
* WIP kafka - UTs and E2E pass
More UTs needed
* Updated code based on PR comments
* WIP
* Updates based on PR comments
* Updates based on PR comments
* Fixed UTs
* Updated VENDOR_LICENSE
* WIP. Update fanout sidecar
* Merge from upstream master
* UTs pass, ITs passed. Code ready for PR
* Update natss to not use ISTIO. UTs and E2E tests pass.
* Updates based on PR comments
* Removed permission to the istio virtual service from the controller
* WIP
* Changes based on PR comments
* Added back permission that was removed by mistake
* WIP
* Remove istio references
* WIP
* Removed one more reference of istio
* Revert kafka.yaml local change
* WIP
* Revert kafka dispatcher change
* Removing Mutex. No need to use Mutex when using atomic value for hostToChannelMap
* Removed named port from GCP dispatcher K8s service
* WIP
* Final changes before validating E2E tests
* Updates based on PR comments
---
config/200-controller-clusterrole.yaml | 7 -
contrib/gcppubsub/config/gcppubsub.yaml | 17 +-
.../pkg/controller/channel/controller.go | 10 -
.../pkg/controller/channel/reconcile.go | 63 ++----
.../pkg/controller/channel/reconcile_test.go | 210 +++---------------
contrib/gcppubsub/pkg/controller/cmd/main.go | 7 +-
contrib/gcppubsub/pkg/dispatcher/cmd/main.go | 13 +-
.../pkg/dispatcher/dispatcher/controller.go | 11 +-
.../pkg/dispatcher/dispatcher/reconcile.go | 30 +--
.../dispatcher/dispatcher/reconcile_test.go | 67 ++++--
.../pkg/dispatcher/receiver/receiver.go | 55 ++++-
.../pkg/dispatcher/receiver/receiver_test.go | 158 ++++++++++---
.../pkg/dispatcher/dispatcher/dispatcher.go | 21 +-
pkg/channelwatcher/channel_watcher.go | 2 +-
pkg/provisioners/channel_util.go | 19 ++
.../inmemory/channel/reconcile_test.go | 2 -
pkg/provisioners/inmemory/controller/main.go | 2 -
17 files changed, 346 insertions(+), 348 deletions(-)
diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml
index 60afacc783d..d6772c6d6cd 100644
--- a/config/200-controller-clusterrole.yaml
+++ b/config/200-controller-clusterrole.yaml
@@ -35,13 +35,6 @@ rules:
- "patch"
- "watch"
- # Channels and Triggers both manipulate VirtualServices.
- - apiGroups:
- - "networking.istio.io"
- resources:
- - "virtualservices"
- verbs: *everything
-
# Brokers and the namespace annotation controllers manipulate Deployments.
- apiGroups:
- "apps"
diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml
index c05493180c8..a1807aa9851 100644
--- a/contrib/gcppubsub/config/gcppubsub.yaml
+++ b/contrib/gcppubsub/config/gcppubsub.yaml
@@ -61,16 +61,6 @@ rules:
verbs:
- create
- update
- - apiGroups:
- - networking.istio.io
- resources:
- - virtualservices
- verbs:
- - get
- - list
- - watch
- - create
- - update
- apiGroups:
- "" # Core API Group.
resources:
@@ -240,15 +230,12 @@ spec:
clusterChannelProvisioner: gcp-pubsub
role: dispatcher
ports:
- - name: http
- protocol: TCP
+ - protocol: TCP
port: 80
targetPort: 8080
---
-
-# Needed by the GCP PubSub Channel to communicate with GCP PubSub.
-
+# Needed by the GCP PubSub Channel to communicate with GCP PubSub.
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
diff --git a/contrib/gcppubsub/pkg/controller/channel/controller.go b/contrib/gcppubsub/pkg/controller/channel/controller.go
index 847419fd60c..3248d2bbaed 100644
--- a/contrib/gcppubsub/pkg/controller/channel/controller.go
+++ b/contrib/gcppubsub/pkg/controller/channel/controller.go
@@ -19,7 +19,6 @@ package channel
import (
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -74,15 +73,6 @@ func ProvideController(defaultGcpProject string, defaultSecret *corev1.ObjectRef
return nil, err
}
- // Watch the VirtualServices that are owned by Channels.
- err = c.Watch(&source.Kind{
- Type: &istiov1alpha3.VirtualService{},
- }, &handler.EnqueueRequestForOwner{OwnerType: &eventingv1alpha1.Channel{}, IsController: true})
- if err != nil {
- logger.Error("Unable to watch VirtualServices.", zap.Error(err))
- return nil, err
- }
-
return c, nil
}
}
diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile.go b/contrib/gcppubsub/pkg/controller/channel/reconcile.go
index aa1dc66c7be..42c9d5c6522 100644
--- a/contrib/gcppubsub/pkg/controller/channel/reconcile.go
+++ b/contrib/gcppubsub/pkg/controller/channel/reconcile.go
@@ -19,10 +19,10 @@ package channel
import (
"context"
"fmt"
- "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
ccpcontroller "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner"
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
+ "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
util "github.com/knative/eventing/pkg/provisioners"
@@ -49,21 +49,20 @@ const (
noNeedToPersist
// Name of the corev1.Events emitted from the reconciliation process
- channelReconciled = "ChannelReconciled"
- channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
- channelReadStatusFailed = "ChannelReadStatusFailed"
- gcpCredentialsReadFailed = "GcpCredentialsReadFailed"
- gcpResourcesPlanFailed = "GcpResourcesPlanFailed"
- gcpResourcesPersistFailed = "GcpResourcesPersistFailed"
- virtualServiceCreateFailed = "VirtualServiceCreateFailed"
- k8sServiceCreateFailed = "K8sServiceCreateFailed"
- topicCreateFailed = "TopicCreateFailed"
- topicDeleteFailed = "TopicDeleteFailed"
- subscriptionSyncFailed = "SubscriptionSyncFailed"
- subscriptionDeleteFailed = "SubscriptionDeleteFailed"
+ channelReconciled = "ChannelReconciled"
+ channelUpdateStatusFailed = "ChannelUpdateStatusFailed"
+ channelReadStatusFailed = "ChannelReadStatusFailed"
+ gcpCredentialsReadFailed = "GcpCredentialsReadFailed"
+ gcpResourcesPlanFailed = "GcpResourcesPlanFailed"
+ gcpResourcesPersistFailed = "GcpResourcesPersistFailed"
+ k8sServiceCreateFailed = "K8sServiceCreateFailed"
+ topicCreateFailed = "TopicCreateFailed"
+ topicDeleteFailed = "TopicDeleteFailed"
+ subscriptionSyncFailed = "SubscriptionSyncFailed"
+ subscriptionDeleteFailed = "SubscriptionDeleteFailed"
)
-// reconciler reconciles GCP-PubSub Channels by creating the K8s Service and Istio VirtualService
+// reconciler reconciles GCP-PubSub Channels by creating the K8s Service (ExternalName)
// allowing other processes to send data to them. It also creates the GCP PubSub Topics (one per
// Channel) and GCP PubSub Subscriptions (one per Subscriber).
type reconciler struct {
@@ -116,7 +115,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}
// Does this Controller control this Channel?
- if !r.shouldReconcile(c) {
+ if !ShouldReconcile(c) {
logging.FromContext(ctx).Info("Not reconciling Channel, it is not controlled by this Controller", zap.Any("ref", c.Spec))
return reconcile.Result{}, nil
}
@@ -147,9 +146,9 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}, reconcileErr
}
-// shouldReconcile determines if this Controller should control (and therefore reconcile) a given
+// ShouldReconcile determines if this Controller should control (and therefore reconcile) a given
// Channel. This Controller only handles gcp-pubsub channels.
-func (r *reconciler) shouldReconcile(c *eventingv1alpha1.Channel) bool {
+func ShouldReconcile(c *eventingv1alpha1.Channel) bool {
if c.Spec.Provisioner != nil {
return ccpcontroller.IsControlled(c.Spec.Provisioner)
}
@@ -162,11 +161,10 @@ func (r *reconciler) shouldReconcile(c *eventingv1alpha1.Channel) bool {
func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel) (bool, error) {
c.Status.InitializeConditions()
- // We are syncing four things:
- // 1. The K8s Service to talk to this Channel.
- // 2. The Istio VirtualService to talk to this Channel.
- // 3. The GCP PubSub Topic (one for the Channel).
- // 4. The GCP PubSub Subscriptions (one for each Subscriber of the Channel).
+ // We are syncing the following:
+ // - The K8s Service to talk to this Channel.
+ // - The GCP PubSub Topic (one for the Channel).
+ // - The GCP PubSub Subscriptions (one for each Subscriber of the Channel).
// First we will plan all the names out for steps 3 and 4 persist them to status.internal. Then, on a
// subsequent reconcile, we manipulate all the GCP resources in steps 3 and 4.
@@ -187,7 +185,7 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
}
if c.DeletionTimestamp != nil {
- // K8s garbage collection will delete the K8s service and VirtualService for this channel.
+ // K8s garbage collection will delete the K8s service for this channel.
// All the subs should be deleted.
subsToSync := &syncSubs{
subsToDelete: originalPCS.Subscriptions,
@@ -232,18 +230,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel)
return true, nil
}
- svc, err := r.createK8sService(ctx, c)
+ _, err = r.createK8sService(ctx, c)
if err != nil {
r.recorder.Eventf(c, corev1.EventTypeWarning, k8sServiceCreateFailed, "Failed to reconcile Channel's K8s Service: %v", err)
return false, err
}
- err = r.createVirtualService(ctx, c, svc)
- if err != nil {
- r.recorder.Eventf(c, corev1.EventTypeWarning, virtualServiceCreateFailed, "Failed to reconcile Virtual Service for the Channel: %v", err)
- return false, err
- }
-
topic, err := r.createTopic(ctx, plannedPCS, gcpCreds)
if err != nil {
r.recorder.Eventf(c, corev1.EventTypeWarning, topicCreateFailed, "Failed to reconcile Topic for the Channel: %v", err)
@@ -360,7 +352,7 @@ func (r *reconciler) planGcpResources(ctx context.Context, c *eventingv1alpha1.C
}
func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.Channel) (*corev1.Service, error) {
- svc, err := util.CreateK8sService(ctx, r.client, c)
+ svc, err := util.CreateK8sService(ctx, r.client, c, util.ExternalService(c))
if err != nil {
logging.FromContext(ctx).Info("Error creating the Channel's K8s Service", zap.Error(err))
return nil, err
@@ -370,15 +362,6 @@ func (r *reconciler) createK8sService(ctx context.Context, c *eventingv1alpha1.C
return svc, nil
}
-func (r *reconciler) createVirtualService(ctx context.Context, c *eventingv1alpha1.Channel, svc *corev1.Service) error {
- _, err := util.CreateVirtualService(ctx, r.client, c, svc)
- if err != nil {
- logging.FromContext(ctx).Info("Error creating the Virtual Service for the Channel", zap.Error(err))
- return err
- }
- return nil
-}
-
func (r *reconciler) createTopic(ctx context.Context, plannedPCS *pubsubutil.GcpPubSubChannelStatus, gcpCreds *google.Credentials) (pubsubutil.PubSubTopic, error) {
psc, err := r.pubSubClientCreator(ctx, gcpCreds, plannedPCS.GCPProject)
if err != nil {
diff --git a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
index b041972563b..1a6dd3db175 100644
--- a/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
+++ b/contrib/gcppubsub/pkg/controller/channel/reconcile_test.go
@@ -22,31 +22,26 @@ import (
"fmt"
"testing"
- "k8s.io/apimachinery/pkg/types"
-
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
-
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-
- "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
-
- "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds"
-
"github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub"
-
+ "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds"
+ "github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
util "github.com/knative/eventing/pkg/provisioners"
+ "github.com/knative/eventing/pkg/reconciler/names"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/eventing/pkg/utils"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/pkg/system"
_ "github.com/knative/pkg/system/testing"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
@@ -92,18 +87,17 @@ var (
// map of events to set test cases' expectations easier
events = map[string]corev1.Event{
- channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal},
- channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning},
- channelReadStatusFailed: {Reason: channelReadStatusFailed, Type: corev1.EventTypeWarning},
- gcpCredentialsReadFailed: {Reason: gcpCredentialsReadFailed, Type: corev1.EventTypeWarning},
- gcpResourcesPlanFailed: {Reason: gcpResourcesPlanFailed, Type: corev1.EventTypeWarning},
- gcpResourcesPersistFailed: {Reason: gcpResourcesPersistFailed, Type: corev1.EventTypeWarning},
- virtualServiceCreateFailed: {Reason: virtualServiceCreateFailed, Type: corev1.EventTypeWarning},
- k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning},
- topicCreateFailed: {Reason: topicCreateFailed, Type: corev1.EventTypeWarning},
- topicDeleteFailed: {Reason: topicDeleteFailed, Type: corev1.EventTypeWarning},
- subscriptionSyncFailed: {Reason: subscriptionSyncFailed, Type: corev1.EventTypeWarning},
- subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning},
+ channelReconciled: {Reason: channelReconciled, Type: corev1.EventTypeNormal},
+ channelUpdateStatusFailed: {Reason: channelUpdateStatusFailed, Type: corev1.EventTypeWarning},
+ channelReadStatusFailed: {Reason: channelReadStatusFailed, Type: corev1.EventTypeWarning},
+ gcpCredentialsReadFailed: {Reason: gcpCredentialsReadFailed, Type: corev1.EventTypeWarning},
+ gcpResourcesPlanFailed: {Reason: gcpResourcesPlanFailed, Type: corev1.EventTypeWarning},
+ gcpResourcesPersistFailed: {Reason: gcpResourcesPersistFailed, Type: corev1.EventTypeWarning},
+ k8sServiceCreateFailed: {Reason: k8sServiceCreateFailed, Type: corev1.EventTypeWarning},
+ topicCreateFailed: {Reason: topicCreateFailed, Type: corev1.EventTypeWarning},
+ topicDeleteFailed: {Reason: topicDeleteFailed, Type: corev1.EventTypeWarning},
+ subscriptionSyncFailed: {Reason: subscriptionSyncFailed, Type: corev1.EventTypeWarning},
+ subscriptionDeleteFailed: {Reason: subscriptionDeleteFailed, Type: corev1.EventTypeWarning},
}
)
@@ -111,7 +105,6 @@ func init() {
// Add types to scheme.
eventingv1alpha1.AddToScheme(scheme.Scheme)
corev1.AddToScheme(scheme.Scheme)
- istiov1alpha3.AddToScheme(scheme.Scheme)
}
func TestInjectClient(t *testing.T) {
@@ -482,62 +475,6 @@ func TestReconcile(t *testing.T) {
events[k8sServiceCreateFailed],
},
},
- {
- Name: "Virtual service get fails",
- InitialState: []runtime.Object{
- makeChannelWithFinalizerAndPCS(),
- makeK8sService(),
- makeVirtualService(),
- testcreds.MakeSecretWithCreds(),
- },
- Mocks: controllertesting.Mocks{
- MockLists: errorListingVirtualService(),
- },
- WantPresent: []runtime.Object{
- // TODO: This should have a useful error message saying that the VirtualService
- // failed.
- makeChannelWithFinalizerAndPCSAndAddress(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[virtualServiceCreateFailed],
- },
- },
- {
- Name: "Virtual service creation fails",
- InitialState: []runtime.Object{
- makeChannelWithFinalizerAndPCS(),
- makeK8sService(),
- testcreds.MakeSecretWithCreds(),
- },
- Mocks: controllertesting.Mocks{
- MockCreates: errorCreatingVirtualService(),
- },
- WantPresent: []runtime.Object{
- // TODO: This should have a useful error message saying that the VirtualService
- // failed.
- makeChannelWithFinalizerAndPCSAndAddress(),
- },
- WantErrMsg: testErrorMessage,
- WantEvent: []corev1.Event{
- events[virtualServiceCreateFailed],
- },
- },
- {
- Name: "VirtualService already exists - not owned by Channel",
- InitialState: []runtime.Object{
- makeChannelWithFinalizerAndPCS(),
- makeK8sService(),
- makeVirtualServiceNotOwnedByChannel(),
- testcreds.MakeSecretWithCreds(),
- },
- WantPresent: []runtime.Object{
- makeReadyChannel(),
- },
- WantEvent: []corev1.Event{
- events[channelReconciled],
- },
- },
{
Name: "Error planning - subscriber missing UID",
InitialState: []runtime.Object{
@@ -573,7 +510,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -594,7 +530,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -618,8 +553,6 @@ func TestReconcile(t *testing.T) {
Name: "Create Topic - topic already exists",
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
- makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -632,6 +565,7 @@ func TestReconcile(t *testing.T) {
},
},
WantPresent: []runtime.Object{
+ makeK8sService(),
makeReadyChannel(),
},
WantEvent: []corev1.Event{
@@ -643,7 +577,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -665,12 +598,11 @@ func TestReconcile(t *testing.T) {
Name: "Create Topic - topic create succeeds",
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
- makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
WantPresent: []runtime.Object{
makeReadyChannel(),
+ makeK8sService(),
},
WantEvent: []corev1.Event{
events[channelReconciled],
@@ -681,7 +613,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithSubscribersAndFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -706,7 +637,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithSubscribersAndFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -730,7 +660,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithSubscribersAndFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
OtherTestData: map[string]interface{}{
@@ -753,7 +682,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithSubscribersAndFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
WantPresent: []runtime.Object{
@@ -768,7 +696,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
Mocks: controllertesting.Mocks{
@@ -784,7 +711,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannel(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
Mocks: controllertesting.Mocks{
@@ -799,7 +725,6 @@ func TestReconcile(t *testing.T) {
InitialState: []runtime.Object{
makeChannelWithFinalizerAndPCS(),
makeK8sService(),
- makeVirtualService(),
testcreds.MakeSecretWithCreds(),
},
Mocks: controllertesting.Mocks{
@@ -856,7 +781,9 @@ func makeChannel() *eventingv1alpha1.Channel {
func makeChannelWithFinalizerAndPCSAndAddress() *eventingv1alpha1.Channel {
c := makeChannelWithFinalizerAndPCS()
- c.Status.SetAddress(fmt.Sprintf("%s-channel.%s.svc.%s", c.Name, c.Namespace, utils.GetClusterDomainName()))
+ // serviceAddress is the address of the K8s Service. It uses a GenerateName and the fake client
+ // does not fill in Name, so the name is the empty string.
+ c.Status.SetAddress(fmt.Sprintf(".%s.svc.%s", c.Namespace, utils.GetClusterDomainName()))
return c
}
@@ -1075,11 +1002,13 @@ func makeK8sService() *corev1.Service {
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-channel", cName),
- Namespace: cNamespace,
+ GenerateName: fmt.Sprintf("%s-channel-", cName),
+ Namespace: cNamespace,
Labels: map[string]string{
- "channel": cName,
- "provisioner": ccpName,
+ util.EventingChannelLabel: cName,
+ util.OldEventingChannelLabel: cName,
+ util.EventingProvisionerLabel: ccpName,
+ util.OldEventingProvisionerLabel: ccpName,
},
OwnerReferences: []metav1.OwnerReference{
{
@@ -1093,68 +1022,12 @@ func makeK8sService() *corev1.Service {
},
},
Spec: corev1.ServiceSpec{
- Ports: []corev1.ServicePort{
- {
- Name: util.PortName,
- Port: util.PortNumber,
- },
- },
+ ExternalName: names.ServiceHostName(fmt.Sprintf("%s-dispatcher", ccpName), system.Namespace()),
+ Type: corev1.ServiceTypeExternalName,
},
}
}
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return &istiov1alpha3.VirtualService{
- TypeMeta: metav1.TypeMeta{
- APIVersion: istiov1alpha3.SchemeGroupVersion.String(),
- Kind: "VirtualService",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: fmt.Sprintf("%s-channel", cName),
- Namespace: cNamespace,
- Labels: map[string]string{
- "channel": cName,
- "provisioner": ccpName,
- },
- OwnerReferences: []metav1.OwnerReference{
- {
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Channel",
- Name: cName,
- UID: cUID,
- Controller: &truePointer,
- BlockOwnerDeletion: &truePointer,
- },
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- fmt.Sprintf("%s-channel.%s.svc.%s", cName, cNamespace, utils.GetClusterDomainName()),
- fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.channels.%s", cName, cNamespace, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.HTTPRouteDestination{{
- Destination: istiov1alpha3.Destination{
- Host: "in-memory-channel-clusterbus.knative-eventing.svc." + utils.GetClusterDomainName(),
- Port: istiov1alpha3.PortSelector{
- Number: util.PortNumber,
- },
- }},
- }},
- },
- },
- }
-}
-
-func makeVirtualServiceNotOwnedByChannel() *istiov1alpha3.VirtualService {
- vs := makeVirtualService()
- vs.OwnerReferences = nil
- return vs
-}
-
func errorOnSecondChannelGet() []controllertesting.MockGet {
passThrough := []controllertesting.MockGet{
func(innerClient client.Client, ctx context.Context, key client.ObjectKey, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -1185,18 +1058,6 @@ func errorListingK8sService() []controllertesting.MockList {
},
}
}
-
-func errorListingVirtualService() []controllertesting.MockList {
- return []controllertesting.MockList{
- func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualServiceList); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorCreatingK8sService() []controllertesting.MockCreate {
return []controllertesting.MockCreate{
func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
@@ -1208,17 +1069,6 @@ func errorCreatingK8sService() []controllertesting.MockCreate {
}
}
-func errorCreatingVirtualService() []controllertesting.MockCreate {
- return []controllertesting.MockCreate{
- func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- if _, ok := obj.(*istiov1alpha3.VirtualService); ok {
- return controllertesting.Handled, errors.New(testErrorMessage)
- }
- return controllertesting.Unhandled, nil
- },
- }
-}
-
func errorUpdatingChannel() []controllertesting.MockUpdate {
return []controllertesting.MockUpdate{
func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
diff --git a/contrib/gcppubsub/pkg/controller/cmd/main.go b/contrib/gcppubsub/pkg/controller/cmd/main.go
index baa5540a9f0..ce4961befc5 100644
--- a/contrib/gcppubsub/pkg/controller/cmd/main.go
+++ b/contrib/gcppubsub/pkg/controller/cmd/main.go
@@ -21,15 +21,13 @@ import (
"log"
"os"
- "github.com/knative/eventing/pkg/provisioners"
- v1 "k8s.io/api/core/v1"
-
"github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel"
"github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
+ "github.com/knative/eventing/pkg/provisioners"
"github.com/knative/pkg/signals"
"go.uber.org/zap"
+ v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
@@ -62,7 +60,6 @@ func main() {
// Add custom types to this array to get them into the manager's scheme.
eventingv1alpha1.AddToScheme(mgr.GetScheme())
- istiov1alpha3.AddToScheme(mgr.GetScheme())
// The controllers for both the ClusterChannelProvisioner and the Channels created by that
// ClusterChannelProvisioner run in this process.
diff --git a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
index 078e08bf2d4..4b03eb258c3 100644
--- a/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
+++ b/contrib/gcppubsub/pkg/dispatcher/cmd/main.go
@@ -17,6 +17,7 @@
package main
import (
+ "context"
"flag"
"github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner"
@@ -29,6 +30,7 @@ import (
"go.uber.org/zap"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// This is the main method for the GCP PubSub Channel dispatcher. It handles all the data-plane
@@ -61,7 +63,7 @@ func main() {
// PubSub) and the dispatcher (takes messages in PubSub and sends them in cluster) in this
// binary.
- _, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
+ receiver, runnables, err := receiver.New(logger.Desugar(), mgr.GetClient(), util.GcpPubSubClientCreator)
if err != nil {
logger.Fatal("Unable to create new receiver and runnable", zap.Error(err))
}
@@ -72,7 +74,14 @@ func main() {
}
}
- if _, err = dispatcher.New(mgr, logger.Desugar()); err != nil {
+ if _, err = dispatcher.New(
+ mgr,
+ logger.Desugar(),
+ []dispatcher.ReconcileHandler{
+ func(ctx context.Context, _ reconcile.Request) error {
+ return receiver.UpdateHostToChannelMap(ctx)
+ },
+ }); err != nil {
logger.Fatal("Unable to create the dispatcher", zap.Error(err))
}
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
index 00dd7cdf44a..781ea63b6b9 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/controller.go
@@ -21,15 +21,13 @@ import (
"sync"
"time"
- "k8s.io/client-go/util/workqueue"
-
- "sigs.k8s.io/controller-runtime/pkg/event"
-
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
"go.uber.org/zap"
+ "k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/source"
@@ -53,7 +51,7 @@ const (
// New returns a Controller that represents the dispatcher portion (messages from GCP PubSub are
// sent into the cluster) of the GCP PubSub dispatcher. We use a reconcile loop to watch all
// Channels and notice changes to them. It uses an exponential backoff to throttle the retries.
-func New(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) {
+func New(mgr manager.Manager, logger *zap.Logger, additionalHandlers []ReconcileHandler) (controller.Controller, error) {
// reconcileChan is used when the dispatcher itself needs to force reconciliation of a Channel.
reconcileChan := make(chan event.GenericEvent)
@@ -71,7 +69,8 @@ func New(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error)
subscriptionsLock: sync.Mutex{},
subscriptions: map[channelName]map[subscriptionName]context.CancelFunc{},
- rateLimiter: workqueue.NewItemExponentialFailureRateLimiter(expBackoffBaseDelay, expBackoffMaxDelay),
+ rateLimiter: workqueue.NewItemExponentialFailureRateLimiter(expBackoffBaseDelay, expBackoffMaxDelay),
+ additionalHandlers: additionalHandlers,
}
c, err := controller.New(controllerAgentName, mgr, controller.Options{
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
index 23d0884d981..78c51756373 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile.go
@@ -22,20 +22,18 @@ import (
"sync"
"time"
- v1 "k8s.io/api/core/v1"
-
- "k8s.io/client-go/util/workqueue"
-
- ccpcontroller "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/clusterchannelprovisioner"
+ "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel"
pubsubutil "github.com/knative/eventing/contrib/gcppubsub/pkg/util"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/provisioners"
util "github.com/knative/eventing/pkg/provisioners"
"go.uber.org/zap"
+ v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
+ "k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -54,6 +52,9 @@ type channelName = types.NamespacedName
type subscriptionName = types.UID
type empty struct{}
+// ReconcileHandler will be run in addition to the existing reconcile.
+type ReconcileHandler func(context.Context, reconcile.Request) error
+
// reconciler reconciles Channels with the gcp-pubsub provisioner. It sets up hanging polling for
// every Subscription to any Channel.
type reconciler struct {
@@ -76,6 +77,8 @@ type reconciler struct {
// rateLimiter is used to limit the pace at which we nack a message when it could not be dispatched.
rateLimiter workqueue.RateLimiter
+
+ additionalHandlers []ReconcileHandler
}
// Verify the struct implements reconcile.Reconciler
@@ -106,7 +109,7 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}
// Does this Controller control this Channel?
- if !r.shouldReconcile(c) {
+ if !channel.ShouldReconcile(c) {
logging.FromContext(ctx).Info("Not reconciling Channel, it is not controlled by this Controller", zap.Any("ref", c.Spec))
return reconcile.Result{}, nil
}
@@ -145,15 +148,6 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
}, reconcileErr
}
-// shouldReconcile determines if this Controller should control (and therefore reconcile) a given
-// ClusterChannelProvisioner. This Controller only handles gcp-pubsub Channels.
-func (r *reconciler) shouldReconcile(c *eventingv1alpha1.Channel) bool {
- if c.Spec.Provisioner != nil {
- return ccpcontroller.IsControlled(c.Spec.Provisioner)
- }
- return false
-}
-
// reconcile reconciles this Channel so that the real world matches the intended state. The returned
// boolean indicates if this Channel should be immediately requeued for another reconcile loop. The
// returned error indicates an error during reconciliation.
@@ -176,6 +170,12 @@ func (r *reconciler) reconcile(ctx context.Context, c *eventingv1alpha1.Channel,
return true, nil
}
+ for _, h := range r.additionalHandlers {
+ if err := h(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: c.Name, Namespace: c.Namespace}}); err != nil {
+ logging.FromContext(ctx).Error("Failed reconcile.", zap.Error(err))
+ return false, err
+ }
+ }
// enqueueChannelForReconciliation is a function that when run will force this Channel to be
// reconciled again.
enqueueChannelForReconciliation := func() {
diff --git a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
index 2b8d5ae0544..6814dc6448e 100644
--- a/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/dispatcher/reconcile_test.go
@@ -26,32 +26,25 @@ import (
"testing"
"time"
- "k8s.io/client-go/util/workqueue"
-
"github.com/knative/eventing/contrib/gcppubsub/pkg/util"
-
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-
- "github.com/knative/eventing/pkg/provisioners"
-
- "sigs.k8s.io/controller-runtime/pkg/event"
-
+ "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub"
"github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds"
"github.com/knative/eventing/pkg/apis/duck/v1alpha1"
- "github.com/knative/eventing/pkg/utils"
-
- "github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub"
-
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/provisioners"
controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
+ "github.com/knative/eventing/pkg/utils"
_ "github.com/knative/pkg/system/testing"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
@@ -65,10 +58,11 @@ const (
gcpProject = "gcp-project"
- pscData = "pscData"
- reconcileChan = "reconcileChan"
- shouldBeCanceled = "shouldBeCanceled"
- shouldNotBeCanceled = "shouldNotBeCanceled"
+ pscData = "pscData"
+ reconcileChan = "reconcileChan"
+ shouldBeCanceled = "shouldBeCanceled"
+ shouldNotBeCanceled = "shouldNotBeCanceled"
+ additionalHandlerError = "Error in additional test handler."
)
var (
@@ -93,6 +87,8 @@ var (
dispatcherReconcileFailed: {Reason: dispatcherReconcileFailed, Type: corev1.EventTypeWarning},
dispatcherUpdateStatusFailed: {Reason: dispatcherUpdateStatusFailed, Type: corev1.EventTypeWarning},
}
+
+ hostname = fmt.Sprintf("%s-channel.%s.svc.%s", cName, cNamespace, utils.GetClusterDomainName())
)
func init() {
@@ -367,6 +363,22 @@ func TestReconcile(t *testing.T) {
events[dispatcherReconciled], events[dispatcherUpdateStatusFailed],
},
},
+ {
+ Name: "Fail additional reconcile handler",
+ InitialState: []runtime.Object{
+ makeChannelWithSubscribersAndFinalizer(),
+ testcreds.MakeSecretWithCreds(),
+ },
+ WantPresent: []runtime.Object{
+ makeChannelWithSubscribersAndFinalizer(),
+ },
+ WantEvent: []corev1.Event{
+ events[dispatcherReconcileFailed],
+ },
+ OtherTestData: map[string]interface{}{
+ additionalHandlerError: additionalHandlerError,
+ },
+ },
// Note - we do not test update status since this dispatcher only adds
// finalizers to the channel
}
@@ -414,12 +426,19 @@ func TestReconcile(t *testing.T) {
r.subscriptions[c][s] = cc.wantNotCancel(c, s)
}
}
+ if tc.OtherTestData[additionalHandlerError] != nil {
+ r.additionalHandlers = []ReconcileHandler{
+ func(_ context.Context, _ reconcile.Request) error {
+ return fmt.Errorf(tc.OtherTestData[additionalHandlerError].(string))
+ },
+ }
+ tc.WantErrMsg = additionalHandlerError
+ }
tc.AdditionalVerification = append(tc.AdditionalVerification, cc.verify)
tc.IgnoreTimes = true
t.Run(tc.Name, tc.Runner(t, r, c, recorder))
}
}
-
func TestReceiveFunc(t *testing.T) {
testCases := map[string]struct {
ack bool
@@ -518,7 +537,7 @@ func makeChannel() *eventingv1alpha1.Channel {
},
}
c.Status.InitializeConditions()
- c.Status.SetAddress(fmt.Sprintf("%s-channel.%s.svc.%s", c.Name, c.Namespace, utils.GetClusterDomainName()))
+ c.Status.SetAddress(hostname)
c.Status.MarkProvisioned()
pcs := &util.GcpPubSubChannelStatus{
GCPProject: gcpProject,
@@ -638,6 +657,16 @@ func errorGettingChannel() []controllertesting.MockGet {
}
}
+func errorListingChannels() []controllertesting.MockList {
+ return []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, _ *client.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
+ if _, ok := obj.(*eventingv1alpha1.ChannelList); ok {
+ return controllertesting.Handled, errors.New(testErrorMessage)
+ }
+ return controllertesting.Unhandled, nil
+ },
+ }
+}
func errorUpdatingChannel() []controllertesting.MockUpdate {
return []controllertesting.MockUpdate{
func(_ client.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
index 665bb80dda5..09117122e46 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
@@ -20,11 +20,15 @@ import (
"context"
"errors"
"fmt"
+ "sync"
+ "sync/atomic"
"cloud.google.com/go/pubsub"
+ "github.com/knative/eventing/contrib/gcppubsub/pkg/controller/channel"
"github.com/knative/eventing/contrib/gcppubsub/pkg/dispatcher/receiver/cache"
"github.com/knative/eventing/contrib/gcppubsub/pkg/util"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/channelwatcher"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/provisioners"
"go.uber.org/zap"
@@ -40,6 +44,9 @@ type Receiver struct {
pubSubClientCreator util.PubSubClientCreator
cache *cache.TTL
+
+ hostToChannelMapMutex sync.Mutex
+ hostToChannelMap atomic.Value
}
// New creates a new Receiver and its associated MessageReceiver. The caller is responsible for
@@ -52,7 +59,9 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS
pubSubClientCreator: pubSubClientCreator,
cache: cache.NewTTL(),
}
+ r.setHostToChannelMap(map[string]provisioners.ChannelReference{})
receiver, err := r.newMessageReceiver()
+
if err != nil {
return nil, nil, err
}
@@ -60,7 +69,18 @@ func New(logger *zap.Logger, client client.Client, pubSubClientCreator util.PubS
}
func (r *Receiver) newMessageReceiver() (*provisioners.MessageReceiver, error) {
- return provisioners.NewMessageReceiver(r.sendEventToTopic, r.logger.Sugar())
+ return provisioners.NewMessageReceiver(
+ r.sendEventToTopic,
+ r.logger.Sugar(),
+ provisioners.ResolveChannelFromHostHeader(r.getChannelReferenceFromHost))
+}
+func (r *Receiver) getChannelReferenceFromHost(host string) (provisioners.ChannelReference, error) {
+ chMap := r.getHostToChannelMap()
+ cr, ok := chMap[host]
+ if !ok {
+ return cr, fmt.Errorf("Invalid HostName:%q. HostName not found in any of the watched gcp-pubsub channels", host)
+ }
+ return cr, nil
}
// sendEventToTopic sends a message to the Cloud Pub/Sub Topic backing the Channel.
@@ -152,3 +172,36 @@ func (r *Receiver) getChannel(ctx context.Context, ref provisioners.ChannelRefer
return c, err
}
+func (r *Receiver) getHostToChannelMap() map[string]provisioners.ChannelReference {
+ return r.hostToChannelMap.Load().(map[string]provisioners.ChannelReference)
+}
+
+func (r *Receiver) setHostToChannelMap(hcMap map[string]provisioners.ChannelReference) {
+ r.hostToChannelMap.Store(hcMap)
+}
+
+// UpdateHostToChannelMap will be called from the controller that watches gcp-pubsub channels.
+// It will update internal hostToChannelMap which is used to resolve the hostHeader of the
+// incoming request to the correct ChannelReference in the receiver function.
+func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error {
+ logging.FromContext(ctx).Debug("UpdateHostToChannelMap: Acquiring mutex lock")
+ r.hostToChannelMapMutex.Lock()
+ defer r.hostToChannelMapMutex.Unlock()
+ logging.FromContext(ctx).Debug("UpdateHostToChannelMap: Acquired mutex lock. Updating internal map")
+
+ chanList, err := channelwatcher.ListAllChannels(ctx, r.client, channel.ShouldReconcile)
+ if err != nil {
+ logging.FromContext(ctx).Error("UpdateHostToChannelMap: Failed to list all channels.", zap.Error(err))
+ return err
+ }
+
+ hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList)
+ if err != nil {
+ logging.FromContext(ctx).Error("UpdateHostToChannelMap: Error occurred when creating the new hostToChannel map.", zap.Error(err))
+ return err
+ }
+
+ r.setHostToChannelMap(hostToChanMap)
+ logging.FromContext(ctx).Info("UpdateHostToChannelMap: Update successful.")
+ return nil
+}
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
index c4789c2c9ed..f850d26049a 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
@@ -19,25 +19,25 @@ package receiver
import (
"context"
"errors"
+ "fmt"
"net/http/httptest"
"strings"
"testing"
+ "github.com/google/go-cmp/cmp"
"github.com/knative/eventing/contrib/gcppubsub/pkg/util"
-
- eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
- "k8s.io/client-go/kubernetes/scheme"
-
"github.com/knative/eventing/contrib/gcppubsub/pkg/util/fakepubsub"
- "github.com/knative/eventing/pkg/utils"
+ "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds"
+ eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
+ "github.com/knative/eventing/pkg/provisioners"
+ controllertesting "github.com/knative/eventing/pkg/reconciler/testing"
"go.uber.org/zap"
-
+ corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
-
- "github.com/knative/eventing/contrib/gcppubsub/pkg/util/testcreds"
)
const (
@@ -54,6 +54,9 @@ const (
"contentType" : "text/xml",
"data" : ""
}`
+ ccpName = "gcp-pubsub"
+ listChannelsFailed = "Failed to list channels"
+ hostname = "a.b.c.d"
)
func init() {
@@ -76,14 +79,14 @@ func TestReceiver(t *testing.T) {
"can't read status": {
initialState: []runtime.Object{
testcreds.MakeSecretWithInvalidCreds(),
- makeChannelWithBadStatus(),
+ makeChannel(withBadStatus()),
},
expectedErr: true,
},
"blank status": {
initialState: []runtime.Object{
testcreds.MakeSecretWithInvalidCreds(),
- makeChannelWithBlankStatus(),
+ makeChannel(withBlankStatus()),
},
expectedErr: true,
},
@@ -123,7 +126,7 @@ func TestReceiver(t *testing.T) {
"Publish succeeds": {
initialState: []runtime.Object{
testcreds.MakeSecretWithCreds(),
- makeChannel(),
+ makeChannel(withStatusReady(hostname)),
},
},
}
@@ -136,13 +139,15 @@ func TestReceiver(t *testing.T) {
if err != nil {
t.Fatalf("Error when creating a New receiver. Error:%s", err)
}
+ mr.setHostToChannelMap(map[string]provisioners.ChannelReference{})
resp := httptest.NewRecorder()
req := httptest.NewRequest("POST", "/", strings.NewReader(validMessage))
- req.Host = "test-channel.test-namespace.channels." + utils.GetClusterDomainName()
+ req.Host = hostname
receiver, err := mr.newMessageReceiver()
if err != nil {
t.Fatalf("Error when creating a new message receiver. Error:%s", err)
}
+ mr.UpdateHostToChannelMap(context.TODO())
receiver.HandleRequest(resp, req)
if tc.expectedErr {
if resp.Result().StatusCode >= 200 && resp.Result().StatusCode < 300 {
@@ -157,7 +162,112 @@ func TestReceiver(t *testing.T) {
}
}
-func makeChannel() *eventingv1alpha1.Channel {
+func TestUpdateHostToChannelMap(t *testing.T) {
+ testCases := []struct {
+ name string
+ initialState []runtime.Object
+ expectedMap map[string]provisioners.ChannelReference
+ expectedErrMsg string
+ mocks controllertesting.Mocks
+ }{
+ {
+ name: "client.List() channels fails.",
+ initialState: []runtime.Object{
+ makeChannel(withStatusReady(hostname)),
+ },
+ expectedErrMsg: listChannelsFailed,
+ expectedMap: map[string]provisioners.ChannelReference{},
+ mocks: controllertesting.Mocks{
+ MockLists: []controllertesting.MockList{
+ func(_ client.Client, _ context.Context, _ *client.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) {
+ return controllertesting.Handled, fmt.Errorf(listChannelsFailed)
+ },
+ },
+ },
+ },
+ {
+ name: "Duplicate hostnames.",
+ initialState: []runtime.Object{
+ makeChannel(withName("chan1"), withNamespace("ns1"), withStatusReady("host.name")),
+ makeChannel(withName("chan2"), withNamespace("ns2"), withStatusReady("host.name")),
+ },
+ expectedErrMsg: "Duplicate hostName found. Each channel must have a unique host header. HostName:host.name, channel:ns2.chan2, channel:ns1.chan1",
+ expectedMap: map[string]provisioners.ChannelReference{},
+ },
+ {
+ name: "Successfully updated hostToChannelMap.",
+ initialState: []runtime.Object{
+ makeChannel(withName("chan1"), withNamespace("ns1"), withStatusReady("host.name1")),
+ makeChannel(withName("chan2"), withNamespace("ns2"), withStatusReady("host.name2")),
+ },
+ expectedMap: map[string]provisioners.ChannelReference{
+ "host.name1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"},
+ "host.name2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"},
+ },
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ c := controllertesting.NewMockClient(fake.NewFakeClient(tc.initialState...), tc.mocks)
+ r, _, err := New(zap.NewNop(), c, fakepubsub.Creator(nil))
+ if err != nil {
+ t.Fatalf("Failed to create receiver.")
+ }
+ if err := r.UpdateHostToChannelMap(context.Background()); err != nil {
+ if diff := cmp.Diff(tc.expectedErrMsg, err.Error()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ } else if tc.expectedErrMsg != "" {
+ t.Fatalf("Want error:%s, Got nil", tc.expectedErrMsg)
+ }
+
+ if diff := cmp.Diff(tc.expectedMap, r.getHostToChannelMap()); diff != "" {
+ t.Fatalf("Unexpected difference (-want +got): %v", diff)
+ }
+ })
+ }
+}
+
+type option func(*eventingv1alpha1.Channel)
+
+func withName(name string) option {
+ return func(c *eventingv1alpha1.Channel) {
+ c.Name = name
+ }
+}
+
+func withNamespace(ns string) option {
+ return func(c *eventingv1alpha1.Channel) {
+ c.Namespace = ns
+ }
+}
+
+func withStatusReady(hn string) option {
+ return func(c *eventingv1alpha1.Channel) {
+ c.Status.InitializeConditions()
+ c.Status.InitializeConditions()
+ c.Status.MarkProvisioned()
+ c.Status.MarkProvisionerInstalled()
+ c.Status.SetAddress(hn)
+ }
+}
+
+func withBlankStatus() option {
+ return func(c *eventingv1alpha1.Channel) {
+ c.Status = eventingv1alpha1.ChannelStatus{}
+ }
+}
+
+func withBadStatus() option {
+ return func(c *eventingv1alpha1.Channel) {
+ c.Status.Internal = &runtime.RawExtension{
+ // SecretKey must be a string, not an integer, so this will fail during json.Unmarshal.
+ Raw: []byte(`{"secretKey": 123}`),
+ }
+ }
+}
+
+func makeChannel(opts ...option) *eventingv1alpha1.Channel {
c := &eventingv1alpha1.Channel{
TypeMeta: v1.TypeMeta{
APIVersion: "eventing.knative.dev/v1alpha1",
@@ -167,6 +277,11 @@ func makeChannel() *eventingv1alpha1.Channel {
Namespace: "test-namespace",
Name: "test-channel",
},
+ Spec: eventingv1alpha1.ChannelSpec{
+ Provisioner: &corev1.ObjectReference{
+ Name: ccpName,
+ },
+ },
}
pcs := &util.GcpPubSubChannelStatus{
GCPProject: "project",
@@ -176,20 +291,9 @@ func makeChannel() *eventingv1alpha1.Channel {
if err := util.SetInternalStatus(context.Background(), c, pcs); err != nil {
panic(err)
}
- return c
-}
-
-func makeChannelWithBlankStatus() *eventingv1alpha1.Channel {
- c := makeChannel()
- c.Status = eventingv1alpha1.ChannelStatus{}
- return c
-}
-func makeChannelWithBadStatus() *eventingv1alpha1.Channel {
- c := makeChannel()
- c.Status.Internal = &runtime.RawExtension{
- // SecretKey must be a string, not an integer, so this will fail during json.Unmarshal.
- Raw: []byte(`{"secretKey": 123}`),
+ for _, opt := range opts {
+ opt(c)
}
return c
}
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
index ecbb7d8211f..8d900564b63 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
@@ -76,6 +76,7 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio
clusterID: clusterID,
subscriptions: make(map[provisioners.ChannelReference]map[subscriptionReference]*stan.Subscription),
}
+ d.setHostToChannelMap(map[string]provisioners.ChannelReference{})
receiver, err := provisioners.NewMessageReceiver(
createReceiverFunction(d, logger.Sugar()),
logger.Sugar(),
@@ -84,8 +85,6 @@ func NewDispatcher(natssURL, clusterID string, logger *zap.Logger) (*Subscriptio
return nil, err
}
d.receiver = receiver
- d.setHostToChannelMap(map[string]provisioners.ChannelReference{})
-
return d, nil
}
@@ -313,21 +312,11 @@ func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provision
// It will update internal hostToChannelMap which is used to resolve the hostHeader of the
// incoming request to the correct ChannelReference in the receiver function.
func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error {
- hostToChanMap := make(map[string]provisioners.ChannelReference, len(chanList))
- for _, c := range chanList {
- hostName := c.Status.Address.Hostname
- if cr, ok := hostToChanMap[hostName]; ok {
- return fmt.Errorf(
- "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s",
- hostName,
- c.Namespace,
- c.Name,
- cr.Namespace,
- cr.Name)
- }
- hostToChanMap[hostName] = provisioners.ChannelReference{Name: c.Name, Namespace: c.Namespace}
+ hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList)
+ if err != nil {
+ logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err))
+ return err
}
-
s.setHostToChannelMap(hostToChanMap)
logging.FromContext(ctx).Info("hostToChannelMap updated successfully.")
return nil
diff --git a/pkg/channelwatcher/channel_watcher.go b/pkg/channelwatcher/channel_watcher.go
index 249fe2ef439..011554b93dd 100644
--- a/pkg/channelwatcher/channel_watcher.go
+++ b/pkg/channelwatcher/channel_watcher.go
@@ -39,7 +39,7 @@ type reconciler struct {
}
func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
- ctx := logging.WithLogger(context.TODO(), r.logger.With(zap.Any("request", req)))
+ ctx := logging.WithLogger(context.Background(), r.logger.With(zap.Any("request", req)))
logging.FromContext(ctx).Info("New update for channel.")
if err := r.handler(ctx, r.client, req.NamespacedName); err != nil {
logging.FromContext(ctx).Error("WatchHandlerFunc returned error", zap.Error(err))
diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index 69e1224e062..df914bc8a6b 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -394,3 +394,22 @@ func channelServiceName(channelName string) string {
func channelHostName(channelName, namespace string) string {
return fmt.Sprintf("%s.%s.channels.%s", channelName, namespace, utils.GetClusterDomainName())
}
+
+// NewHostNameToChannelRefMap parses each channel from cList and creates a map[string(Status.Address.HostName)]ChannelReference
+func NewHostNameToChannelRefMap(cList []eventingv1alpha1.Channel) (map[string]ChannelReference, error) {
+ hostToChanMap := make(map[string]ChannelReference, len(cList))
+ for _, c := range cList {
+ hostName := c.Status.Address.Hostname
+ if cr, ok := hostToChanMap[hostName]; ok {
+ return nil, fmt.Errorf(
+ "Duplicate hostName found. Each channel must have a unique host header. HostName:%s, channel:%s.%s, channel:%s.%s",
+ hostName,
+ c.Namespace,
+ c.Name,
+ cr.Namespace,
+ cr.Name)
+ }
+ hostToChanMap[hostName] = ChannelReference{Name: c.Name, Namespace: c.Namespace}
+ }
+ return hostToChanMap, nil
+}
diff --git a/pkg/provisioners/inmemory/channel/reconcile_test.go b/pkg/provisioners/inmemory/channel/reconcile_test.go
index 76d75f3c7aa..e5af96d4dc2 100644
--- a/pkg/provisioners/inmemory/channel/reconcile_test.go
+++ b/pkg/provisioners/inmemory/channel/reconcile_test.go
@@ -32,7 +32,6 @@ import (
"github.com/knative/eventing/pkg/sidecar/fanout"
"github.com/knative/eventing/pkg/sidecar/multichannelfanout"
"github.com/knative/eventing/pkg/utils"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/system"
_ "github.com/knative/pkg/system/testing"
"go.uber.org/zap"
@@ -191,7 +190,6 @@ func init() {
// Add types to scheme.
_ = eventingv1alpha1.AddToScheme(scheme.Scheme)
_ = corev1.AddToScheme(scheme.Scheme)
- _ = istiov1alpha3.AddToScheme(scheme.Scheme)
}
func TestInjectClient(t *testing.T) {
diff --git a/pkg/provisioners/inmemory/controller/main.go b/pkg/provisioners/inmemory/controller/main.go
index 2b09c992b4f..6a1921d7c31 100644
--- a/pkg/provisioners/inmemory/controller/main.go
+++ b/pkg/provisioners/inmemory/controller/main.go
@@ -24,7 +24,6 @@ import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/provisioners"
"github.com/knative/eventing/pkg/provisioners/inmemory/clusterchannelprovisioner"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
"github.com/knative/pkg/signals"
"go.uber.org/zap"
"sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -50,7 +49,6 @@ func main() {
// Add custom types to this array to get them into the manager's scheme.
eventingv1alpha1.AddToScheme(mgr.GetScheme())
- istiov1alpha3.AddToScheme(mgr.GetScheme())
// The controllers for both the ClusterChannelProvisioner and the Channels created by that
// ClusterChannelProvisioner run in this process.
From c67c68080eb5de1c545f1b5fa83a11617d28416d Mon Sep 17 00:00:00 2001
From: Sergey Yedrikov <48031344+syedriko@users.noreply.github.com>
Date: Tue, 30 Apr 2019 10:30:32 -0400
Subject: [PATCH 66/76] compile-time assert numControllers ==
len(controllersArray) (#1112)
* compile-time assert numControllers == len(controllersArray)
* Incorporated review feedback from @grantr
* Typo fix
---
cmd/controller/main.go | 13 ++++++++-----
cmd/sources-controller/main.go | 13 ++++++++-----
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 1addab462cd..e3cb5d8e7fe 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -93,7 +93,7 @@ func main() {
// Build all of our controllers, with the clients constructed above.
// Add new controllers to this array.
// You also need to modify numControllers above to match this.
- controllers := []*kncontroller.Impl{
+ controllers := [...]*kncontroller.Impl{
subscription.NewController(
opt,
subscriptionInformer,
@@ -134,9 +134,12 @@ func main() {
brokerInformer,
),
}
- if len(controllers) != numControllers {
- logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
- }
+ // This line asserts at compile time that the length of controllers is equal to numControllers.
+ // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that
+ // var _ [N-M]int
+ // asserts at compile time that N >= M, which we can use to establish equality of N and M:
+ // (N >= M) && (M >= N) => (N == M)
+ var _ [numControllers - len(controllers)][len(controllers) - numControllers]int
// Watch the logging config map and dynamically update logging levels.
opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller))
@@ -167,7 +170,7 @@ func main() {
// Start all of the controllers.
logger.Info("Starting controllers.")
- go kncontroller.StartAll(stopCh, controllers...)
+ go kncontroller.StartAll(stopCh, controllers[:]...)
<-stopCh
}
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
index 000cbf04c4a..e4d5154e622 100644
--- a/cmd/sources-controller/main.go
+++ b/cmd/sources-controller/main.go
@@ -82,7 +82,7 @@ func main() {
// Build all of our controllers, with the clients constructed above.
// Add new controllers to this array.
// You also need to modify numControllers above to match this.
- controllers := []*kncontroller.Impl{
+ controllers := [...]*kncontroller.Impl{
cronjobsource.NewController(
opt,
cronJobSourceInformer,
@@ -94,9 +94,12 @@ func main() {
deploymentInformer,
),
}
- if len(controllers) != numControllers {
- logger.Fatalf("Number of controllers and QPS settings mismatch: %d != %d", len(controllers), numControllers)
- }
+ // This line asserts at compile time that the length of controllers is equal to numControllers.
+ // It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that
+ // var _ [N-M]int
+ // asserts at compile time that N >= M, which we can use to establish equality of N and M:
+ // (N >= M) && (M >= N) => (N == M)
+ var _ [numControllers - len(controllers)][len(controllers) - numControllers]int
// Watch the logging config map and dynamically update logging levels.
opt.ConfigMapWatcher.Watch(logconfig.ConfigMapName(), logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.SourcesController))
@@ -121,7 +124,7 @@ func main() {
// Start all of the controllers.
logger.Info("Starting controllers.")
- go kncontroller.StartAll(stopCh, controllers...)
+ go kncontroller.StartAll(stopCh, controllers[:]...)
<-stopCh
}
From b8a1d73df845d2af071d8049bf668e73eb6312ca Mon Sep 17 00:00:00 2001
From: Sergey Yedrikov <48031344+syedriko@users.noreply.github.com>
Date: Tue, 30 Apr 2019 12:14:33 -0400
Subject: [PATCH 67/76] Fix for https://github.com/knative/eventing/issues/1109
(#1118)
[BUG] eventing controller and source-controller fall off main() on shutdown signal
---
cmd/controller/main.go | 4 ++--
cmd/sources-controller/main.go | 3 +--
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index e3cb5d8e7fe..74599e9e9f0 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -170,8 +170,8 @@ func main() {
// Start all of the controllers.
logger.Info("Starting controllers.")
- go kncontroller.StartAll(stopCh, controllers[:]...)
- <-stopCh
+
+ kncontroller.StartAll(stopCh, controllers[:]...)
}
func init() {
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
index e4d5154e622..053d816dbd8 100644
--- a/cmd/sources-controller/main.go
+++ b/cmd/sources-controller/main.go
@@ -124,9 +124,8 @@ func main() {
// Start all of the controllers.
logger.Info("Starting controllers.")
- go kncontroller.StartAll(stopCh, controllers[:]...)
- <-stopCh
+ kncontroller.StartAll(stopCh, controllers[:]...)
}
func setupLogger() (*zap.SugaredLogger, zap.AtomicLevel) {
From 7081d16201af6c375a9911412a447a62a16662e6 Mon Sep 17 00:00:00 2001
From: mattmoor-sockpuppet
Date: Tue, 30 Apr 2019 09:49:33 -0700
Subject: [PATCH 68/76] golang format tools (#1123)
Produced via:
`gofmt -s -w $(find -path './vendor' -prune -o -type f -name '*.go' -print))`
`goimports -w $(find -name '*.go' | grep -v vendor)`
---
cmd/broker/ingress/main.go | 2 +-
cmd/pong/main.go | 2 +-
cmd/sendevent/main.go | 2 +-
cmd/sources-controller/main.go | 3 ++-
.../gcppubsub/pkg/controller/channel/names_test.go | 3 ++-
.../pkg/dispatcher/receiver/receiver_test.go | 4 ++--
contrib/gcppubsub/pkg/util/status.go | 1 +
contrib/kafka/pkg/dispatcher/dispatcher_test.go | 14 +++++++-------
.../pkg/dispatcher/dispatcher/dispatcher_test.go | 6 +++---
pkg/broker/context.go | 2 +-
pkg/broker/receiver.go | 2 +-
pkg/broker/receiver_test.go | 2 +-
pkg/broker/ttl.go | 2 +-
pkg/kncloudevents/good_client.go | 2 +-
pkg/logging/logging.go | 1 +
.../containersource/resources/deployment.go | 3 ++-
.../containersource/resources/deployment_test.go | 3 ++-
.../cronjobsource/resources/receive_adapter.go | 1 +
pkg/reconciler/namespace/namespace.go | 1 +
pkg/reconciler/namespace/namespace_test.go | 3 ++-
pkg/reconciler/reconciler.go | 3 ++-
pkg/reconciler/testing/containersource.go | 3 ++-
pkg/reconciler/testing/factory.go | 3 ++-
pkg/reconciler/testing/namespace.go | 3 ++-
test/e2e/main_test.go | 6 +++---
25 files changed, 45 insertions(+), 32 deletions(-)
diff --git a/cmd/broker/ingress/main.go b/cmd/broker/ingress/main.go
index 672516b69f4..c406b9e406b 100644
--- a/cmd/broker/ingress/main.go
+++ b/cmd/broker/ingress/main.go
@@ -29,7 +29,7 @@ import (
"sync"
"time"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/broker"
"github.com/knative/eventing/pkg/provisioners"
diff --git a/cmd/pong/main.go b/cmd/pong/main.go
index f691d065bb7..9ec7b17fb45 100644
--- a/cmd/pong/main.go
+++ b/cmd/pong/main.go
@@ -22,7 +22,7 @@ import (
"flag"
"log"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
"github.com/google/uuid"
)
diff --git a/cmd/sendevent/main.go b/cmd/sendevent/main.go
index 53b5a575175..d470d1718bf 100644
--- a/cmd/sendevent/main.go
+++ b/cmd/sendevent/main.go
@@ -25,7 +25,7 @@ import (
"log"
"os"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
"github.com/knative/eventing/pkg/utils"
)
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
index 053d816dbd8..0868ebe9749 100644
--- a/cmd/sources-controller/main.go
+++ b/cmd/sources-controller/main.go
@@ -18,9 +18,10 @@ package main
import (
"flag"
+ "log"
+
"github.com/knative/eventing/pkg/reconciler/containersource"
"k8s.io/client-go/tools/clientcmd"
- "log"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
diff --git a/contrib/gcppubsub/pkg/controller/channel/names_test.go b/contrib/gcppubsub/pkg/controller/channel/names_test.go
index 9014ed61e43..f431ce4716c 100644
--- a/contrib/gcppubsub/pkg/controller/channel/names_test.go
+++ b/contrib/gcppubsub/pkg/controller/channel/names_test.go
@@ -17,9 +17,10 @@ limitations under the License.
package channel
import (
- v1 "k8s.io/api/core/v1"
"testing"
+ v1 "k8s.io/api/core/v1"
+
"github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
index f850d26049a..639563f6249 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver_test.go
@@ -201,8 +201,8 @@ func TestUpdateHostToChannelMap(t *testing.T) {
makeChannel(withName("chan2"), withNamespace("ns2"), withStatusReady("host.name2")),
},
expectedMap: map[string]provisioners.ChannelReference{
- "host.name1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"},
- "host.name2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"},
+ "host.name1": {Name: "chan1", Namespace: "ns1"},
+ "host.name2": {Name: "chan2", Namespace: "ns2"},
},
},
}
diff --git a/contrib/gcppubsub/pkg/util/status.go b/contrib/gcppubsub/pkg/util/status.go
index b5680e6c7c2..3db70e1ecef 100644
--- a/contrib/gcppubsub/pkg/util/status.go
+++ b/contrib/gcppubsub/pkg/util/status.go
@@ -19,6 +19,7 @@ package util
import (
"context"
"encoding/json"
+
"github.com/knative/eventing/pkg/apis/duck/v1alpha1"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
diff --git a/contrib/kafka/pkg/dispatcher/dispatcher_test.go b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
index eda73205238..3499718a95c 100644
--- a/contrib/kafka/pkg/dispatcher/dispatcher_test.go
+++ b/contrib/kafka/pkg/dispatcher/dispatcher_test.go
@@ -220,7 +220,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
},
oldHostToChanMap: map[string]provisioners.ChannelReference{},
newHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel", Namespace: "default"},
},
},
{
@@ -250,7 +250,7 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
subscribes: []string{"subscription-1", "subscription-2"},
oldHostToChanMap: map[string]provisioners.ChannelReference{},
newHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel", Namespace: "default"},
},
},
{
@@ -296,10 +296,10 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
subscribes: []string{"subscription-2", "subscription-3"},
unsubscribes: []string{"subscription-1"},
oldHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel", Namespace: "default"},
},
newHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel", Namespace: "default"},
},
},
{
@@ -359,11 +359,11 @@ func TestDispatcher_UpdateConfig(t *testing.T) {
subscribes: []string{"subscription-1", "subscription-3", "subscription-4"},
unsubscribes: []string{"subscription-2"},
oldHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel-1", Namespace: "default"},
},
newHostToChanMap: map[string]provisioners.ChannelReference{
- "a.b.c.d": provisioners.ChannelReference{Name: "test-channel-1", Namespace: "default"},
- "e.f.g.h": provisioners.ChannelReference{Name: "test-channel-2", Namespace: "default"},
+ "a.b.c.d": {Name: "test-channel-1", Namespace: "default"},
+ "e.f.g.h": {Name: "test-channel-2", Namespace: "default"},
},
},
{
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
index 5df19d94d44..18a3aa9057f 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher_test.go
@@ -228,9 +228,9 @@ func TestUpdateHostToChannelMap(t *testing.T) {
*makechannel("chan3", "ns3", "host3"),
},
expected: map[string]provisioners.ChannelReference{
- "host1": provisioners.ChannelReference{Name: "chan1", Namespace: "ns1"},
- "host2": provisioners.ChannelReference{Name: "chan2", Namespace: "ns2"},
- "host3": provisioners.ChannelReference{Name: "chan3", Namespace: "ns3"},
+ "host1": {Name: "chan1", Namespace: "ns1"},
+ "host2": {Name: "chan2", Namespace: "ns2"},
+ "host3": {Name: "chan3", Namespace: "ns3"},
},
},
}
diff --git a/pkg/broker/context.go b/pkg/broker/context.go
index a06be9244d4..fd336f5123e 100644
--- a/pkg/broker/context.go
+++ b/pkg/broker/context.go
@@ -22,7 +22,7 @@ import (
"net/url"
"strings"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
"k8s.io/apimachinery/pkg/util/sets"
)
diff --git a/pkg/broker/receiver.go b/pkg/broker/receiver.go
index 675af6a3ca9..4904c7c3b23 100644
--- a/pkg/broker/receiver.go
+++ b/pkg/broker/receiver.go
@@ -23,7 +23,7 @@ import (
"net/url"
"time"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/reconciler/trigger/path"
"go.uber.org/zap"
diff --git a/pkg/broker/receiver_test.go b/pkg/broker/receiver_test.go
index ed14a6c0041..e299178f969 100644
--- a/pkg/broker/receiver_test.go
+++ b/pkg/broker/receiver_test.go
@@ -26,7 +26,7 @@ import (
"strings"
"testing"
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
cehttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
"github.com/google/go-cmp/cmp"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
diff --git a/pkg/broker/ttl.go b/pkg/broker/ttl.go
index 8d7ccdef39f..d71f425b8a7 100644
--- a/pkg/broker/ttl.go
+++ b/pkg/broker/ttl.go
@@ -17,7 +17,7 @@
package broker
import (
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
)
const (
diff --git a/pkg/kncloudevents/good_client.go b/pkg/kncloudevents/good_client.go
index 9a000dba51a..83cd6ae6dc8 100644
--- a/pkg/kncloudevents/good_client.go
+++ b/pkg/kncloudevents/good_client.go
@@ -1,7 +1,7 @@
package kncloudevents
import (
- "github.com/cloudevents/sdk-go"
+ cloudevents "github.com/cloudevents/sdk-go"
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
)
diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go
index 296c175225e..f132a90a5ff 100644
--- a/pkg/logging/logging.go
+++ b/pkg/logging/logging.go
@@ -19,6 +19,7 @@ package logging
import (
"context"
+
"github.com/knative/pkg/logging"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
diff --git a/pkg/reconciler/containersource/resources/deployment.go b/pkg/reconciler/containersource/resources/deployment.go
index 5664a54dcfb..e84854bdfe3 100644
--- a/pkg/reconciler/containersource/resources/deployment.go
+++ b/pkg/reconciler/containersource/resources/deployment.go
@@ -18,9 +18,10 @@ package resources
import (
"fmt"
- "github.com/knative/pkg/kmeta"
"strings"
+ "github.com/knative/pkg/kmeta"
+
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/pkg/reconciler/containersource/resources/deployment_test.go b/pkg/reconciler/containersource/resources/deployment_test.go
index 0ac4a82d2fe..f0f544187f2 100644
--- a/pkg/reconciler/containersource/resources/deployment_test.go
+++ b/pkg/reconciler/containersource/resources/deployment_test.go
@@ -17,9 +17,10 @@ limitations under the License.
package resources
import (
- "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
"testing"
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/pkg/reconciler/cronjobsource/resources/receive_adapter.go b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
index 9f516db0cab..f3aca93f50f 100644
--- a/pkg/reconciler/cronjobsource/resources/receive_adapter.go
+++ b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
@@ -18,6 +18,7 @@ package resources
import (
"fmt"
+
"github.com/knative/pkg/kmeta"
v1 "k8s.io/api/apps/v1"
diff --git a/pkg/reconciler/namespace/namespace.go b/pkg/reconciler/namespace/namespace.go
index 6840273ec67..ea6f2f9f4e1 100644
--- a/pkg/reconciler/namespace/namespace.go
+++ b/pkg/reconciler/namespace/namespace.go
@@ -19,6 +19,7 @@ package namespace
import (
"context"
"fmt"
+
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
"k8s.io/client-go/tools/cache"
diff --git a/pkg/reconciler/namespace/namespace_test.go b/pkg/reconciler/namespace/namespace_test.go
index 5b76fe2157d..23537252d8b 100644
--- a/pkg/reconciler/namespace/namespace_test.go
+++ b/pkg/reconciler/namespace/namespace_test.go
@@ -17,10 +17,11 @@ limitations under the License.
package namespace
import (
+ "testing"
+
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "testing"
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
diff --git a/pkg/reconciler/reconciler.go b/pkg/reconciler/reconciler.go
index a15d1246916..89bd57a96a3 100644
--- a/pkg/reconciler/reconciler.go
+++ b/pkg/reconciler/reconciler.go
@@ -17,9 +17,10 @@ limitations under the License.
package reconciler
import (
+ "time"
+
"github.com/knative/pkg/configmap"
"github.com/knative/pkg/system"
- "time"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
diff --git a/pkg/reconciler/testing/containersource.go b/pkg/reconciler/testing/containersource.go
index bc43e909f90..cea44fdd7fa 100644
--- a/pkg/reconciler/testing/containersource.go
+++ b/pkg/reconciler/testing/containersource.go
@@ -17,9 +17,10 @@ limitations under the License.
package testing
import (
- "k8s.io/apimachinery/pkg/types"
"time"
+ "k8s.io/apimachinery/pkg/types"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/knative/eventing/pkg/apis/sources/v1alpha1"
diff --git a/pkg/reconciler/testing/factory.go b/pkg/reconciler/testing/factory.go
index c1f39e4f4cf..f89f7791208 100644
--- a/pkg/reconciler/testing/factory.go
+++ b/pkg/reconciler/testing/factory.go
@@ -18,9 +18,10 @@ package testing
import (
"context"
- "k8s.io/apimachinery/pkg/runtime"
"testing"
+ "k8s.io/apimachinery/pkg/runtime"
+
fakedynamicclientset "k8s.io/client-go/dynamic/fake"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
clientgotesting "k8s.io/client-go/testing"
diff --git a/pkg/reconciler/testing/namespace.go b/pkg/reconciler/testing/namespace.go
index 3f294c0e889..d15036a5d95 100644
--- a/pkg/reconciler/testing/namespace.go
+++ b/pkg/reconciler/testing/namespace.go
@@ -17,9 +17,10 @@ limitations under the License.
package testing
import (
+ "time"
+
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "time"
)
// NamespaceOption enables further configuration of a Namespace.
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index 69d7cdacc8e..e09c29d9f5c 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -28,20 +28,20 @@ import (
// channelTestMap indicates which test cases we want to run for a given CCP.
var channelTestMap = map[string][]func(t *testing.T){
- test.InMemoryProvisioner: []func(t *testing.T){
+ test.InMemoryProvisioner: {
TestSingleBinaryEvent,
TestSingleStructuredEvent,
TestEventTransformation,
TestChannelChain,
TestDefaultBrokerWithManyTriggers,
},
- test.InMemoryChannelProvisioner: []func(t *testing.T){
+ test.InMemoryChannelProvisioner: {
TestSingleBinaryEvent,
TestSingleStructuredEvent,
TestEventTransformation,
TestChannelChain,
},
- test.GCPPubSubProvisioner: []func(t *testing.T){
+ test.GCPPubSubProvisioner: {
TestSingleBinaryEvent,
TestSingleStructuredEvent,
TestEventTransformation,
From cb2b2486baf40119021d14e86926289e33a7db26 Mon Sep 17 00:00:00 2001
From: mattmoor-sockpuppet
Date: Tue, 30 Apr 2019 10:09:33 -0700
Subject: [PATCH 69/76] Fix spelling errors (#1124)
Produced via: `github.com/client9/misspell`
---
contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go | 2 +-
contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
index 09117122e46..12e30191ff7 100644
--- a/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
+++ b/contrib/gcppubsub/pkg/dispatcher/receiver/receiver.go
@@ -197,7 +197,7 @@ func (r *Receiver) UpdateHostToChannelMap(ctx context.Context) error {
hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList)
if err != nil {
- logging.FromContext(ctx).Error("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err))
+ logging.FromContext(ctx).Error("UpdateHostToChannelMap: Error occurred when creating the new hostToChannel map.", zap.Error(err))
return err
}
diff --git a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
index 8d900564b63..15b2e35e6f0 100644
--- a/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
+++ b/contrib/natss/pkg/dispatcher/dispatcher/dispatcher.go
@@ -314,7 +314,7 @@ func (s *SubscriptionsSupervisor) setHostToChannelMap(hcMap map[string]provision
func (s *SubscriptionsSupervisor) UpdateHostToChannelMap(ctx context.Context, chanList []eventingv1alpha1.Channel) error {
hostToChanMap, err := provisioners.NewHostNameToChannelRefMap(chanList)
if err != nil {
- logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occured when creating the new hostToChannel map.", zap.Error(err))
+ logging.FromContext(ctx).Info("UpdateHostToChannelMap: Error occurred when creating the new hostToChannel map.", zap.Error(err))
return err
}
s.setHostToChannelMap(hostToChanMap)
From 3546f63cf420f9f3fb81f24e03868bd4fd18e0da Mon Sep 17 00:00:00 2001
From: Chi Zhang
Date: Tue, 30 Apr 2019 14:18:32 -0700
Subject: [PATCH 70/76] Enable e2e testing for NATSS (#1126)
* clean scripts
* update test script
* enable e2e for natss provisioner
* clean up
* fix CR issues
---
test/e2e-tests.sh | 96 ++++++++++++++++++++++++++++---------------
test/e2e/main_test.go | 6 +++
2 files changed, 69 insertions(+), 33 deletions(-)
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index c41bce57f4c..ddf400590ea 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -30,21 +30,24 @@ source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.s
readonly EVENTING_CONFIG="config/"
+# In-memory provisioner config.
readonly IN_MEMORY_CHANNEL_CONFIG="config/provisioners/in-memory-channel/in-memory-channel.yaml"
-# GCP PubSub config template.
+# GCP PubSub provisioner config template.
readonly GCP_PUBSUB_CONFIG_TEMPLATE="contrib/gcppubsub/config/gcppubsub.yaml"
-# Real GCP PubSub config, generated from the template.
+# Real GCP PubSub provisioner config, generated from the template.
readonly GCP_PUBSUB_CONFIG="$(mktemp)"
-# TODO(Fredy-Z): delete this flag after https://github.com/knative/test-infra/pull/692 is merged and updated
-E2E_PROJECT_ID=""
-
# Constants used for creating ServiceAccount for GCP PubSub provisioner setup if it's not running on Prow.
-readonly PUBSUB_SERVICE_ACCOUNT="eventing_pubsub_test"
+readonly PUBSUB_SERVICE_ACCOUNT="eventing-pubsub-test"
readonly PUBSUB_SERVICE_ACCOUNT_KEY="$(mktemp)"
readonly PUBSUB_SECRET_NAME="gcppubsub-channel-key"
+# NATS Streaming installation config.
+readonly NATSS_INSTALLATION_CONFIG="contrib/natss/config/broker/natss.yaml"
+# NATSS provisioner config.
+readonly NATSS_CONFIG="contrib/natss/config/provisioner.yaml"
+
# Setup the Knative environment for running tests.
function knative_setup() {
# Install the latest stable Knative/serving in the current cluster.
@@ -55,25 +58,41 @@ function knative_setup() {
echo "Installing Knative Eventing"
ko apply -f ${EVENTING_CONFIG} || return 1
wait_until_pods_running knative-eventing || fail_test "Knative Eventing did not come up"
+}
+
+# Teardown the Knative environment after tests finish.
+function knative_teardown() {
+ echo ">> Stopping Knative Eventing"
+ echo "Uninstalling Knative Eventing"
+ ko delete --ignore-not-found=true --now --timeout 60s -f ${EVENTING_CONFIG}
+}
+# Setup resources common to all eventing tests.
+function test_setup() {
+ # Install provisioners used by the tests.
echo "Installing In-Memory ClusterChannelProvisioner"
ko apply -f ${IN_MEMORY_CHANNEL_CONFIG} || return 1
wait_until_pods_running knative-eventing || fail_test "Failed to install the In-Memory ClusterChannelProvisioner"
- E2E_PROJECT_ID="$(gcloud config get-value project)"
echo "Installing GCPPubSub ClusterChannelProvisioner"
- gcppubsub_setup
+ gcppubsub_setup || return 1
sed "s/REPLACE_WITH_GCP_PROJECT/${E2E_PROJECT_ID}/" ${GCP_PUBSUB_CONFIG_TEMPLATE} > ${GCP_PUBSUB_CONFIG}
- ko apply -f ${GCP_PUBSUB_CONFIG}
+ ko apply -f ${GCP_PUBSUB_CONFIG} || return 1
wait_until_pods_running knative-eventing || fail_test "Failed to install the GCPPubSub ClusterChannelProvisioner"
-}
-# Teardown the Knative environment after tests finish.
-function knative_teardown() {
- echo ">> Stopping Knative Eventing"
- echo "Uninstalling Knative Eventing"
- ko delete --ignore-not-found=true --now --timeout 60s -f ${EVENTING_CONFIG}
+ echo "Installing NATSS ClusterChannelProvisioner"
+ natss_setup || return 1
+ ko apply -f ${NATSS_CONFIG} || return 1
+ wait_until_pods_running knative-eventing || fail_test "Failed to install the NATSS ClusterChannelProvisioner"
+ # Publish test images.
+ echo ">> Publishing test images"
+ $(dirname $0)/upload-test-images.sh e2e || fail_test "Error uploading test images"
+}
+
+# Tear down resources used in the eventing tests.
+function test_teardown() {
+ # Uninstall provisioners used by the tests.
echo "Uninstalling In-Memory ClusterChannelProvisioner"
ko delete --ignore-not-found=true --now --timeout 60s -f ${IN_MEMORY_CHANNEL_CONFIG}
@@ -81,14 +100,11 @@ function knative_teardown() {
gcppubsub_teardown
ko delete --ignore-not-found=true --now --timeout 60s -f ${GCP_PUBSUB_CONFIG}
- wait_until_object_does_not_exist namespaces knative-eventing
-}
+ echo "Uninstalling NATSS ClusterChannelProvisioner"
+ natss_teardown
+ ko delete --ignore-not-found=true --now --timeout 60s -f ${NATSS_CONFIG}
-# Setup resources common to all eventing tests.
-function test_setup() {
- # Publish test images.
- echo ">> Publishing test images"
- $(dirname $0)/upload-test-images.sh e2e || fail_test "Error uploading test images"
+ wait_until_object_does_not_exist namespaces knative-eventing
}
# Create resources required for GCP PubSub provisioner setup
@@ -124,19 +140,33 @@ function gcppubsub_teardown() {
kubectl -n knative-eventing delete secret ${PUBSUB_SECRET_NAME}
}
+# Create resources required for NATSS provisioner setup
+function natss_setup() {
+ echo "Installing NATS Streaming"
+ kubectl create namespace natss || return 1
+ kubectl label namespace natss istio-injection=enabled || return 1
+ kubectl apply -n natss -f ${NATSS_INSTALLATION_CONFIG} || return 1
+}
+
+# Delete resources used for NATSS provisioner setup
+function natss_teardown() {
+ echo "Uninstalling NATS Streaming"
+ kubectl delete -f ${NATSS_INSTALLATION_CONFIG}
+ kubectl delete namespace natss
+}
+
function dump_extra_cluster_state() {
# Collecting logs from all knative's eventing pods.
echo "============================================================"
- for namespace in "knative-eventing" "e2etestfn3"; do
- for pod in $(kubectl get pod -n $namespace | grep Running | awk '{print $1}' ); do
- for container in $(kubectl get pod "${pod}" -n $namespace -ojsonpath='{.spec.containers[*].name}'); do
- echo "Namespace, Pod, Container: ${namespace}, ${pod}, ${container}"
- kubectl logs -n $namespace "${pod}" -c "${container}" || true
- echo "----------------------------------------------------------"
- echo "Namespace, Pod, Container (Previous instance): ${namespace}, ${pod}, ${container}"
- kubectl logs -p -n $namespace "${pod}" -c "${container}" || true
- echo "============================================================"
- done
+ local namespace="knative-eventing"
+ for pod in $(kubectl get pod -n $namespace | grep Running | awk '{print $1}' ); do
+ for container in $(kubectl get pod "${pod}" -n $namespace -ojsonpath='{.spec.containers[*].name}'); do
+ echo "Namespace, Pod, Container: ${namespace}, ${pod}, ${container}"
+ kubectl logs -n $namespace "${pod}" -c "${container}" || true
+ echo "----------------------------------------------------------"
+ echo "Namespace, Pod, Container (Previous instance): ${namespace}, ${pod}, ${container}"
+ kubectl logs -p -n $namespace "${pod}" -c "${container}" || true
+ echo "============================================================"
done
done
}
@@ -145,6 +175,6 @@ function dump_extra_cluster_state() {
initialize $@
-go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,in-memory || fail_test
+go_test_e2e -timeout=20m ./test/e2e -run ^TestMain$ -runFromMain=true -clusterChannelProvisioners=in-memory-channel,in-memory,natss || fail_test
success
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index e09c29d9f5c..8035bbcf009 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -47,6 +47,12 @@ var channelTestMap = map[string][]func(t *testing.T){
TestEventTransformation,
TestChannelChain,
},
+ test.NatssProvisioner: {
+ TestSingleBinaryEvent,
+ TestSingleStructuredEvent,
+ TestEventTransformation,
+ TestChannelChain,
+ },
}
func TestMain(t *testing.T) {
From 8c96f446579f1dd30fa4fe0e5c87327c73dad53f Mon Sep 17 00:00:00 2001
From: Ignacio Cano
Date: Tue, 30 Apr 2019 15:07:32 -0700
Subject: [PATCH 71/76] Namespace Controller watching for Broker,
ServiceAccount, and RoleBinding (#1127)
* Reconciling broker, service account, and role binding in Namespace controller
* updating Gopkg.lock
---
Gopkg.lock | 2 +
cmd/controller/main.go | 7 +++
pkg/reconciler/namespace/namespace.go | 62 ++++++++++++++++++++--
pkg/reconciler/namespace/namespace_test.go | 16 ++++--
pkg/reconciler/testing/listers.go | 10 ++++
5 files changed, 89 insertions(+), 8 deletions(-)
diff --git a/Gopkg.lock b/Gopkg.lock
index 0fd66fadcbc..07057341548 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1442,12 +1442,14 @@
"k8s.io/client-go/informers",
"k8s.io/client-go/informers/apps/v1",
"k8s.io/client-go/informers/core/v1",
+ "k8s.io/client-go/informers/rbac/v1",
"k8s.io/client-go/kubernetes",
"k8s.io/client-go/kubernetes/fake",
"k8s.io/client-go/kubernetes/scheme",
"k8s.io/client-go/kubernetes/typed/core/v1",
"k8s.io/client-go/listers/apps/v1",
"k8s.io/client-go/listers/core/v1",
+ "k8s.io/client-go/listers/rbac/v1",
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
"k8s.io/client-go/rest",
"k8s.io/client-go/testing",
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 74599e9e9f0..37afc799c65 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -89,6 +89,8 @@ func main() {
namespaceInformer := kubeInformerFactory.Core().V1().Namespaces()
configMapInformer := kubeInformerFactory.Core().V1().ConfigMaps()
deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
+ serviceAccountInformer := kubeInformerFactory.Core().V1().ServiceAccounts()
+ roleBindingInformer := kubeInformerFactory.Rbac().V1().RoleBindings()
// Build all of our controllers, with the clients constructed above.
// Add new controllers to this array.
@@ -101,6 +103,9 @@ func main() {
namespace.NewController(
opt,
namespaceInformer,
+ serviceAccountInformer,
+ roleBindingInformer,
+ brokerInformer,
),
channel.NewController(
opt,
@@ -164,6 +169,8 @@ func main() {
serviceInformer.Informer(),
namespaceInformer.Informer(),
deploymentInformer.Informer(),
+ serviceAccountInformer.Informer(),
+ roleBindingInformer.Informer(),
); err != nil {
logger.Fatalf("Failed to start informers: %v", err)
}
diff --git a/pkg/reconciler/namespace/namespace.go b/pkg/reconciler/namespace/namespace.go
index ea6f2f9f4e1..00ca22b34d1 100644
--- a/pkg/reconciler/namespace/namespace.go
+++ b/pkg/reconciler/namespace/namespace.go
@@ -19,12 +19,17 @@ package namespace
import (
"context"
"fmt"
-
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
+ "github.com/knative/eventing/pkg/utils"
+ "github.com/knative/pkg/tracker"
"k8s.io/client-go/tools/cache"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ eventinglisters "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
corev1informers "k8s.io/client-go/informers/core/v1"
+ rbacv1informers "k8s.io/client-go/informers/rbac/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
+ rbacv1listers "k8s.io/client-go/listers/rbac/v1"
"github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
"github.com/knative/eventing/pkg/logging"
@@ -52,11 +57,21 @@ const (
serviceAccountRBACCreated = "BrokerFilterServiceAccountRBACCreated"
)
+var (
+ serviceAccountGVK = corev1.SchemeGroupVersion.WithKind("ServiceAccount")
+ roleBindingGVK = rbacv1.SchemeGroupVersion.WithKind("RoleBinding")
+ brokerGVK = v1alpha1.SchemeGroupVersion.WithKind("Broker")
+)
+
type Reconciler struct {
*reconciler.Base
// listers index properties about resources
- namespaceLister corev1listers.NamespaceLister
+ namespaceLister corev1listers.NamespaceLister
+ serviceAccountLister corev1listers.ServiceAccountLister
+ roleBindingLister rbacv1listers.RoleBindingLister
+ brokerLister eventinglisters.BrokerLister
+ tracker tracker.Interface
}
// Check that our Reconciler implements controller.Reconciler
@@ -67,6 +82,9 @@ var _ controller.Reconciler = (*Reconciler)(nil)
func NewController(
opt reconciler.Options,
namespaceInformer corev1informers.NamespaceInformer,
+ serviceAccountInformer corev1informers.ServiceAccountInformer,
+ roleBindingInformer rbacv1informers.RoleBindingInformer,
+ brokerInformer eventinginformers.BrokerInformer,
) *controller.Impl {
r := &Reconciler{
@@ -76,11 +94,24 @@ func NewController(
impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
// TODO: filter label selector: on InjectionEnabledLabels()
- // TODO: we need to also watch for changes to service accounts, RoleBindings, and Brokers to heal on bad changes.
r.Logger.Info("Setting up event handlers")
namespaceInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+ // Tracker is used to notify us the namespace's resources we need to reconcile.
+ r.tracker = tracker.New(impl.EnqueueKey, opt.GetTrackerLease())
+
+ // Watch all the resources that this reconciler reconciles.
+ serviceAccountInformer.Informer().AddEventHandler(reconciler.Handler(
+ controller.EnsureTypeMeta(r.tracker.OnChanged, serviceAccountGVK),
+ ))
+ roleBindingInformer.Informer().AddEventHandler(reconciler.Handler(
+ controller.EnsureTypeMeta(r.tracker.OnChanged, roleBindingGVK),
+ ))
+ brokerInformer.Informer().AddEventHandler(reconciler.Handler(
+ controller.EnsureTypeMeta(r.tracker.OnChanged, brokerGVK),
+ ))
+
return impl
}
@@ -135,16 +166,37 @@ func (r *Reconciler) reconcile(ctx context.Context, ns *corev1.Namespace) error
logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account for the namespace", zap.Error(err))
return err
}
- _, err = r.reconcileBrokerFilterRBAC(ctx, ns, sa)
+
+ // Tell tracker to reconcile this namespace whenever the Service Account changes.
+ if err = r.tracker.Track(utils.ObjectRef(sa, serviceAccountGVK), ns); err != nil {
+ logging.FromContext(ctx).Error("Unable to track changes to ServiceAccount", zap.Error(err))
+ return err
+ }
+
+ rb, err := r.reconcileBrokerFilterRBAC(ctx, ns, sa)
if err != nil {
logging.FromContext(ctx).Error("Unable to reconcile the Broker Filter Service Account RBAC for the namespace", zap.Error(err))
return err
}
- _, err = r.reconcileBroker(ctx, ns)
+
+ // Tell tracker to reconcile this namespace whenever the RoleBinding changes.
+ if err = r.tracker.Track(utils.ObjectRef(rb, roleBindingGVK), ns); err != nil {
+ logging.FromContext(ctx).Error("Unable to track changes to RoleBinding", zap.Error(err))
+ return err
+ }
+
+ b, err := r.reconcileBroker(ctx, ns)
if err != nil {
logging.FromContext(ctx).Error("Unable to reconcile Broker for the namespace", zap.Error(err))
return err
}
+
+ // Tell tracker to reconcile this namespace whenever the Broker changes.
+ if err = r.tracker.Track(utils.ObjectRef(b, brokerGVK), ns); err != nil {
+ logging.FromContext(ctx).Error("Unable to track changes to Broker", zap.Error(err))
+ return err
+ }
+
return nil
}
diff --git a/pkg/reconciler/namespace/namespace_test.go b/pkg/reconciler/namespace/namespace_test.go
index 23537252d8b..5af66e14fc7 100644
--- a/pkg/reconciler/namespace/namespace_test.go
+++ b/pkg/reconciler/namespace/namespace_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package namespace
import (
+ "github.com/knative/pkg/tracker"
"testing"
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
@@ -25,6 +26,7 @@ import (
eventingv1alpha1 "github.com/knative/eventing/pkg/apis/eventing/v1alpha1"
fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ eventinginformers "github.com/knative/eventing/pkg/client/informers/externalversions"
"github.com/knative/eventing/pkg/reconciler"
. "github.com/knative/eventing/pkg/reconciler/testing"
"github.com/knative/pkg/controller"
@@ -70,14 +72,18 @@ func TestNew(t *testing.T) {
kubeClient := fakekubeclientset.NewSimpleClientset()
eventingClient := fakeclientset.NewSimpleClientset()
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+ eventingInformer := eventinginformers.NewSharedInformerFactory(eventingClient, 0)
namespaceInformer := kubeInformer.Core().V1().Namespaces()
+ serviceAccountInformer := kubeInformer.Core().V1().ServiceAccounts()
+ roleBindingInformer := kubeInformer.Rbac().V1().RoleBindings()
+ brokerInformer := eventingInformer.Eventing().V1alpha1().Brokers()
c := NewController(reconciler.Options{
KubeClientSet: kubeClient,
EventingClientSet: eventingClient,
Logger: logtesting.TestLogger(t),
- }, namespaceInformer)
+ }, namespaceInformer, serviceAccountInformer, roleBindingInformer, brokerInformer)
if c == nil {
t.Fatal("Expected NewController to return a non-nil value")
@@ -238,8 +244,12 @@ func TestAllCases(t *testing.T) {
defer logtesting.ClearAll()
table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
return &Reconciler{
- Base: reconciler.NewBase(opt, controllerAgentName),
- namespaceLister: listers.GetNamespaceLister(),
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ namespaceLister: listers.GetNamespaceLister(),
+ brokerLister: listers.GetBrokerLister(),
+ serviceAccountLister: listers.GetServiceAccountLister(),
+ roleBindingLister: listers.GetRoleBindingLister(),
+ tracker: tracker.New(func(string) {}, 0),
}
}))
}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index d84b97755ba..1f41a286e5f 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -28,12 +28,14 @@ import (
"github.com/knative/pkg/reconciler/testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
fakekubeclientset "k8s.io/client-go/kubernetes/fake"
appsv1listers "k8s.io/client-go/listers/apps/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
+ rbacv1listers "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
)
@@ -145,6 +147,14 @@ func (l *Listers) GetNamespaceLister() corev1listers.NamespaceLister {
return corev1listers.NewNamespaceLister(l.indexerFor(&corev1.Namespace{}))
}
+func (l *Listers) GetServiceAccountLister() corev1listers.ServiceAccountLister {
+ return corev1listers.NewServiceAccountLister(l.indexerFor(&corev1.ServiceAccount{}))
+}
+
+func (l *Listers) GetRoleBindingLister() rbacv1listers.RoleBindingLister {
+ return rbacv1listers.NewRoleBindingLister(l.indexerFor(&rbacv1.RoleBinding{}))
+}
+
func (l *Listers) GetEndpointsLister() corev1listers.EndpointsLister {
return corev1listers.NewEndpointsLister(l.indexerFor(&corev1.Endpoints{}))
}
From c7f1582c23c855eaea1af19024ac55b2a4e1600e Mon Sep 17 00:00:00 2001
From: Lionel Villard
Date: Wed, 1 May 2019 10:06:33 -0400
Subject: [PATCH 72/76] move apiserver source to eventing (#1108)
---
Gopkg.lock | 10 +
cmd/apiserver_receive_adapter/main.go | 136 ++++++++
cmd/sources-controller/main.go | 10 +-
config/200-controller-clusterrole.yaml | 3 +
config/300-apiserversource.yaml | 88 +++++
config/500-controller.yaml | 2 +
config/500-sources-controller.yaml | 2 +
pkg/adapter/apiserver/adapter.go | 129 +++++++
pkg/adapter/apiserver/adapter_test.go | 203 +++++++++++
.../sources/v1alpha1/apiserver_lifecycle.go | 66 ++++
.../v1alpha1/apiserver_lifecycle_test.go | 160 +++++++++
pkg/apis/sources/v1alpha1/apiserver_types.go | 101 ++++++
pkg/apis/sources/v1alpha1/register.go | 2 +
pkg/apis/sources/v1alpha1/register_test.go | 2 +
.../sources/v1alpha1/zz_generated.deepcopy.go | 120 +++++++
.../typed/sources/v1alpha1/apiserversource.go | 174 ++++++++++
.../v1alpha1/fake/fake_apiserversource.go | 140 ++++++++
.../v1alpha1/fake/fake_sources_client.go | 4 +
.../sources/v1alpha1/generated_expansion.go | 2 +
.../typed/sources/v1alpha1/sources_client.go | 5 +
.../informers/externalversions/generic.go | 2 +
.../sources/v1alpha1/apiserversource.go | 89 +++++
.../sources/v1alpha1/interface.go | 7 +
.../sources/v1alpha1/apiserversource.go | 94 ++++++
.../sources/v1alpha1/expansion_generated.go | 8 +
.../apiserversource/apiserversource.go | 276 +++++++++++++++
.../apiserversource/apiserversource_test.go | 202 +++++++++++
pkg/reconciler/apiserversource/doc.go | 18 +
.../apiserversource/resources/labels.go | 30 ++
.../resources/receive_adapter.go | 117 +++++++
.../resources/receive_adapter_test.go | 127 +++++++
pkg/reconciler/testing/apiserversource.go | 73 ++++
pkg/reconciler/testing/listers.go | 4 +
third_party/VENDOR-LICENSE | 25 ++
.../kelseyhightower/envconfig/LICENSE | 19 ++
.../kelseyhightower/envconfig/doc.go | 8 +
.../kelseyhightower/envconfig/env_os.go | 7 +
.../kelseyhightower/envconfig/env_syscall.go | 7 +
.../kelseyhightower/envconfig/envconfig.go | 319 ++++++++++++++++++
.../kelseyhightower/envconfig/usage.go | 158 +++++++++
40 files changed, 2948 insertions(+), 1 deletion(-)
create mode 100644 cmd/apiserver_receive_adapter/main.go
create mode 100644 config/300-apiserversource.yaml
create mode 100644 pkg/adapter/apiserver/adapter.go
create mode 100644 pkg/adapter/apiserver/adapter_test.go
create mode 100644 pkg/apis/sources/v1alpha1/apiserver_lifecycle.go
create mode 100644 pkg/apis/sources/v1alpha1/apiserver_lifecycle_test.go
create mode 100644 pkg/apis/sources/v1alpha1/apiserver_types.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/apiserversource.go
create mode 100644 pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_apiserversource.go
create mode 100644 pkg/client/informers/externalversions/sources/v1alpha1/apiserversource.go
create mode 100644 pkg/client/listers/sources/v1alpha1/apiserversource.go
create mode 100644 pkg/reconciler/apiserversource/apiserversource.go
create mode 100644 pkg/reconciler/apiserversource/apiserversource_test.go
create mode 100644 pkg/reconciler/apiserversource/doc.go
create mode 100644 pkg/reconciler/apiserversource/resources/labels.go
create mode 100644 pkg/reconciler/apiserversource/resources/receive_adapter.go
create mode 100644 pkg/reconciler/apiserversource/resources/receive_adapter_test.go
create mode 100644 pkg/reconciler/testing/apiserversource.go
create mode 100644 vendor/github.com/kelseyhightower/envconfig/LICENSE
create mode 100644 vendor/github.com/kelseyhightower/envconfig/doc.go
create mode 100644 vendor/github.com/kelseyhightower/envconfig/env_os.go
create mode 100644 vendor/github.com/kelseyhightower/envconfig/env_syscall.go
create mode 100644 vendor/github.com/kelseyhightower/envconfig/envconfig.go
create mode 100644 vendor/github.com/kelseyhightower/envconfig/usage.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 07057341548..5fd48dab9cf 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -424,6 +424,14 @@
pruneopts = "NUT"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
+[[projects]]
+ digest = "1:b8870bf2606dca65dc382f4cb8b7a434f17ff36a915451bda12788e9620be368"
+ name = "github.com/kelseyhightower/envconfig"
+ packages = ["."]
+ pruneopts = "NUT"
+ revision = "f611eb38b3875cc3bd991ca91c51d06446afa14c"
+ version = "v1.3.0"
+
[[projects]]
digest = "1:57d04562d05dd4500ff1e7e47f2e62b9be0531388377a3b691a012ce70b210d5"
name = "github.com/knative/pkg"
@@ -1369,12 +1377,14 @@
"github.com/cloudevents/sdk-go",
"github.com/cloudevents/sdk-go/pkg/cloudevents",
"github.com/cloudevents/sdk-go/pkg/cloudevents/client",
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json",
"github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http",
"github.com/cloudevents/sdk-go/pkg/cloudevents/types",
"github.com/fsnotify/fsnotify",
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/google/uuid",
+ "github.com/kelseyhightower/envconfig",
"github.com/knative/pkg/apis",
"github.com/knative/pkg/apis/duck",
"github.com/knative/pkg/apis/duck/v1alpha1",
diff --git a/cmd/apiserver_receive_adapter/main.go b/cmd/apiserver_receive_adapter/main.go
new file mode 100644
index 00000000000..e7bc7004eed
--- /dev/null
+++ b/cmd/apiserver_receive_adapter/main.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "flag"
+ "time"
+
+ "k8s.io/client-go/rest"
+
+ // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+ "github.com/knative/eventing/pkg/reconciler"
+
+ "github.com/kelseyhightower/envconfig"
+ "github.com/knative/eventing/pkg/adapter/apiserver"
+ "github.com/knative/eventing/pkg/kncloudevents"
+ "github.com/knative/pkg/apis/duck"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ kncontroller "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/signals"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+ "k8s.io/client-go/tools/clientcmd"
+)
+
+var (
+ masterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
+ kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
+)
+
+type envConfig struct {
+ SinkURI string `split_words:"true" required:"true"`
+ ApiVersion []string `split_words:"true" required:"true"`
+ Kind []string `required:"true"`
+ Controller []bool `required:"true"`
+}
+
+func main() {
+ flag.Parse()
+
+ logCfg := zap.NewProductionConfig()
+ logCfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ dlogger, err := logCfg.Build()
+ logger := dlogger.Sugar()
+
+ var env envConfig
+ err = envconfig.Process("", &env)
+ if err != nil {
+ logger.Fatalw("Error processing environment", zap.Error(err))
+ }
+
+ // set up signals so we handle the first shutdown signal gracefully
+ stopCh := signals.SetupSignalHandler()
+
+ cfg, err := clientcmd.BuildConfigFromFlags(*masterURL, *kubeconfig)
+ if err != nil {
+ logger.Fatalw("Error building kubeconfig", zap.Error(err))
+ }
+
+ logger = logger.With(zap.String("controller/apiserver", "adapter"))
+ logger.Info("Starting the controller")
+
+ numControllers := len(env.ApiVersion)
+ cfg.QPS = float32(numControllers) * rest.DefaultQPS
+ cfg.Burst = numControllers * rest.DefaultBurst
+ opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
+
+ client, err := dynamic.NewForConfig(cfg)
+ if err != nil {
+ logger.Fatalw("Error building dynamic client", zap.Error(err))
+ }
+
+ eventsClient, err := kncloudevents.NewDefaultClient(env.SinkURI)
+ if err != nil {
+ logger.Fatalw("Error building cloud event client", zap.Error(err))
+ }
+
+ controllers := []*kncontroller.Impl{}
+
+ // Create one controller per resource.
+ for i, apiVersion := range env.ApiVersion {
+ kind := env.Kind[i]
+ controlled := env.Controller[i]
+
+ obj := &duckv1alpha1.AddressableType{}
+
+ factory := duck.TypedInformerFactory{
+ Client: client,
+ ResyncPeriod: time.Duration(10), // TODO
+ StopChannel: stopCh,
+ Type: obj,
+ }
+
+ gv, err := schema.ParseGroupVersion(apiVersion)
+ if err != nil {
+ logger.Fatalw("Error parsing APIVersion", zap.Error(err))
+ }
+
+ gvk := schema.GroupVersionKind{Kind: kind, Group: gv.Group, Version: gv.Version}
+
+ // This is really bad.
+ gvr, _ := meta.UnsafeGuessKindToResource(gvk)
+
+ // Get and start the informer for gvr
+ logger.Infof("Starting informer for %v", gvk)
+ informer, lister, err := factory.Get(gvr)
+ if err != nil {
+ logger.Fatalw("Error starting informer", zap.Error(err))
+ }
+ controllers = append(controllers, apiserver.NewController(opt, informer, lister, eventsClient, controlled))
+ }
+
+ // Start all of the controllers.
+ logger.Info("Starting controllers.")
+ go kncontroller.StartAll(stopCh, controllers...)
+ <-stopCh
+}
diff --git a/cmd/sources-controller/main.go b/cmd/sources-controller/main.go
index 0868ebe9749..ebe79143ae9 100644
--- a/cmd/sources-controller/main.go
+++ b/cmd/sources-controller/main.go
@@ -33,6 +33,7 @@ import (
"github.com/knative/eventing/pkg/logconfig"
"github.com/knative/eventing/pkg/logging"
"github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/apiserversource"
"github.com/knative/eventing/pkg/reconciler/cronjobsource"
"github.com/knative/pkg/configmap"
kncontroller "github.com/knative/pkg/controller"
@@ -65,7 +66,7 @@ func main() {
logger = logger.With(zap.String("controller/impl", "pkg"))
logger.Info("Starting the controller")
- const numControllers = 2
+ const numControllers = 3
cfg.QPS = numControllers * rest.DefaultQPS
cfg.Burst = numControllers * rest.DefaultBurst
opt := reconciler.NewOptionsOrDie(cfg, logger, stopCh)
@@ -76,6 +77,7 @@ func main() {
// Eventing
cronJobSourceInformer := eventingInformerFactory.Sources().V1alpha1().CronJobSources()
containerSourceInformer := eventingInformerFactory.Sources().V1alpha1().ContainerSources()
+ apiserverSourceInformer := eventingInformerFactory.Sources().V1alpha1().ApiServerSources()
// Kube
deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
@@ -94,6 +96,11 @@ func main() {
containerSourceInformer,
deploymentInformer,
),
+ apiserversource.NewController(
+ opt,
+ apiserverSourceInformer,
+ deploymentInformer,
+ ),
}
// This line asserts at compile time that the length of controllers is equal to numControllers.
// It is based on https://go101.org/article/tips.html#assert-at-compile-time, which notes that
@@ -117,6 +124,7 @@ func main() {
// Eventing
cronJobSourceInformer.Informer(),
containerSourceInformer.Informer(),
+ apiserverSourceInformer.Informer(),
// Kube
deploymentInformer.Informer(),
); err != nil {
diff --git a/config/200-controller-clusterrole.yaml b/config/200-controller-clusterrole.yaml
index d6772c6d6cd..37a4178fcd6 100644
--- a/config/200-controller-clusterrole.yaml
+++ b/config/200-controller-clusterrole.yaml
@@ -77,4 +77,7 @@ rules:
- "containersources"
- "containersources/status"
- "containersources/finalizers"
+ - "apiserversources"
+ - "apiserversources/status"
+ - "apiserversources/finalizers"
verbs: *everything
diff --git a/config/300-apiserversource.yaml b/config/300-apiserversource.yaml
new file mode 100644
index 00000000000..dfdc580e0ad
--- /dev/null
+++ b/config/300-apiserversource.yaml
@@ -0,0 +1,88 @@
+# Copyright 2019 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ creationTimestamp: null
+ labels:
+ eventing.knative.dev/source: "true"
+ knative.dev/crd-install: "true"
+ name: apiserversources.sources.eventing.knative.dev
+spec:
+ group: sources.eventing.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - eventing
+ - sources
+ kind: ApiServerSource
+ plural: apiserversources
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ serviceAccountName:
+ type: string
+ sink:
+ type: object
+ resources:
+ items:
+ properties:
+ apiVersion:
+ type: string
+ kind:
+ type: string
+ type: array
+ required:
+ - resources
+ - sink
+ type: object
+ status:
+ properties:
+ conditions:
+ items:
+ properties:
+ lastTransitionTime:
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ severity:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - type
+ - status
+ type: object
+ type: array
+ sinkUri:
+ type: string
+ type: object
+ version: v1alpha1
diff --git a/config/500-controller.yaml b/config/500-controller.yaml
index 2cb7d815c06..092945f9589 100644
--- a/config/500-controller.yaml
+++ b/config/500-controller.yaml
@@ -52,6 +52,8 @@ spec:
value: eventing-broker-filter
- name: CRONJOB_RA_IMAGE
value: github.com/knative/eventing/cmd/cronjob_receive_adapter
+ - name: APISERVER_RA_IMAGE
+ value: github.com/knative/eventing/cmd/apiserver_receive_adapter
ports:
- containerPort: 9090
name: metrics
diff --git a/config/500-sources-controller.yaml b/config/500-sources-controller.yaml
index 6f77f264418..f4998c16207 100644
--- a/config/500-sources-controller.yaml
+++ b/config/500-sources-controller.yaml
@@ -62,6 +62,8 @@ spec:
# This is the Go import path for cron job receive adapter binary
# that is containerized and substituted here.
value: github.com/knative/eventing/cmd/cronjob_receive_adapter
+ - name: APISERVER_RA_IMAGE
+ value: github.com/knative/eventing/cmd/apiserver_receive_adapter
volumes:
- name: config-logging
configMap:
diff --git a/pkg/adapter/apiserver/adapter.go b/pkg/adapter/apiserver/adapter.go
new file mode 100644
index 00000000000..db14f450645
--- /dev/null
+++ b/pkg/adapter/apiserver/adapter.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+ "context"
+
+ "github.com/cloudevents/sdk-go/pkg/cloudevents"
+ eventsclient "github.com/cloudevents/sdk-go/pkg/cloudevents/client"
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/types"
+ "github.com/knative/eventing/pkg/reconciler"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/client-go/tools/cache"
+)
+
+const (
+ // ReconcilerName is the name of the reconciler
+ ReconcilerName = "ApiServerSource"
+
+ controllerAgentName = "apiserver-source-adapter-controller"
+ updateEventType = "dev.knative.apiserver.object.update"
+ deleteEventType = "dev.knative.apiserver.object.delete"
+)
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+ opt reconciler.Options,
+ informer cache.SharedInformer,
+ lister cache.GenericLister,
+ eventsclient eventsclient.Client,
+ controlled bool) *controller.Impl {
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ lister: lister,
+ eventsClient: eventsclient,
+ }
+ impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+ r.Logger.Info("Setting up event handlers")
+
+ if controlled {
+ informer.AddEventHandler(reconciler.Handler(impl.EnqueueControllerOf))
+ } else {
+ informer.AddEventHandler(reconciler.Handler(impl.Enqueue))
+ }
+ return impl
+}
+
+// Reconciler reconciles an ApiServerSource object
+type Reconciler struct {
+ *reconciler.Base
+
+ eventsClient eventsclient.Client
+ lister cache.GenericLister
+}
+
+// Reconcile sends a cloud event corresponding to the given key
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+ // Convert the namespace/name string into a distinct namespace and name
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ r.Logger.Errorf("invalid resource key: %s", key)
+ return nil
+ }
+
+ // Get the resource with this namespace/name
+ original, err := r.lister.ByNamespace(namespace).Get(name)
+ if apierrs.IsNotFound(err) {
+ // The resource may no longer exist, in which case we stop processing.
+ r.Logger.Error("resource key in work queue no longer exists", zap.Any("key", key))
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ object := original.(*duckv1alpha1.AddressableType)
+
+ eventType := updateEventType
+ timestamp := object.GetCreationTimestamp()
+ if object.GetDeletionTimestamp() != nil {
+ eventType = deleteEventType
+ timestamp = *object.GetDeletionTimestamp()
+ }
+
+ objectRef := corev1.ObjectReference{
+ APIVersion: object.APIVersion,
+ Kind: object.Kind,
+ Name: object.GetName(),
+ Namespace: object.GetNamespace(),
+ }
+
+ event := cloudevents.Event{
+ Context: cloudevents.EventContextV02{
+ ID: string(object.GetUID()),
+ Type: eventType,
+ Source: *types.ParseURLRef(object.GetSelfLink()),
+ Time: &types.Timestamp{Time: timestamp.Time},
+ }.AsV02(),
+ Data: objectRef,
+ }
+
+ if _, err := r.eventsClient.Send(ctx, event); err != nil {
+ r.Logger.Error("failed to send cloudevent (retrying)", err)
+
+ return err
+ }
+
+ return nil
+}
diff --git a/pkg/adapter/apiserver/adapter_test.go b/pkg/adapter/apiserver/adapter_test.go
new file mode 100644
index 00000000000..4cddd8d2f28
--- /dev/null
+++ b/pkg/adapter/apiserver/adapter_test.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+ "context"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ gotesting "testing"
+ "time"
+
+ "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json"
+ "github.com/google/go-cmp/cmp"
+ "github.com/knative/eventing/pkg/kncloudevents"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/pkg/apis/duck"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ logtesting "github.com/knative/pkg/logging/testing"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ fakedynamicclientset "k8s.io/client-go/dynamic/fake"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+)
+
+const (
+ sourceName = "test-apiserver-adapter"
+ sourceUID = "1234-5678-90"
+ testNS = "testnamespace"
+)
+
+type testCase struct {
+ // Name is a descriptive name for this test suitable as a first argument to t.Run()
+ Name string
+
+ // InitialState is the list of objects that already exists when reconciliation
+ // starts.
+ InitialState []runtime.Object
+
+ // Key is the parameter to reconciliation.
+ // This has the form "namespace/name".
+ Key string
+
+ // Where to send events
+ sink func(http.ResponseWriter, *http.Request)
+
+ // Expected event data
+ data interface{}
+}
+
+func TestReconcile(t *gotesting.T) {
+ table := []testCase{
+ testCase{
+ Name: "Receive Pod creation event",
+ InitialState: []runtime.Object{
+ getPod(),
+ },
+ Key: testNS + "/" + sourceName,
+
+ sink: sinkAccepted,
+ data: decode(t, encode(t, getPodRef())),
+ },
+ }
+
+ for _, tc := range table {
+ t.Run(tc.Name, func(t *gotesting.T) {
+ // Create fake sink server
+ h := &fakeHandler{
+ handler: tc.sink,
+ }
+
+ sinkServer := httptest.NewServer(h)
+ defer sinkServer.Close()
+
+ // Bind cloud event client
+ ceClient, err := kncloudevents.NewDefaultClient(sinkServer.URL)
+ if err != nil {
+ t.Errorf("cannot create cloud event client: %v", zap.Error(err))
+ }
+
+ // Create fake dynamic client
+ dynamicScheme := runtime.NewScheme()
+ client := fakedynamicclientset.NewSimpleDynamicClient(dynamicScheme, tc.InitialState...)
+
+ stopCh := make(chan struct{})
+ defer close(stopCh)
+
+ tif := &duck.TypedInformerFactory{
+ Client: client,
+ Type: &duckv1alpha1.AddressableType{},
+ ResyncPeriod: 1 * time.Second,
+ StopChannel: stopCh,
+ }
+
+ _, lister, err := tif.Get(schema.GroupVersionResource{Group: "", Resource: "pods", Version: "v1"})
+ if err != nil {
+ t.Fatalf("Get() = %v", err)
+ }
+
+ opt := reconciler.Options{
+ KubeClientSet: fakekubeclientset.NewSimpleClientset(),
+ Logger: logtesting.TestLogger(t),
+ }
+
+ r := &Reconciler{
+ Base: reconciler.NewBase(opt, controllerAgentName),
+ eventsClient: ceClient,
+ lister: lister,
+ }
+ ctx := context.Background()
+
+ err = r.Reconcile(ctx, tc.Key)
+ if err != nil {
+ t.Errorf("Expected no error")
+ }
+
+ if diff := cmp.Diff(tc.data, decode(t, h.body)); diff != "" {
+ t.Errorf("incorrect event (-want, +got): %v", diff)
+ }
+ })
+ }
+
+}
+
+func getPod() runtime.Object {
+ return &unstructured.Unstructured{
+ Object: map[string]interface{}{
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": map[string]interface{}{
+ "namespace": testNS,
+ "name": sourceName,
+ "selfLink": "/apis/v1/namespaces/" + testNS + "/pod/" + sourceName,
+ },
+ },
+ }
+}
+
+func getPodRef() corev1.ObjectReference {
+ return corev1.ObjectReference{
+ APIVersion: "v1",
+ Kind: "Pod",
+ Name: sourceName,
+ Namespace: testNS,
+ }
+}
+
+type fakeHandler struct {
+ body []byte
+ header http.Header
+
+ handler func(http.ResponseWriter, *http.Request)
+}
+
+func (h *fakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ h.header = r.Header
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "can not read body", http.StatusBadRequest)
+ return
+ }
+ h.body = body
+ defer r.Body.Close()
+ h.handler(w, r)
+}
+
+func sinkAccepted(writer http.ResponseWriter, req *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+func encode(t *gotesting.T, data interface{}) string {
+ b, err := json.Encode(data)
+ if err != nil {
+ t.Fatalf("failed to encode data: %v", err)
+ }
+ return string(b)
+}
+
+func decode(t *gotesting.T, data interface{}) interface{} {
+ var out interface{}
+ err := json.Decode(data, &out)
+ if err != nil {
+ t.Fatalf("failed to decode data: %v", err)
+ }
+ return out
+}
diff --git a/pkg/apis/sources/v1alpha1/apiserver_lifecycle.go b/pkg/apis/sources/v1alpha1/apiserver_lifecycle.go
new file mode 100644
index 00000000000..b5c96f60023
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/apiserver_lifecycle.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+)
+
+// GetConditions returns Conditions
+func (s *ApiServerSourceStatus) GetConditions() duckv1alpha1.Conditions {
+ return s.Conditions
+}
+
+// SetConditions sets Conditions
+func (s *ApiServerSourceStatus) SetConditions(conditions duckv1alpha1.Conditions) {
+ s.Conditions = conditions
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *ApiServerSourceStatus) GetCondition(t duckv1alpha1.ConditionType) *duckv1alpha1.Condition {
+ return apiserverCondSet.Manage(s).GetCondition(t)
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *ApiServerSourceStatus) InitializeConditions() {
+ apiserverCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *ApiServerSourceStatus) MarkSink(uri string) {
+ s.SinkURI = uri
+ if len(uri) > 0 {
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionSinkProvided)
+ } else {
+ apiserverCondSet.Manage(s).MarkUnknown(ApiServerConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *ApiServerSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// MarkDeployed sets the condition that the source has been deployed.
+func (s *ApiServerSourceStatus) MarkDeployed() {
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionDeployed)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *ApiServerSourceStatus) IsReady() bool {
+ return apiserverCondSet.Manage(s).IsHappy()
+}
diff --git a/pkg/apis/sources/v1alpha1/apiserver_lifecycle_test.go b/pkg/apis/sources/v1alpha1/apiserver_lifecycle_test.go
new file mode 100644
index 00000000000..f6df49e1da2
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/apiserver_lifecycle_test.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+func TestApiServerSourceStatusIsReady(t *testing.T) {
+ tests := []struct {
+ name string
+ s *ApiServerSourceStatus
+ want bool
+ }{{
+ name: "uninitialized",
+ s: &ApiServerSourceStatus{},
+ want: false,
+ }, {
+ name: "initialized",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark deployed",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ want: false,
+ }, {
+ name: "mark sink and deployed",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ want: true,
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.IsReady()
+ if diff := cmp.Diff(test.want, got); diff != "" {
+ t.Errorf("%s: unexpected condition (-want, +got) = %v", test.name, diff)
+ }
+ })
+ }
+}
+
+func TestApiServerSourceStatusGetCondition(t *testing.T) {
+ tests := []struct {
+ name string
+ s *ApiServerSourceStatus
+ condQuery duckv1alpha1.ConditionType
+ want *duckv1alpha1.Condition
+ }{{
+ name: "uninitialized",
+ s: &ApiServerSourceStatus{},
+ condQuery: ApiServerConditionReady,
+ want: nil,
+ }, {
+ name: "initialized",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ return s
+ }(),
+ condQuery: ApiServerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ApiServerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark deployed",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ApiServerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ApiServerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark sink",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ return s
+ }(),
+ condQuery: ApiServerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ApiServerConditionReady,
+ Status: corev1.ConditionUnknown,
+ },
+ }, {
+ name: "mark sink and deployed",
+ s: func() *ApiServerSourceStatus {
+ s := &ApiServerSourceStatus{}
+ s.InitializeConditions()
+ s.MarkSink("uri://example")
+ s.MarkDeployed()
+ return s
+ }(),
+ condQuery: ApiServerConditionReady,
+ want: &duckv1alpha1.Condition{
+ Type: ApiServerConditionReady,
+ Status: corev1.ConditionTrue,
+ },
+ }}
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ got := test.s.GetCondition(test.condQuery)
+ ignoreTime := cmpopts.IgnoreFields(duckv1alpha1.Condition{},
+ "LastTransitionTime", "Severity")
+ if diff := cmp.Diff(test.want, got, ignoreTime); diff != "" {
+ t.Errorf("unexpected condition (-want, +got) = %v", diff)
+ }
+ })
+ }
+}
diff --git a/pkg/apis/sources/v1alpha1/apiserver_types.go b/pkg/apis/sources/v1alpha1/apiserver_types.go
new file mode 100644
index 00000000000..d3ae3b89947
--- /dev/null
+++ b/pkg/apis/sources/v1alpha1/apiserver_types.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ApiServerSource is the Schema for the apiserversources API
+// +k8s:openapi-gen=true
+type ApiServerSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ApiServerSourceSpec `json:"spec,omitempty"`
+ Status ApiServerSourceStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ApiServerSourceList contains a list of ApiServerSource
+type ApiServerSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ApiServerSource `json:"items"`
+}
+
+const (
+ // ApiServerConditionReady has status True when the ApiServerSource is ready to send events.
+ ApiServerConditionReady = duckv1alpha1.ConditionReady
+
+ // ApiServerConditionSinkProvided has status True when the ApiServerSource has been configured with a sink target.
+ ApiServerConditionSinkProvided duckv1alpha1.ConditionType = "SinkProvided"
+
+ // ApiServerConditionDeployed has status True when the ApiServerSource has had its deployment created.
+ ApiServerConditionDeployed duckv1alpha1.ConditionType = "Deployed"
+)
+
+var apiserverCondSet = duckv1alpha1.NewLivingConditionSet(
+ ApiServerConditionSinkProvided,
+ ApiServerConditionDeployed,
+)
+
+// ApiServerSourceSpec defines the desired state of ApiServerSource
+type ApiServerSourceSpec struct {
+ // Resources is the list of resources to watch
+ Resources []ApiServerResource `json:"resources"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this
+ // source.
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // Sink is a reference to an object that will resolve to a domain name to use as the sink.
+ // +optional
+ Sink *corev1.ObjectReference `json:"sink,omitempty"`
+}
+
+// ApiServerSourceStatus defines the observed state of ApiServerSource
+type ApiServerSourceStatus struct {
+ // inherits duck/v1alpha1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1alpha1.Status `json:",inline"`
+
+ // SinkURI is the current active sink URI that has been configured for the ApiServerSource.
+ // +optional
+ SinkURI string `json:"sinkUri,omitempty"`
+}
+
+// ApiServerResource defines the resource to watch
+type ApiServerResource struct {
+ // API version of the resource to watch.
+ APIVersion string `json:"apiVersion"`
+
+ // Kind of the resource to watch.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+
+ // If true, send an event referencing the object controlling the resource
+ Controller bool `json:"controller"`
+}
diff --git a/pkg/apis/sources/v1alpha1/register.go b/pkg/apis/sources/v1alpha1/register.go
index 83556830ff3..cf1656669c7 100644
--- a/pkg/apis/sources/v1alpha1/register.go
+++ b/pkg/apis/sources/v1alpha1/register.go
@@ -49,6 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&CronJobSourceList{},
&ContainerSource{},
&ContainerSourceList{},
+ &ApiServerSource{},
+ &ApiServerSourceList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/pkg/apis/sources/v1alpha1/register_test.go b/pkg/apis/sources/v1alpha1/register_test.go
index c111fda185c..19055cf4a66 100644
--- a/pkg/apis/sources/v1alpha1/register_test.go
+++ b/pkg/apis/sources/v1alpha1/register_test.go
@@ -64,6 +64,8 @@ func TestKnownTypes(t *testing.T) {
"CronJobSourceList",
"ContainerSource",
"ContainerSourceList",
+ "ApiServerSource",
+ "ApiServerSourceList",
} {
if _, ok := types[name]; !ok {
t.Errorf("Did not find %q as registered type", name)
diff --git a/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
index 29ee5bbe035..134c8cad298 100644
--- a/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/sources/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,126 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerResource) DeepCopyInto(out *ApiServerResource) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerResource.
+func (in *ApiServerResource) DeepCopy() *ApiServerResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSource) DeepCopyInto(out *ApiServerSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSource.
+func (in *ApiServerSource) DeepCopy() *ApiServerSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ApiServerSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceList) DeepCopyInto(out *ApiServerSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ out.ListMeta = in.ListMeta
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ApiServerSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceList.
+func (in *ApiServerSourceList) DeepCopy() *ApiServerSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ApiServerSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceSpec) DeepCopyInto(out *ApiServerSourceSpec) {
+ *out = *in
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]ApiServerResource, len(*in))
+ copy(*out, *in)
+ }
+ if in.Sink != nil {
+ in, out := &in.Sink, &out.Sink
+ *out = new(v1.ObjectReference)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceSpec.
+func (in *ApiServerSourceSpec) DeepCopy() *ApiServerSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceStatus) DeepCopyInto(out *ApiServerSourceStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceStatus.
+func (in *ApiServerSourceStatus) DeepCopy() *ApiServerSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerSource) DeepCopyInto(out *ContainerSource) {
*out = *in
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/apiserversource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/apiserversource.go
new file mode 100644
index 00000000000..a54b43d33e3
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/apiserversource.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ scheme "github.com/knative/eventing/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ApiServerSourcesGetter has a method to return a ApiServerSourceInterface.
+// A group's client should implement this interface.
+type ApiServerSourcesGetter interface {
+ ApiServerSources(namespace string) ApiServerSourceInterface
+}
+
+// ApiServerSourceInterface has methods to work with ApiServerSource resources.
+type ApiServerSourceInterface interface {
+ Create(*v1alpha1.ApiServerSource) (*v1alpha1.ApiServerSource, error)
+ Update(*v1alpha1.ApiServerSource) (*v1alpha1.ApiServerSource, error)
+ UpdateStatus(*v1alpha1.ApiServerSource) (*v1alpha1.ApiServerSource, error)
+ Delete(name string, options *v1.DeleteOptions) error
+ DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
+ Get(name string, options v1.GetOptions) (*v1alpha1.ApiServerSource, error)
+ List(opts v1.ListOptions) (*v1alpha1.ApiServerSourceList, error)
+ Watch(opts v1.ListOptions) (watch.Interface, error)
+ Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ApiServerSource, err error)
+ ApiServerSourceExpansion
+}
+
+// apiServerSources implements ApiServerSourceInterface
+type apiServerSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newApiServerSources returns a ApiServerSources
+func newApiServerSources(c *SourcesV1alpha1Client, namespace string) *apiServerSources {
+ return &apiServerSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the apiServerSource, and returns the corresponding apiServerSource object, and an error if there is any.
+func (c *apiServerSources) Get(name string, options v1.GetOptions) (result *v1alpha1.ApiServerSource, err error) {
+ result = &v1alpha1.ApiServerSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ApiServerSources that match those selectors.
+func (c *apiServerSources) List(opts v1.ListOptions) (result *v1alpha1.ApiServerSourceList, err error) {
+ result = &v1alpha1.ApiServerSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Do().
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested apiServerSources.
+func (c *apiServerSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Watch()
+}
+
+// Create takes the representation of a apiServerSource and creates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *apiServerSources) Create(apiServerSource *v1alpha1.ApiServerSource) (result *v1alpha1.ApiServerSource, err error) {
+ result = &v1alpha1.ApiServerSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Body(apiServerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Update takes the representation of a apiServerSource and updates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *apiServerSources) Update(apiServerSource *v1alpha1.ApiServerSource) (result *v1alpha1.ApiServerSource, err error) {
+ result = &v1alpha1.ApiServerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(apiServerSource.Name).
+ Body(apiServerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+
+func (c *apiServerSources) UpdateStatus(apiServerSource *v1alpha1.ApiServerSource) (result *v1alpha1.ApiServerSource, err error) {
+ result = &v1alpha1.ApiServerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(apiServerSource.Name).
+ SubResource("status").
+ Body(apiServerSource).
+ Do().
+ Into(result)
+ return
+}
+
+// Delete takes name of the apiServerSource and deletes it. Returns an error if one occurs.
+func (c *apiServerSources) Delete(name string, options *v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(name).
+ Body(options).
+ Do().
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *apiServerSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&listOptions, scheme.ParameterCodec).
+ Body(options).
+ Do().
+ Error()
+}
+
+// Patch applies the patch and returns the patched apiServerSource.
+func (c *apiServerSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ApiServerSource, err error) {
+ result = &v1alpha1.ApiServerSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("apiserversources").
+ SubResource(subresources...).
+ Name(name).
+ Body(data).
+ Do().
+ Into(result)
+ return
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_apiserversource.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_apiserversource.go
new file mode 100644
index 00000000000..5933ac37691
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_apiserversource.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeApiServerSources implements ApiServerSourceInterface
+type FakeApiServerSources struct {
+ Fake *FakeSourcesV1alpha1
+ ns string
+}
+
+var apiserversourcesResource = schema.GroupVersionResource{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Resource: "apiserversources"}
+
+var apiserversourcesKind = schema.GroupVersionKind{Group: "sources.eventing.knative.dev", Version: "v1alpha1", Kind: "ApiServerSource"}
+
+// Get takes name of the apiServerSource, and returns the corresponding apiServerSource object, and an error if there is any.
+func (c *FakeApiServerSources) Get(name string, options v1.GetOptions) (result *v1alpha1.ApiServerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(apiserversourcesResource, c.ns, name), &v1alpha1.ApiServerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ApiServerSource), err
+}
+
+// List takes label and field selectors, and returns the list of ApiServerSources that match those selectors.
+func (c *FakeApiServerSources) List(opts v1.ListOptions) (result *v1alpha1.ApiServerSourceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(apiserversourcesResource, apiserversourcesKind, c.ns, opts), &v1alpha1.ApiServerSourceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ApiServerSourceList{ListMeta: obj.(*v1alpha1.ApiServerSourceList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ApiServerSourceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested apiServerSources.
+func (c *FakeApiServerSources) Watch(opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(apiserversourcesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a apiServerSource and creates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *FakeApiServerSources) Create(apiServerSource *v1alpha1.ApiServerSource) (result *v1alpha1.ApiServerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(apiserversourcesResource, c.ns, apiServerSource), &v1alpha1.ApiServerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ApiServerSource), err
+}
+
+// Update takes the representation of a apiServerSource and updates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *FakeApiServerSources) Update(apiServerSource *v1alpha1.ApiServerSource) (result *v1alpha1.ApiServerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(apiserversourcesResource, c.ns, apiServerSource), &v1alpha1.ApiServerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ApiServerSource), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeApiServerSources) UpdateStatus(apiServerSource *v1alpha1.ApiServerSource) (*v1alpha1.ApiServerSource, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(apiserversourcesResource, "status", c.ns, apiServerSource), &v1alpha1.ApiServerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ApiServerSource), err
+}
+
+// Delete takes name of the apiServerSource and deletes it. Returns an error if one occurs.
+func (c *FakeApiServerSources) Delete(name string, options *v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteAction(apiserversourcesResource, c.ns, name), &v1alpha1.ApiServerSource{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeApiServerSources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(apiserversourcesResource, c.ns, listOptions)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ApiServerSourceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched apiServerSource.
+func (c *FakeApiServerSources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ApiServerSource, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(apiserversourcesResource, c.ns, name, data, subresources...), &v1alpha1.ApiServerSource{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ApiServerSource), err
+}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
index 2742a8e7195..b244bc3539e 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/fake/fake_sources_client.go
@@ -28,6 +28,10 @@ type FakeSourcesV1alpha1 struct {
*testing.Fake
}
+func (c *FakeSourcesV1alpha1) ApiServerSources(namespace string) v1alpha1.ApiServerSourceInterface {
+ return &FakeApiServerSources{c, namespace}
+}
+
func (c *FakeSourcesV1alpha1) ContainerSources(namespace string) v1alpha1.ContainerSourceInterface {
return &FakeContainerSources{c, namespace}
}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
index b250cd0c5e3..90d7d66818c 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/generated_expansion.go
@@ -18,6 +18,8 @@ limitations under the License.
package v1alpha1
+type ApiServerSourceExpansion interface{}
+
type ContainerSourceExpansion interface{}
type CronJobSourceExpansion interface{}
diff --git a/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
index dbbdbdf0392..c63c6e26b69 100644
--- a/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
+++ b/pkg/client/clientset/versioned/typed/sources/v1alpha1/sources_client.go
@@ -27,6 +27,7 @@ import (
type SourcesV1alpha1Interface interface {
RESTClient() rest.Interface
+ ApiServerSourcesGetter
ContainerSourcesGetter
CronJobSourcesGetter
}
@@ -36,6 +37,10 @@ type SourcesV1alpha1Client struct {
restClient rest.Interface
}
+func (c *SourcesV1alpha1Client) ApiServerSources(namespace string) ApiServerSourceInterface {
+ return newApiServerSources(c, namespace)
+}
+
func (c *SourcesV1alpha1Client) ContainerSources(namespace string) ContainerSourceInterface {
return newContainerSources(c, namespace)
}
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index fbcb3b3bcba..4ba6d95cab5 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -68,6 +68,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Triggers().Informer()}, nil
// Group=sources.eventing.knative.dev, Version=v1alpha1
+ case sourcesv1alpha1.SchemeGroupVersion.WithResource("apiserversources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().ApiServerSources().Informer()}, nil
case sourcesv1alpha1.SchemeGroupVersion.WithResource("containersources"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1alpha1().ContainerSources().Informer()}, nil
case sourcesv1alpha1.SchemeGroupVersion.WithResource("cronjobsources"):
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/apiserversource.go b/pkg/client/informers/externalversions/sources/v1alpha1/apiserversource.go
new file mode 100644
index 00000000000..9b99c4a78e3
--- /dev/null
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/apiserversource.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ time "time"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ versioned "github.com/knative/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/knative/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ApiServerSourceInformer provides access to a shared informer and lister for
+// ApiServerSources.
+type ApiServerSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.ApiServerSourceLister
+}
+
+type apiServerSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewApiServerSourceInformer constructs a new informer for ApiServerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewApiServerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredApiServerSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredApiServerSourceInformer constructs a new informer for ApiServerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredApiServerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().ApiServerSources(namespace).List(options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1alpha1().ApiServerSources(namespace).Watch(options)
+ },
+ },
+ &sourcesv1alpha1.ApiServerSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *apiServerSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredApiServerSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *apiServerSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1alpha1.ApiServerSource{}, f.defaultInformer)
+}
+
+func (f *apiServerSourceInformer) Lister() v1alpha1.ApiServerSourceLister {
+ return v1alpha1.NewApiServerSourceLister(f.Informer().GetIndexer())
+}
diff --git a/pkg/client/informers/externalversions/sources/v1alpha1/interface.go b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
index 9647603c828..ea02ad62be7 100644
--- a/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
+++ b/pkg/client/informers/externalversions/sources/v1alpha1/interface.go
@@ -24,6 +24,8 @@ import (
// Interface provides access to all the informers in this group version.
type Interface interface {
+ // ApiServerSources returns a ApiServerSourceInformer.
+ ApiServerSources() ApiServerSourceInformer
// ContainerSources returns a ContainerSourceInformer.
ContainerSources() ContainerSourceInformer
// CronJobSources returns a CronJobSourceInformer.
@@ -41,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
+// ApiServerSources returns a ApiServerSourceInformer.
+func (v *version) ApiServerSources() ApiServerSourceInformer {
+ return &apiServerSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
// ContainerSources returns a ContainerSourceInformer.
func (v *version) ContainerSources() ContainerSourceInformer {
return &containerSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
diff --git a/pkg/client/listers/sources/v1alpha1/apiserversource.go b/pkg/client/listers/sources/v1alpha1/apiserversource.go
new file mode 100644
index 00000000000..ac970ca4ad0
--- /dev/null
+++ b/pkg/client/listers/sources/v1alpha1/apiserversource.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ApiServerSourceLister helps list ApiServerSources.
+type ApiServerSourceLister interface {
+ // List lists all ApiServerSources in the indexer.
+ List(selector labels.Selector) (ret []*v1alpha1.ApiServerSource, err error)
+ // ApiServerSources returns an object that can list and get ApiServerSources.
+ ApiServerSources(namespace string) ApiServerSourceNamespaceLister
+ ApiServerSourceListerExpansion
+}
+
+// apiServerSourceLister implements the ApiServerSourceLister interface.
+type apiServerSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewApiServerSourceLister returns a new ApiServerSourceLister.
+func NewApiServerSourceLister(indexer cache.Indexer) ApiServerSourceLister {
+ return &apiServerSourceLister{indexer: indexer}
+}
+
+// List lists all ApiServerSources in the indexer.
+func (s *apiServerSourceLister) List(selector labels.Selector) (ret []*v1alpha1.ApiServerSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ApiServerSource))
+ })
+ return ret, err
+}
+
+// ApiServerSources returns an object that can list and get ApiServerSources.
+func (s *apiServerSourceLister) ApiServerSources(namespace string) ApiServerSourceNamespaceLister {
+ return apiServerSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ApiServerSourceNamespaceLister helps list and get ApiServerSources.
+type ApiServerSourceNamespaceLister interface {
+ // List lists all ApiServerSources in the indexer for a given namespace.
+ List(selector labels.Selector) (ret []*v1alpha1.ApiServerSource, err error)
+ // Get retrieves the ApiServerSource from the indexer for a given namespace and name.
+ Get(name string) (*v1alpha1.ApiServerSource, error)
+ ApiServerSourceNamespaceListerExpansion
+}
+
+// apiServerSourceNamespaceLister implements the ApiServerSourceNamespaceLister
+// interface.
+type apiServerSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ApiServerSources in the indexer for a given namespace.
+func (s apiServerSourceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ApiServerSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ApiServerSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the ApiServerSource from the indexer for a given namespace and name.
+func (s apiServerSourceNamespaceLister) Get(name string) (*v1alpha1.ApiServerSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("apiserversource"), name)
+ }
+ return obj.(*v1alpha1.ApiServerSource), nil
+}
diff --git a/pkg/client/listers/sources/v1alpha1/expansion_generated.go b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
index 49fe2bab0ae..5f68e073e54 100644
--- a/pkg/client/listers/sources/v1alpha1/expansion_generated.go
+++ b/pkg/client/listers/sources/v1alpha1/expansion_generated.go
@@ -18,6 +18,14 @@ limitations under the License.
package v1alpha1
+// ApiServerSourceListerExpansion allows custom methods to be added to
+// ApiServerSourceLister.
+type ApiServerSourceListerExpansion interface{}
+
+// ApiServerSourceNamespaceListerExpansion allows custom methods to be added to
+// ApiServerSourceNamespaceLister.
+type ApiServerSourceNamespaceListerExpansion interface{}
+
// ContainerSourceListerExpansion allows custom methods to be added to
// ContainerSourceLister.
type ContainerSourceListerExpansion interface{}
diff --git a/pkg/reconciler/apiserversource/apiserversource.go b/pkg/reconciler/apiserversource/apiserversource.go
new file mode 100644
index 00000000000..38d6b9d5f74
--- /dev/null
+++ b/pkg/reconciler/apiserversource/apiserversource.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserversource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "sync"
+ "time"
+
+ v1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ sourceinformers "github.com/knative/eventing/pkg/client/informers/externalversions/sources/v1alpha1"
+ listers "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
+ "github.com/knative/eventing/pkg/duck"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/apiserversource/resources"
+ "github.com/knative/pkg/controller"
+ "github.com/knative/pkg/logging"
+ "go.uber.org/zap"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ appsv1informers "k8s.io/client-go/informers/apps/v1"
+ appsv1listers "k8s.io/client-go/listers/apps/v1"
+ "k8s.io/client-go/tools/cache"
+)
+
+const (
+	// ReconcilerName is the name of the reconciler
+	ReconcilerName = "ApiServerSources"
+
+	// controllerAgentName is the string used by this controller to identify
+	// itself when creating events.
+	controllerAgentName = "apiserver-source-controller"
+
+	// Name of the corev1.Events emitted from the reconciliation process
+	apiserversourceReconciled         = "ApiServerSourceReconciled"
+	apiserversourceUpdateStatusFailed = "ApiServerSourceUpdateStatusFailed"
+
+	// raImageEnvVar is the name of the environment variable that contains the receive adapter's
+	// image. It must be defined.
+	raImageEnvVar = "APISERVER_RA_IMAGE"
+)
+
+// Reconciler reconciles a ApiServerSource object
+type Reconciler struct {
+	*reconciler.Base
+
+	// receiveAdapterImage is resolved lazily (see getReceiveAdapterImage);
+	// once guards the one-time environment lookup.
+	receiveAdapterImage string
+	once                sync.Once
+
+	// listers index properties about resources
+	apiserversourceLister listers.ApiServerSourceLister
+	deploymentLister      appsv1listers.DeploymentLister
+}
+
+// NewController initializes the controller and is called by the generated code
+// Registers event handlers to enqueue events
+func NewController(
+	opt reconciler.Options,
+	apiserversourceInformer sourceinformers.ApiServerSourceInformer,
+	deploymentInformer appsv1informers.DeploymentInformer,
+) *controller.Impl {
+	r := &Reconciler{
+		Base:                  reconciler.NewBase(opt, controllerAgentName),
+		apiserversourceLister: apiserversourceInformer.Lister(),
+		deploymentLister:      deploymentInformer.Lister(),
+	}
+	impl := controller.NewImpl(r, r.Logger, ReconcilerName, reconciler.MustNewStatsReporter(ReconcilerName, r.Logger))
+
+	r.Logger.Info("Setting up event handlers")
+	// Every ApiServerSource change is enqueued directly for reconciliation.
+	apiserversourceInformer.Informer().AddEventHandler(reconciler.Handler(impl.Enqueue))
+
+	// Deployment events are filtered to those owned by an ApiServerSource and
+	// mapped back to the owner, so receive-adapter drift triggers a reconcile.
+	deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
+		FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("ApiServerSource")),
+		Handler:    reconciler.Handler(impl.EnqueueControllerOf),
+	})
+
+	return impl
+}
+
+// Reconcile compares the actual state with the desired, and attempts to
+// converge the two. It then updates the Status block of the ApiServerSource
+// resource with the current status of the resource.
+func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
+	// Convert the namespace/name string into a distinct namespace and name
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		r.Logger.Errorf("invalid resource key: %s", key)
+		return nil
+	}
+
+	// Get the ApiServerSource resource with this namespace/name
+	original, err := r.apiserversourceLister.ApiServerSources(namespace).Get(name)
+	if apierrors.IsNotFound(err) {
+		// The resource may no longer exist, in which case we stop processing.
+		logging.FromContext(ctx).Error("ApiServerSource key in work queue no longer exists", zap.Any("key", key))
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Don't modify the informers copy
+	apiserversource := original.DeepCopy()
+
+	// Reconcile this copy of the ApiServerSource and then write back any status
+	// updates regardless of whether the reconcile error out.
+	err = r.reconcile(ctx, apiserversource)
+	if err != nil {
+		logging.FromContext(ctx).Warn("Error reconciling ApiServerSource", zap.Error(err))
+	} else {
+		logging.FromContext(ctx).Debug("ApiServerSource reconciled")
+		r.Recorder.Eventf(apiserversource, corev1.EventTypeNormal, apiserversourceReconciled, `ApiServerSource reconciled: "%s/%s"`, apiserversource.Namespace, apiserversource.Name)
+	}
+
+	if _, updateStatusErr := r.updateStatus(ctx, apiserversource.DeepCopy()); updateStatusErr != nil {
+		// Log and record the status-update error itself; the outer err is the
+		// reconcile error (possibly nil) and was previously logged here by
+		// mistake, hiding the actual failure.
+		logging.FromContext(ctx).Warn("Failed to update the ApiServerSource", zap.Error(updateStatusErr))
+		r.Recorder.Eventf(apiserversource, corev1.EventTypeWarning, apiserversourceUpdateStatusFailed, "Failed to update ApiServerSource's status: %v", updateStatusErr)
+		return updateStatusErr
+	}
+
+	// Requeue if the resource is not ready:
+	return err
+}
+
+// reconcile resolves the sink URI for source and ensures the receive adapter
+// Deployment exists, recording progress on the source's status conditions.
+// It returns the first error encountered; status mutations made before the
+// error are written back by the caller.
+func (r *Reconciler) reconcile(ctx context.Context, source *v1alpha1.ApiServerSource) error {
+	source.Status.InitializeConditions()
+
+	sinkURI, err := duck.GetSinkURI(ctx, r.DynamicClientSet, source.Spec.Sink, source.Namespace)
+	if err != nil {
+		// NOTE(review): the empty message drops the underlying error detail
+		// from the status condition; consider surfacing err here.
+		source.Status.MarkNoSink("NotFound", "")
+		return err
+	}
+	source.Status.MarkSink(sinkURI)
+
+	_, err = r.createReceiveAdapter(ctx, source, sinkURI)
+	if err != nil {
+		r.Logger.Error("Unable to create the receive adapter", zap.Error(err))
+		return err
+	}
+
+	// Update source status
+	source.Status.MarkDeployed()
+	return nil
+}
+
+// getReceiveAdapterImage returns the receive adapter image, resolving it from
+// the APISERVER_RA_IMAGE environment variable exactly once. It panics if the
+// variable is required but undefined. A pre-populated receiveAdapterImage
+// (e.g. injected by tests) skips the environment lookup.
+func (r *Reconciler) getReceiveAdapterImage() string {
+	// The emptiness check lives inside once.Do: the previous unsynchronized
+	// read of r.receiveAdapterImage outside the Once raced with the write
+	// below when Reconcile runs on multiple workers.
+	r.once.Do(func() {
+		if r.receiveAdapterImage == "" {
+			raImage, defined := os.LookupEnv(raImageEnvVar)
+			if !defined {
+				panic(fmt.Errorf("required environment variable %q not defined", raImageEnvVar))
+			}
+			r.receiveAdapterImage = raImage
+		}
+	})
+	return r.receiveAdapterImage
+}
+
+// createReceiveAdapter ensures the receive adapter Deployment for src exists
+// and matches the desired pod spec, creating or updating it as needed, and
+// returns the resulting Deployment.
+func (r *Reconciler) createReceiveAdapter(ctx context.Context, src *v1alpha1.ApiServerSource, sinkURI string) (*appsv1.Deployment, error) {
+	ra, err := r.getReceiveAdapter(ctx, src)
+	if err != nil && !apierrors.IsNotFound(err) {
+		logging.FromContext(ctx).Error("Unable to get an existing receive adapter", zap.Error(err))
+		return nil, err
+	}
+	adapterArgs := resources.ReceiveAdapterArgs{
+		Image:   r.getReceiveAdapterImage(),
+		Source:  src,
+		Labels:  resources.Labels(src.Name),
+		SinkURI: sinkURI,
+	}
+	expected := resources.MakeReceiveAdapter(&adapterArgs)
+	if ra != nil {
+		// An adapter already exists: update it in place only when the pod
+		// spec drifted. The unconditional early return that previously sat
+		// before adapterArgs made this branch unreachable, so spec changes
+		// (e.g. a new sink URI) were never rolled out.
+		if r.podSpecChanged(ra.Spec.Template.Spec, expected.Spec.Template.Spec) {
+			ra.Spec.Template.Spec = expected.Spec.Template.Spec
+			if ra, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Update(ra); err != nil {
+				return ra, err
+			}
+			logging.FromContext(ctx).Desugar().Info("Receive Adapter updated.", zap.Any("receiveAdapter", ra))
+		} else {
+			logging.FromContext(ctx).Desugar().Info("Reusing existing receive adapter", zap.Any("receiveAdapter", ra))
+		}
+		return ra, nil
+	}
+
+	if ra, err = r.KubeClientSet.AppsV1().Deployments(src.Namespace).Create(expected); err != nil {
+		return nil, err
+	}
+	logging.FromContext(ctx).Desugar().Info("Receive Adapter created.", zap.Any("receiveAdapter", expected))
+	return ra, err
+}
+
+// podSpecChanged reports whether the deployed pod spec differs from the
+// desired one. DeepDerivative ignores fields that are empty in newPodSpec, so
+// the extra per-container Env comparison catches env vars that were removed
+// from the desired spec — a change DeepDerivative alone would miss.
+func (r *Reconciler) podSpecChanged(oldPodSpec corev1.PodSpec, newPodSpec corev1.PodSpec) bool {
+	if !equality.Semantic.DeepDerivative(newPodSpec, oldPodSpec) {
+		return true
+	}
+	if len(oldPodSpec.Containers) != len(newPodSpec.Containers) {
+		return true
+	}
+	for i := range newPodSpec.Containers {
+		if !equality.Semantic.DeepEqual(newPodSpec.Containers[i].Env, oldPodSpec.Containers[i].Env) {
+			return true
+		}
+	}
+	return false
+}
+
+// getReceiveAdapter returns the receive adapter Deployment controlled by src,
+// or a NotFound error when none exists.
+func (r *Reconciler) getReceiveAdapter(ctx context.Context, src *v1alpha1.ApiServerSource) (*appsv1.Deployment, error) {
+	dl, err := r.KubeClientSet.AppsV1().Deployments(src.Namespace).List(metav1.ListOptions{
+		LabelSelector: r.getLabelSelector(src).String(),
+	})
+	if err != nil {
+		// Desugared zap takes a plain message plus structured fields; the
+		// previous "%v" verb in the message string was never expanded.
+		logging.FromContext(ctx).Desugar().Error("Unable to list deployments", zap.Error(err))
+		return nil, err
+	}
+	for _, dep := range dl.Items {
+		if metav1.IsControlledBy(&dep, src) {
+			return &dep, nil
+		}
+	}
+	return nil, apierrors.NewNotFound(schema.GroupResource{}, "")
+}
+
+// getLabelSelector builds the label selector that identifies the receive
+// adapter Deployment created for src (see resources.Labels).
+func (r *Reconciler) getLabelSelector(src *v1alpha1.ApiServerSource) labels.Selector {
+	return labels.SelectorFromSet(resources.Labels(src.Name))
+}
+
+// updateStatus writes desired.Status back to the API server when it differs
+// from the stored status, and logs how long the source took to become ready.
+func (r *Reconciler) updateStatus(ctx context.Context, desired *v1alpha1.ApiServerSource) (*v1alpha1.ApiServerSource, error) {
+	current, err := r.apiserversourceLister.ApiServerSources(desired.Namespace).Get(desired.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	// Nothing changed; skip the API round trip.
+	if reflect.DeepEqual(current.Status, desired.Status) {
+		return current, nil
+	}
+
+	becomesReady := desired.Status.IsReady() && !current.Status.IsReady()
+
+	// Copy before mutating so the informer cache stays pristine.
+	updated := current.DeepCopy()
+	updated.Status = desired.Status
+
+	src, err := r.EventingClientSet.SourcesV1alpha1().ApiServerSources(desired.Namespace).UpdateStatus(updated)
+	if err == nil && becomesReady {
+		duration := time.Since(src.ObjectMeta.CreationTimestamp.Time)
+		r.Logger.Infof("ApiServerSource %q became ready after %v", current.Name, duration)
+	}
+
+	return src, err
+}
diff --git a/pkg/reconciler/apiserversource/apiserversource_test.go b/pkg/reconciler/apiserversource/apiserversource_test.go
new file mode 100644
index 00000000000..f93a4b9afd9
--- /dev/null
+++ b/pkg/reconciler/apiserversource/apiserversource_test.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserversource
+
+import (
+ "os"
+ "testing"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ kubeinformers "k8s.io/client-go/informers"
+ fakekubeclientset "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/kubernetes/scheme"
+ clientgotesting "k8s.io/client-go/testing"
+
+ sourcesv1alpha1 "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ fakeclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
+ informers "github.com/knative/eventing/pkg/client/informers/externalversions"
+ "github.com/knative/eventing/pkg/reconciler"
+ "github.com/knative/eventing/pkg/reconciler/apiserversource/resources"
+ "github.com/knative/eventing/pkg/utils"
+ duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
+ "github.com/knative/pkg/controller"
+ logtesting "github.com/knative/pkg/logging/testing"
+
+ . "github.com/knative/eventing/pkg/reconciler/testing"
+ . "github.com/knative/pkg/reconciler/testing"
+)
+
+var (
+ sinkRef = corev1.ObjectReference{
+ Name: sinkName,
+ Kind: "Channel",
+ APIVersion: "eventing.knative.dev/v1alpha1",
+ }
+ sinkDNS = "sink.mynamespace.svc." + utils.GetClusterDomainName()
+ sinkURI = "http://" + sinkDNS + "/"
+)
+
+const (
+ image = "github.com/knative/test/image"
+ sourceName = "test-apiserver-source"
+ sourceUID = "1234-5678-90"
+ testNS = "testnamespace"
+
+ sinkName = "testsink"
+)
+
+func init() {
+	// Add types to scheme so the fake clients used by the table tests can
+	// serve and track these objects.
+	_ = appsv1.AddToScheme(scheme.Scheme)
+	_ = corev1.AddToScheme(scheme.Scheme)
+	_ = duckv1alpha1.AddToScheme(scheme.Scheme)
+
+	// The reconciler reads the receive adapter image from this env var.
+	_ = os.Setenv("APISERVER_RA_IMAGE", image)
+}
+
+// TestReconcile table-tests the ApiServerSource reconciler: a source whose
+// sink does not exist is marked SinkNotFound (and errors), while a valid
+// source gets a receive adapter Deployment plus Deployed/Sink status.
+func TestReconcile(t *testing.T) {
+	table := TableTest{
+		{
+			Name: "missing sink",
+			Objects: []runtime.Object{
+				NewApiServerSource(sourceName, testNS,
+					WithApiServerSourceSpec(sourcesv1alpha1.ApiServerSourceSpec{
+						Sink: &sinkRef,
+					}),
+				),
+			},
+			Key:     testNS + "/" + sourceName,
+			WantErr: true,
+			WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+				Object: NewApiServerSource(sourceName, testNS,
+					WithApiServerSourceSpec(sourcesv1alpha1.ApiServerSourceSpec{
+						Sink: &sinkRef,
+					}),
+					// Status Update:
+					WithInitApiServerSourceConditions,
+					WithApiServerSourceSinkNotFound,
+				),
+			}},
+		},
+		{
+			Name: "valid",
+			Objects: []runtime.Object{
+				NewApiServerSource(sourceName, testNS,
+					WithApiServerSourceSpec(sourcesv1alpha1.ApiServerSourceSpec{
+						Resources: []sourcesv1alpha1.ApiServerResource{
+							sourcesv1alpha1.ApiServerResource{
+								APIVersion: "",
+								Kind:       "Namespace",
+							},
+						},
+						Sink: &sinkRef,
+					}),
+				),
+				// Addressable sink the source resolves its URI against.
+				NewChannel(sinkName, testNS,
+					WithInitChannelConditions,
+					WithChannelAddress(sinkDNS),
+				),
+			},
+			Key: testNS + "/" + sourceName,
+			WantEvents: []string{
+				Eventf(corev1.EventTypeNormal, "ApiServerSourceReconciled", `ApiServerSource reconciled: "%s/%s"`, testNS, sourceName),
+			},
+			WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+				Object: NewApiServerSource(sourceName, testNS,
+					WithApiServerSourceSpec(sourcesv1alpha1.ApiServerSourceSpec{
+						Resources: []sourcesv1alpha1.ApiServerResource{
+							sourcesv1alpha1.ApiServerResource{
+								APIVersion: "",
+								Kind:       "Namespace",
+							},
+						},
+						Sink: &sinkRef,
+					}),
+					// Status Update:
+					WithInitApiServerSourceConditions,
+					WithApiServerSourceDeployed,
+					WithApiServerSourceSink(sinkURI),
+				),
+			}},
+			WantCreates: []metav1.Object{
+				makeReceiveAdapter(),
+			},
+		},
+	}
+
+	defer logtesting.ClearAll()
+	table.Test(t, MakeFactory(func(listers *Listers, opt reconciler.Options) controller.Reconciler {
+		return &Reconciler{
+			Base:                  reconciler.NewBase(opt, controllerAgentName),
+			apiserversourceLister: listers.GetApiServerSourceLister(),
+			deploymentLister:      listers.GetDeploymentLister(),
+		}
+	}))
+}
+// TestNew verifies that NewController wires the informers together and
+// returns a non-nil controller.
+func TestNew(t *testing.T) {
+	defer logtesting.ClearAll()
+	kubeClient := fakekubeclientset.NewSimpleClientset()
+	eventingClient := fakeclientset.NewSimpleClientset()
+	eventingInformer := informers.NewSharedInformerFactory(eventingClient, 0)
+	kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
+
+	apiserverInformer := eventingInformer.Sources().V1alpha1().ApiServerSources()
+	deploymentInformer := kubeInformer.Apps().V1().Deployments()
+
+	c := NewController(reconciler.Options{
+		KubeClientSet:     kubeClient,
+		EventingClientSet: eventingClient,
+		Logger:            logtesting.TestLogger(t),
+	},
+		apiserverInformer,
+		deploymentInformer,
+	)
+
+	if c == nil {
+		t.Fatal("Expected NewController to return a non-nil value")
+	}
+}
+
+// makeReceiveAdapter builds the Deployment the reconciler is expected to
+// create in the "valid" table test above; it mirrors that test's source spec.
+func makeReceiveAdapter() *appsv1.Deployment {
+	source := NewApiServerSource(sourceName, testNS,
+		WithApiServerSourceSpec(sourcesv1alpha1.ApiServerSourceSpec{
+			Resources: []sourcesv1alpha1.ApiServerResource{
+				sourcesv1alpha1.ApiServerResource{
+					APIVersion: "",
+					Kind:       "Namespace",
+				},
+			},
+			Sink: &sinkRef,
+		},
+		),
+		// Status Update:
+		WithInitApiServerSourceConditions,
+		WithApiServerSourceDeployed,
+		WithApiServerSourceSink(sinkURI),
+	)
+
+	args := resources.ReceiveAdapterArgs{
+		Image:   image,
+		Source:  source,
+		Labels:  resources.Labels(sourceName),
+		SinkURI: sinkURI,
+	}
+	return resources.MakeReceiveAdapter(&args)
+}
diff --git a/pkg/reconciler/apiserversource/doc.go b/pkg/reconciler/apiserversource/doc.go
new file mode 100644
index 00000000000..02638d2eb7c
--- /dev/null
+++ b/pkg/reconciler/apiserversource/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package apiserversource implements the ApiServerSource controller.
+package apiserversource
diff --git a/pkg/reconciler/apiserversource/resources/labels.go b/pkg/reconciler/apiserversource/resources/labels.go
new file mode 100644
index 00000000000..1bcb28668e2
--- /dev/null
+++ b/pkg/reconciler/apiserversource/resources/labels.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+const (
+	// controllerAgentName is the string used by this controller to identify
+	// itself when creating events.
+	controllerAgentName = "apiserver-source-controller"
+)
+
+// Labels returns the labels applied to resources created for the named
+// ApiServerSource; the reconciler also uses them as its deployment selector.
+func Labels(name string) map[string]string {
+	return map[string]string{
+		"knative-eventing-source":      controllerAgentName,
+		"knative-eventing-source-name": name,
+	}
+}
diff --git a/pkg/reconciler/apiserversource/resources/receive_adapter.go b/pkg/reconciler/apiserversource/resources/receive_adapter.go
new file mode 100644
index 00000000000..24cbadce897
--- /dev/null
+++ b/pkg/reconciler/apiserversource/resources/receive_adapter.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+	v1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ReceiveAdapterArgs are the arguments needed to create an ApiServer Receive
+// Adapter. Every field is required.
+type ReceiveAdapterArgs struct {
+	Image   string // receive adapter container image
+	Source  *v1alpha1.ApiServerSource
+	Labels  map[string]string // applied to the Deployment and its pods
+	SinkURI string
+}
+
+// MakeReceiveAdapter generates (but does not insert into K8s) the Receive Adapter Deployment for
+// ApiServer Sources.
+func MakeReceiveAdapter(args *ReceiveAdapterArgs) *v1.Deployment {
+	// A single adapter replica per source.
+	replicas := int32(1)
+	return &v1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: args.Source.Namespace,
+			// GenerateName lets the API server append a unique suffix.
+			GenerateName: fmt.Sprintf("apiserver-%s-", args.Source.Name),
+			Labels:       args.Labels,
+		},
+		Spec: v1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: args.Labels,
+			},
+			Replicas: &replicas,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						// Opt the adapter pod into the Istio mesh.
+						"sidecar.istio.io/inject": "true",
+					},
+					Labels: args.Labels,
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: args.Source.Spec.ServiceAccountName,
+					Containers: []corev1.Container{
+						{
+							Name:  "receive-adapter",
+							Image: args.Image,
+							Env:   makeEnv(args.SinkURI, &args.Source.Spec),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// makeEnv builds the environment for the receive adapter container. The
+// resources from the spec are flattened into parallel comma-separated lists
+// (API_VERSION, KIND, CONTROLLER) that the adapter re-associates by index.
+func makeEnv(sinkURI string, spec *v1alpha1.ApiServerSourceSpec) []corev1.EnvVar {
+	n := len(spec.Resources)
+	apiversions := make([]string, 0, n)
+	kinds := make([]string, 0, n)
+	controlled := make([]string, 0, n)
+	for _, res := range spec.Resources {
+		apiversions = append(apiversions, res.APIVersion)
+		kinds = append(kinds, res.Kind)
+		controlled = append(controlled, strconv.FormatBool(res.Controller))
+	}
+
+	return []corev1.EnvVar{
+		{
+			Name:  "SINK_URI",
+			Value: sinkURI,
+		},
+		{
+			Name:  "API_VERSION",
+			Value: strings.Join(apiversions, ","),
+		},
+		{
+			Name:  "KIND",
+			Value: strings.Join(kinds, ","),
+		},
+		{
+			Name:  "CONTROLLER",
+			Value: strings.Join(controlled, ","),
+		},
+		{
+			// Resolved at runtime via the downward API.
+			Name: "SYSTEM_NAMESPACE",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.namespace",
+				},
+			},
+		},
+	}
+}
diff --git a/pkg/reconciler/apiserversource/resources/receive_adapter_test.go b/pkg/reconciler/apiserversource/resources/receive_adapter_test.go
new file mode 100644
index 00000000000..1ad126b3c61
--- /dev/null
+++ b/pkg/reconciler/apiserversource/resources/receive_adapter_test.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+ v1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TestMakeReceiveAdapter checks that the generated Deployment carries the
+// source's labels and service account, the istio annotation, and the
+// flattened per-resource env vars (note API_VERSION is "," for two empty
+// APIVersions).
+func TestMakeReceiveAdapter(t *testing.T) {
+	src := &v1alpha1.ApiServerSource{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "source-name",
+			Namespace: "source-namespace",
+		},
+		Spec: v1alpha1.ApiServerSourceSpec{
+			ServiceAccountName: "source-svc-acct",
+			Resources: []v1alpha1.ApiServerResource{
+				v1alpha1.ApiServerResource{
+					APIVersion: "",
+					Kind:       "Namespace",
+				},
+				v1alpha1.ApiServerResource{
+					APIVersion: "",
+					Kind:       "Pod",
+					Controller: true,
+				},
+			},
+		},
+	}
+
+	got := MakeReceiveAdapter(&ReceiveAdapterArgs{
+		Image:  "test-image",
+		Source: src,
+		Labels: map[string]string{
+			"test-key1": "test-value1",
+			"test-key2": "test-value2",
+		},
+		SinkURI: "sink-uri",
+	})
+
+	one := int32(1)
+	want := &v1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:    "source-namespace",
+			GenerateName: "apiserver-source-name-",
+			Labels: map[string]string{
+				"test-key1": "test-value1",
+				"test-key2": "test-value2",
+			},
+		},
+		Spec: v1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"test-key1": "test-value1",
+					"test-key2": "test-value2",
+				},
+			},
+			Replicas: &one,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						"sidecar.istio.io/inject": "true",
+					},
+					Labels: map[string]string{
+						"test-key1": "test-value1",
+						"test-key2": "test-value2",
+					},
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: "source-svc-acct",
+					Containers: []corev1.Container{
+						{
+							Name:  "receive-adapter",
+							Image: "test-image",
+							Env: []corev1.EnvVar{
+								{
+									Name:  "SINK_URI",
+									Value: "sink-uri",
+								}, {
+									Name:  "API_VERSION",
+									Value: ",",
+								}, {
+									Name:  "KIND",
+									Value: "Namespace,Pod",
+								}, {
+									Name:  "CONTROLLER",
+									Value: "false,true",
+								}, {
+									Name: "SYSTEM_NAMESPACE",
+									ValueFrom: &corev1.EnvVarSource{
+										FieldRef: &corev1.ObjectFieldSelector{
+											FieldPath: "metadata.namespace",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	if diff := cmp.Diff(want, got); diff != "" {
+		t.Errorf("unexpected deploy (-want, +got) = %v", diff)
+	}
+}
diff --git a/pkg/reconciler/testing/apiserversource.go b/pkg/reconciler/testing/apiserversource.go
new file mode 100644
index 00000000000..158315a3085
--- /dev/null
+++ b/pkg/reconciler/testing/apiserversource.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/knative/eventing/pkg/apis/sources/v1alpha1"
+)
+
+// ApiServerSourceOption enables further configuration of an ApiServerSource.
+type ApiServerSourceOption func(*v1alpha1.ApiServerSource)
+
+// NewApiServerSource creates an ApiServerSource with the given options applied.
+func NewApiServerSource(name, namespace string, o ...ApiServerSourceOption) *v1alpha1.ApiServerSource {
+	c := &v1alpha1.ApiServerSource{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+	}
+	for _, opt := range o {
+		opt(c)
+	}
+	//c.SetDefaults(context.Background()) // TODO: We should add defaults and validation.
+	return c
+}
+
+// WithInitApiServerSourceConditions initializes the ApiServerSource's conditions.
+func WithInitApiServerSourceConditions(s *v1alpha1.ApiServerSource) {
+	s.Status.InitializeConditions()
+}
+
+// WithApiServerSourceSinkNotFound marks the source's Sink condition as
+// NotFound with an empty message.
+func WithApiServerSourceSinkNotFound(s *v1alpha1.ApiServerSource) {
+	s.Status.MarkNoSink("NotFound", "")
+}
+
+// WithApiServerSourceSink records the resolved sink URI in the source's status.
+func WithApiServerSourceSink(uri string) ApiServerSourceOption {
+	return func(s *v1alpha1.ApiServerSource) {
+		s.Status.MarkSink(uri)
+	}
+}
+
+// WithApiServerSourceDeployed marks the source's Deployed condition.
+func WithApiServerSourceDeployed(s *v1alpha1.ApiServerSource) {
+	s.Status.MarkDeployed()
+}
+
+// WithApiServerSourceDeleted sets a fixed deletion timestamp on the source so
+// tests can exercise finalizer paths deterministically.
+func WithApiServerSourceDeleted(c *v1alpha1.ApiServerSource) {
+	t := metav1.NewTime(time.Unix(1e9, 0))
+	c.ObjectMeta.SetDeletionTimestamp(&t)
+}
+
+// WithApiServerSourceSpec overwrites the source's spec.
+func WithApiServerSourceSpec(spec v1alpha1.ApiServerSourceSpec) ApiServerSourceOption {
+	return func(c *v1alpha1.ApiServerSource) {
+		c.Spec = spec
+	}
+}
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 1f41a286e5f..7f7b896b6ab 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -126,6 +126,10 @@ func (l *Listers) GetCronJobSourceLister() sourcelisters.CronJobSourceLister {
return sourcelisters.NewCronJobSourceLister(l.indexerFor(&sourcesv1alpha1.CronJobSource{}))
}
+// GetApiServerSourceLister returns a lister backed by the fixture indexer for
+// ApiServerSource objects.
+func (l *Listers) GetApiServerSourceLister() sourcelisters.ApiServerSourceLister {
+	return sourcelisters.NewApiServerSourceLister(l.indexerFor(&sourcesv1alpha1.ApiServerSource{}))
+}
+
func (l *Listers) GetContainerSourceLister() sourcelisters.ContainerSourceLister {
return sourcelisters.NewContainerSourceLister(l.indexerFor(&sourcesv1alpha1.ContainerSource{}))
}
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index dcf5fffc254..2a4f2a03a7b 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -3454,6 +3454,31 @@ SOFTWARE.
+===========================================================
+Import: github.com/knative/eventing/vendor/github.com/kelseyhightower/envconfig
+
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/knative/pkg
diff --git a/vendor/github.com/kelseyhightower/envconfig/LICENSE b/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 00000000000..4bfa7a84d81
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/kelseyhightower/envconfig/doc.go b/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 00000000000..f28561cd1cb
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/vendor/github.com/kelseyhightower/envconfig/env_os.go b/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 00000000000..a6a014a2b47
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+// lookupEnv distinguishes unset from empty env vars (App Engine build).
+var lookupEnv = os.LookupEnv
diff --git a/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 00000000000..9d98085b99f
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+// lookupEnv distinguishes unset from empty env vars; syscall.Getenv keeps the
+// package building on pre-go1.5 toolchains (see the comment in Process).
+var lookupEnv = syscall.Getenv
diff --git a/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 00000000000..892d74699f6
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,319 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+ KeyName string
+ FieldName string
+ TypeName string
+ Value string
+ Err error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+ Decode(value string) error
+}
+
+// Setter is implemented by types can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+ Set(value string) error
+}
+
+// Error formats the parse failure with the env key, target field, offending
+// value, and destination type.
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+ Name string
+ Alt string
+ Key string
+ Field reflect.Value
+ Tags reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct: one varInfo per
+// settable, non-ignored field, recursing into struct fields that do not
+// implement their own decoding. NOTE(review): vendored code — keep in sync
+// with upstream kelseyhightower/envconfig.
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		// Dereference (and allocate, for structs) pointer chains so we can
+		// populate the pointed-to value.
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				// Replace the struct's own entry with its flattened fields.
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables named
+// after its fields (optionally prefixed), honoring the default/required tags.
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	// err is gatherInfo's error; when it is non-nil, infos is empty and the
+	// loop above was skipped.
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs.
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+// processField converts value into the field's type. Custom Decoder/Setter/
+// TextUnmarshaler implementations take precedence (in that order); otherwise
+// pointers are allocated as needed and primitives, slices ("a,b,c"), and maps
+// ("k:v,k:v") are parsed.
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		// time.Duration is an int64 but must be parsed as "1h30m" etc.
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	case reflect.Map:
+		pairs := strings.Split(value, ",")
+		mp := reflect.MakeMap(typ)
+		for _, pair := range pairs {
+			kvpair := strings.Split(pair, ":")
+			if len(kvpair) != 2 {
+				return fmt.Errorf("invalid map item: %q", pair)
+			}
+			k := reflect.New(typ.Key()).Elem()
+			err := processField(kvpair[0], k)
+			if err != nil {
+				return err
+			}
+			v := reflect.New(typ.Elem()).Elem()
+			err = processField(kvpair[1], v)
+			if err != nil {
+				return err
+			}
+			mp.SetMapIndex(k, v)
+		}
+		field.Set(mp)
+	}
+
+	return nil
+}
+
+// interfaceFrom invokes fn with the field's value and, failing that, its
+// address, so callers can probe for interface implementations on either.
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+// decoderFrom returns the Decoder implemented by field (or its address), if any.
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+// setterFrom returns the Setter implemented by field (or its address), if any.
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+// textUnmarshaler returns the encoding.TextUnmarshaler implemented by field
+// (or its address), if any.
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/vendor/github.com/kelseyhightower/envconfig/usage.go b/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 00000000000..184635380f2
--- /dev/null
+++ b/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "text/template"
+)
+
+const (
+ // DefaultListFormat constant to use to display usage in a list format
+ DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+ [description] {{usage_description .}}
+ [type] {{usage_type .}}
+ [default] {{usage_default .}}
+ [required] {{usage_required .}}{{end}}
+`
+ // DefaultTableFormat constant to use to display usage in a tabluar format
+ DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY TYPE DEFAULT REQUIRED DESCRIPTION
+{{range .}}{{usage_key .}} {{usage_type .}} {{usage_default .}} {{usage_required .}} {{usage_description .}}
+{{end}}`
+)
+
+var (
+ decoderType = reflect.TypeOf((*Decoder)(nil)).Elem()
+ setterType = reflect.TypeOf((*Setter)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+ return t.Implements(decoderType) ||
+ reflect.PtrTo(t).Implements(decoderType) ||
+ t.Implements(setterType) ||
+ reflect.PtrTo(t).Implements(setterType) ||
+ t.Implements(unmarshalerType) ||
+ reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+ switch t.Kind() {
+ case reflect.Array, reflect.Slice:
+ return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+ case reflect.Map:
+ return fmt.Sprintf(
+ "Comma-separated list of %s:%s pairs",
+ toTypeDescription(t.Key()),
+ toTypeDescription(t.Elem()),
+ )
+ case reflect.Ptr:
+ return toTypeDescription(t.Elem())
+ case reflect.Struct:
+ if implementsInterface(t) && t.Name() != "" {
+ return t.Name()
+ }
+ return ""
+ case reflect.String:
+ name := t.Name()
+ if name != "" && name != "string" {
+ return name
+ }
+ return "String"
+ case reflect.Bool:
+ name := t.Name()
+ if name != "" && name != "bool" {
+ return name
+ }
+ return "True or False"
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ name := t.Name()
+ if name != "" && !strings.HasPrefix(name, "int") {
+ return name
+ }
+ return "Integer"
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ name := t.Name()
+ if name != "" && !strings.HasPrefix(name, "uint") {
+ return name
+ }
+ return "Unsigned Integer"
+ case reflect.Float32, reflect.Float64:
+ name := t.Name()
+ if name != "" && !strings.HasPrefix(name, "float") {
+ return name
+ }
+ return "Float"
+ }
+ return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stderr using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+ // The default is to output the usage information as a table
+ // Create tabwriter instance to support table output
+ tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+ err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+ tabs.Flush()
+ return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specifed template specification
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+ // Specify the default usage template functions
+ functions := template.FuncMap{
+ "usage_key": func(v varInfo) string { return v.Key },
+ "usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+ "usage_type": func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+ "usage_default": func(v varInfo) string { return v.Tags.Get("default") },
+ "usage_required": func(v varInfo) (string, error) {
+ req := v.Tags.Get("required")
+ if req != "" {
+ reqB, err := strconv.ParseBool(req)
+ if err != nil {
+ return "", err
+ }
+ if reqB {
+ req = "true"
+ }
+ }
+ return req, nil
+ },
+ }
+
+ tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+ if err != nil {
+ return err
+ }
+
+ return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+ // gather first
+ infos, err := gatherInfo(prefix, spec)
+ if err != nil {
+ return err
+ }
+
+ return tmpl.Execute(out, infos)
+}
From aaecace6d1b897a5816da6caccc073b9cbe3b46c Mon Sep 17 00:00:00 2001
From: mattmoor-sockpuppet
Date: Wed, 1 May 2019 08:25:33 -0700
Subject: [PATCH 73/76] golang format tools (#1132)
Produced via:
`gofmt -s -w $(find -path './vendor' -prune -o -type f -name '*.go' -print))`
`goimports -w $(find -name '*.go' | grep -v vendor)`
---
pkg/reconciler/namespace/namespace.go | 1 +
pkg/reconciler/namespace/namespace_test.go | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/pkg/reconciler/namespace/namespace.go b/pkg/reconciler/namespace/namespace.go
index 00ca22b34d1..d696fb8d4f0 100644
--- a/pkg/reconciler/namespace/namespace.go
+++ b/pkg/reconciler/namespace/namespace.go
@@ -19,6 +19,7 @@ package namespace
import (
"context"
"fmt"
+
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
"github.com/knative/eventing/pkg/utils"
"github.com/knative/pkg/tracker"
diff --git a/pkg/reconciler/namespace/namespace_test.go b/pkg/reconciler/namespace/namespace_test.go
index 5af66e14fc7..3a54fc1d4a8 100644
--- a/pkg/reconciler/namespace/namespace_test.go
+++ b/pkg/reconciler/namespace/namespace_test.go
@@ -17,9 +17,10 @@ limitations under the License.
package namespace
import (
- "github.com/knative/pkg/tracker"
"testing"
+ "github.com/knative/pkg/tracker"
+
"github.com/knative/eventing/pkg/reconciler/namespace/resources"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
From da72a315ff8375cb1176c6f1d5fc34c53662ee76 Mon Sep 17 00:00:00 2001
From: Akash Verenkar <45154452+akashrv@users.noreply.github.com>
Date: Wed, 1 May 2019 14:11:36 -0700
Subject: [PATCH 74/76] Remove Istio dependency from Eventing (Part - 5) -
final cleanup (#1119)
* WIP
* WIP - In-memory working with E2E tests
* WIP - remove istio dependency from in-memory channel
* UTs pass, E2E tests pass with in-memory as well as kafka
* fixed uts that failed due to last K8s service change
* Removed unnecessary space from a line
* Adding istio annotation to test POD. This will be needed when running E2E
tests against channels other than in-memory
* Bug fix to set clusterIp of K8s service only when it is not of type ExternalName
* WIP kafka channel
* WIP kafka - UTs and E2E pass
More UTs needed
* Updated code based on PR comments
* WIP
* Updates based on PR comments
* Updates based on PR comments
* Fixed UTs
* Updated VENDOR_LICENSE
* WIP. Update fanout sidecar
* Merge from upstream master
* UTs pass, ITs passed. Code ready for PR
* Update natss to not use ISTIO. UTs and E2E tests pass.
* Updates based on PR comments
* Removed permission to istio virtual service from controller
* WIP
* Changes based on PR comments
* Added back permission that was removed by mistake
* WIP
* Remove istio references
* WIP
* Removed one more reference of istio
* Revert kafka.yaml local change
* WIP
* Revert kafka dispatcher change
* Removing Mutex. No need to use Mutex when using atomic value for hostToChannelMap
* Removed named port from GCP dispatcher K8s service
* WIP
* Final changes before validating E2E tests
* Removed istio sidecars from all dispatchers and from broker.
* Updates based on PR comments
* gcppubsub istio/serviceentry removed
* cronjob source tested with and without sidecar
* Container source
* Removed sidecars from tests
* hack/update-dep.sh
* Fixed build error
* Updates based on PR comments
---
Gopkg.lock | 5 +-
config/100-namespace.yaml | 2 -
.../in-memory-channel/in-memory-channel.yaml | 2 -
contrib/gcppubsub/config/gcppubsub.yaml | 35 ++-
contrib/kafka/config/kafka.yaml | 2 -
contrib/natss/config/broker/README.md | 1 -
contrib/natss/config/provisioner.yaml | 2 -
docs/broker/README.md | 4 +-
.../eventing/v1alpha1/subscription_types.go | 3 +-
pkg/provisioners/channel_util.go | 110 ---------
pkg/provisioners/channel_util_test.go | 210 ------------------
.../containersource/containersource_test.go | 9 +-
.../containersource/resources/deployment.go | 9 +-
.../resources/deployment_test.go | 7 -
.../resources/receive_adapter.go | 3 -
.../resources/receive_adapter_test.go | 3 -
pkg/reconciler/testing/listers.go | 11 -
pkg/reconciler/v1alpha1/broker/broker_test.go | 31 ---
.../v1alpha1/broker/resources/filter.go | 4 -
.../v1alpha1/broker/resources/ingress.go | 5 -
test/crd.go | 19 +-
test/e2e/single_event_test.go | 1 -
.../listers/istio/v1alpha3/destinationrule.go | 94 --------
.../istio/v1alpha3/expansion_generated.go | 43 ----
.../client/listers/istio/v1alpha3/gateway.go | 94 --------
.../listers/istio/v1alpha3/virtualservice.go | 94 --------
26 files changed, 34 insertions(+), 769 deletions(-)
delete mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
delete mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
delete mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
delete mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
diff --git a/Gopkg.lock b/Gopkg.lock
index 5fd48dab9cf..cb31b28128c 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -433,7 +433,7 @@
version = "v1.3.0"
[[projects]]
- digest = "1:57d04562d05dd4500ff1e7e47f2e62b9be0531388377a3b691a012ce70b210d5"
+ digest = "1:8cd04a6109f3a15c2481ff5cffcb15a57eb6e19dc14eb94006d1a20d8db2b20c"
name = "github.com/knative/pkg"
packages = [
"apis",
@@ -452,7 +452,6 @@
"client/clientset/versioned/typed/authentication/v1alpha1/fake",
"client/clientset/versioned/typed/istio/v1alpha3",
"client/clientset/versioned/typed/istio/v1alpha3/fake",
- "client/listers/istio/v1alpha3",
"configmap",
"controller",
"kmeta",
@@ -1388,9 +1387,7 @@
"github.com/knative/pkg/apis",
"github.com/knative/pkg/apis/duck",
"github.com/knative/pkg/apis/duck/v1alpha1",
- "github.com/knative/pkg/apis/istio/v1alpha3",
"github.com/knative/pkg/client/clientset/versioned/fake",
- "github.com/knative/pkg/client/listers/istio/v1alpha3",
"github.com/knative/pkg/configmap",
"github.com/knative/pkg/controller",
"github.com/knative/pkg/kmeta",
diff --git a/config/100-namespace.yaml b/config/100-namespace.yaml
index e57fdd07720..07781fa5b47 100644
--- a/config/100-namespace.yaml
+++ b/config/100-namespace.yaml
@@ -15,5 +15,3 @@ apiVersion: v1
kind: Namespace
metadata:
name: knative-eventing
- labels:
- istio-injection: enabled
diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml
index d1b30298273..ed9472d863b 100644
--- a/config/provisioners/in-memory-channel/in-memory-channel.yaml
+++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml
@@ -188,8 +188,6 @@ spec:
role: dispatcher
template:
metadata:
- annotations:
- sidecar.istio.io/inject: "true"
labels: *labels
spec:
serviceAccountName: in-memory-channel-dispatcher
diff --git a/contrib/gcppubsub/config/gcppubsub.yaml b/contrib/gcppubsub/config/gcppubsub.yaml
index a1807aa9851..8cf2dca10b8 100644
--- a/contrib/gcppubsub/config/gcppubsub.yaml
+++ b/contrib/gcppubsub/config/gcppubsub.yaml
@@ -195,8 +195,6 @@ spec:
role: dispatcher
template:
metadata:
- annotations:
- sidecar.istio.io/inject: "true"
labels: *labels
spec:
serviceAccountName: gcp-pubsub-channel-dispatcher
@@ -233,20 +231,21 @@ spec:
- protocol: TCP
port: 80
targetPort: 8080
-
---
-# Needed by the GCP PubSub Channel to communicate with GCP PubSub.
-apiVersion: networking.istio.io/v1alpha3
-kind: ServiceEntry
-metadata:
- name: gcppubsub-bus-ext
- namespace: knative-eventing
-spec:
- hosts:
- - "*.googleapis.com"
- - "accounts.google.com"
- ports:
- - number: 443
- name: https
- protocol: HTTPS
- location: MESH_EXTERNAL
+# Needed by the GCP PubSub Channel to communicate with GCP PubSub.
+# Please uncomment if ISTIO automatic sidecar injector is enabled on the cluster, and
+# the GCP dispatcher pod in knative-eventing namespace runs with a ISTIO sidecar.
+# apiVersion: networking.istio.io/v1alpha3
+# kind: ServiceEntry
+# metadata:
+# name: gcppubsub-bus-ext
+# namespace: knative-eventing
+# spec:
+# hosts:
+# - "*.googleapis.com"
+# - "accounts.google.com"
+# ports:
+# - number: 443
+# name: https
+# protocol: HTTPS
+# location: MESH_EXTERNAL
diff --git a/contrib/kafka/config/kafka.yaml b/contrib/kafka/config/kafka.yaml
index 49882a5e158..6b89bbd68b5 100644
--- a/contrib/kafka/config/kafka.yaml
+++ b/contrib/kafka/config/kafka.yaml
@@ -199,8 +199,6 @@ spec:
role: dispatcher
template:
metadata:
- annotations:
- sidecar.istio.io/inject: "true"
labels: *labels
spec:
serviceAccountName: kafka-channel-dispatcher
diff --git a/contrib/natss/config/broker/README.md b/contrib/natss/config/broker/README.md
index cc81528710a..eb59553937f 100644
--- a/contrib/natss/config/broker/README.md
+++ b/contrib/natss/config/broker/README.md
@@ -4,7 +4,6 @@
```sbtshell
kubectl create namespace natss
- kubectl label namespace natss istio-injection=enabled
kubectl apply -n natss -f contrib/natss/config/broker/natss.yaml
```
diff --git a/contrib/natss/config/provisioner.yaml b/contrib/natss/config/provisioner.yaml
index 6790bbe7b1d..d53f772ae8f 100644
--- a/contrib/natss/config/provisioner.yaml
+++ b/contrib/natss/config/provisioner.yaml
@@ -162,8 +162,6 @@ spec:
role: dispatcher
template:
metadata:
- annotations:
- sidecar.istio.io/inject: "true"
labels: *labels
spec:
serviceAccountName: natss-dispatcher
diff --git a/docs/broker/README.md b/docs/broker/README.md
index e95ca70422a..8b90d0b66fa 100644
--- a/docs/broker/README.md
+++ b/docs/broker/README.md
@@ -235,7 +235,7 @@ The `Broker`'s address is well known, it will always be
`-broker..svc.`. In our case, it is
`default-broker.default.svc.cluster.local`.
-While SSHed into a `Pod` with the Istio sidecar, run:
+While SSHed into a `Pod` and run:
```shell
curl -v "http://default-broker.default.svc.cluster.local/" \
@@ -278,7 +278,7 @@ implmentation**.
### Namespace
Namespaces are reconciled by the
-[Namespace Reconciler](../../pkg/reconciler/v1alpha1/namespace). The
+[Namespace Reconciler](../../pkg/reconciler/namespace). The
`Namespace Reconciler` looks for all `namespace`s that have the label
`knative-eventing-injection: enabled`. If that label is present, then the
`Namespace Reconciler` reconciles:
diff --git a/pkg/apis/eventing/v1alpha1/subscription_types.go b/pkg/apis/eventing/v1alpha1/subscription_types.go
index 6e03585190e..e43377c021f 100644
--- a/pkg/apis/eventing/v1alpha1/subscription_types.go
+++ b/pkg/apis/eventing/v1alpha1/subscription_types.go
@@ -104,8 +104,7 @@ type SubscriptionSpec struct {
// provide the resolved target of the action.
// Currently we inspect the objects Status and see if there's a predefined
// Status field that we will then use to dispatch events to be processed by
-// the target. Currently must resolve to a k8s service or Istio virtual
-// service.
+// the target. Currently must resolve to a k8s service.
// Note that in the future we should try to utilize subresources (/resolve ?) to
// make this cleaner, but CRDs do not support subresources yet, so we need
// to rely on a specified Status field today. By relying on this behaviour
diff --git a/pkg/provisioners/channel_util.go b/pkg/provisioners/channel_util.go
index df914bc8a6b..abf79f75598 100644
--- a/pkg/provisioners/channel_util.go
+++ b/pkg/provisioners/channel_util.go
@@ -7,7 +7,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -157,61 +156,6 @@ func createK8sService(ctx context.Context, client runtimeClient.Client, getSvc g
return current, nil
}
-func getVirtualService(ctx context.Context, client runtimeClient.Client, c *eventingv1alpha1.Channel) (*istiov1alpha3.VirtualService, error) {
- list := &istiov1alpha3.VirtualServiceList{}
- opts := &runtimeClient.ListOptions{
- Namespace: c.Namespace,
- // TODO After the full release start selecting on new set of labels by using virtualServiceLabels(c)
- LabelSelector: labels.SelectorFromSet(virtualOldServiceLabels(c)),
- // Set Raw because if we need to get more than one page, then we will put the continue token
- // into opts.Raw.Continue.
- Raw: &metav1.ListOptions{},
- }
-
- err := client.List(ctx, opts, list)
- if err != nil {
- return nil, err
- }
- for _, vs := range list.Items {
- if metav1.IsControlledBy(&vs, c) {
- return &vs, nil
- }
- }
-
- return nil, k8serrors.NewNotFound(schema.GroupResource{}, "")
-}
-
-func CreateVirtualService(ctx context.Context, client runtimeClient.Client, channel *eventingv1alpha1.Channel, svc *corev1.Service) (*istiov1alpha3.VirtualService, error) {
- virtualService, err := getVirtualService(ctx, client, channel)
-
- // If the resource doesn't exist, we'll create it
- if k8serrors.IsNotFound(err) {
- virtualService = newVirtualService(channel, svc)
- err = client.Create(ctx, virtualService)
- if err != nil {
- return nil, err
- }
- return virtualService, nil
- } else if err != nil {
- return nil, err
- }
-
- // Update VirtualService if it has changed. This is possible since in version 0.2.0, the destinationHost in
- // spec.HTTP.Route for the dispatcher was changed from *-clusterbus to *-dispatcher. Even otherwise, this
- // reconciliation is useful for the future mutations to the object.
- expected := newVirtualService(channel, svc)
- if !equality.Semantic.DeepDerivative(expected.Spec, virtualService.Spec) ||
- !expectedLabelsPresent(virtualService.ObjectMeta.Labels, expected.ObjectMeta.Labels) {
- virtualService.Spec = expected.Spec
- virtualService.ObjectMeta.Labels = addExpectedLabels(virtualService.ObjectMeta.Labels, expected.ObjectMeta.Labels)
- err := client.Update(ctx, virtualService)
- if err != nil {
- return nil, err
- }
- }
- return virtualService, nil
-}
-
// checkExpectedLabels checks the presence of expected labels and its values and return true
// if all labels are found.
func expectedLabelsPresent(actual, expected map[string]string) bool {
@@ -333,60 +277,6 @@ func k8sServiceLabels(c *eventingv1alpha1.Channel) map[string]string {
}
}
-func virtualServiceLabels(c *eventingv1alpha1.Channel) map[string]string {
- // Use the same labels as the K8s service.
- return k8sServiceLabels(c)
-}
-
-func virtualOldServiceLabels(c *eventingv1alpha1.Channel) map[string]string {
- // Use the same labels as the K8s service.
- return k8sOldServiceLabels(c)
-}
-
-// newVirtualService creates a new VirtualService for a Channel resource. It also sets the
-// appropriate OwnerReferences on the resource so handleObject can discover the Channel resource
-// that 'owns' it. As well as being garbage collected when the Channel is deleted.
-func newVirtualService(channel *eventingv1alpha1.Channel, svc *corev1.Service) *istiov1alpha3.VirtualService {
- destinationHost := names.ServiceHostName(channelDispatcherServiceName(channel.Spec.Provisioner.Name), system.Namespace())
- return &istiov1alpha3.VirtualService{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: channelVirtualServiceName(channel.Name),
- Namespace: channel.Namespace,
- Labels: virtualServiceLabels(channel),
- OwnerReferences: []metav1.OwnerReference{
- *metav1.NewControllerRef(channel, schema.GroupVersionKind{
- Group: eventingv1alpha1.SchemeGroupVersion.Group,
- Version: eventingv1alpha1.SchemeGroupVersion.Version,
- Kind: "Channel",
- }),
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- names.ServiceHostName(svc.Name, channel.Namespace),
- channelHostName(channel.Name, channel.Namespace),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: channelHostName(channel.Name, channel.Namespace),
- },
- Route: []istiov1alpha3.HTTPRouteDestination{{
- Destination: istiov1alpha3.Destination{
- Host: destinationHost,
- Port: istiov1alpha3.PortSelector{
- Number: PortNumber,
- },
- }},
- }},
- },
- },
- }
-}
-
-func channelVirtualServiceName(channelName string) string {
- return fmt.Sprintf("%s-channel-", channelName)
-}
-
func channelServiceName(channelName string) string {
return fmt.Sprintf("%s-channel-", channelName)
}
diff --git a/pkg/provisioners/channel_util_test.go b/pkg/provisioners/channel_util_test.go
index 502c1fd5909..db35894106c 100644
--- a/pkg/provisioners/channel_util_test.go
+++ b/pkg/provisioners/channel_util_test.go
@@ -16,7 +16,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/knative/pkg/apis"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
@@ -43,7 +42,6 @@ var (
func init() {
// Add types to scheme.
- istiov1alpha3.AddToScheme(scheme.Scheme)
eventingv1alpha1.AddToScheme(scheme.Scheme)
}
@@ -67,35 +65,6 @@ func TestChannelUtils(t *testing.T) {
return CreateK8sService(context.TODO(), client, getNewChannel())
},
want: makeK8sService(),
- }, {
- name: "CreateVirtualService",
- f: func() (metav1.Object, error) {
- client := fake.NewFakeClient()
- return CreateVirtualService(context.TODO(), client, getNewChannel(), makeK8sService())
- },
- want: makeVirtualService(),
- }, {
- name: "CreateVirtualService_Existing",
- f: func() (metav1.Object, error) {
- existing := makeVirtualService()
- client := fake.NewFakeClient(existing)
- return CreateVirtualService(context.TODO(), client, getNewChannel(), makeK8sService())
- },
- want: makeVirtualService(),
- }, {
- name: "CreateVirtualService_ModifiedSpec",
- f: func() (metav1.Object, error) {
- existing := makeVirtualService()
- destHost := fmt.Sprintf("%s-clusterbus.knative-eventing.svc.%s", clusterChannelProvisionerName, utils.GetClusterDomainName())
- existing.Spec.HTTP[0].Route[0].Destination.Host = destHost
- client := fake.NewFakeClient(existing)
- CreateVirtualService(context.TODO(), client, getNewChannel(), makeK8sService())
-
- got := &istiov1alpha3.VirtualService{}
- got, err := getVirtualService(context.TODO(), client, getNewChannel())
- return got, err
- },
- want: makeVirtualService(),
}, {
name: "UpdateChannel",
f: func() (metav1.Object, error) {
@@ -255,125 +224,6 @@ func TestCreateK8sService(t *testing.T) {
}
}
-func TestCreateVirtualService(t *testing.T) {
- testCases := map[string]struct {
- list controllertesting.MockList
- create controllertesting.MockCreate
- update controllertesting.MockUpdate
- expected *istiov1alpha3.VirtualService
- err error
- }{
- "error getting svc": {
- list: func(_ runtimeClient.Client, _ context.Context, _ *runtimeClient.ListOptions, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errTestInduced
- },
- err: errTestInduced,
- },
- "not found - create error": {
- create: func(_ runtimeClient.Client, _ context.Context, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errTestInduced
- },
- err: errTestInduced,
- },
- "not found - create succeeds": {
- create: func(_ runtimeClient.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- vs := obj.(*istiov1alpha3.VirtualService)
- vs.Spec = makeTamperedVirtualService().Spec
- return controllertesting.Handled, nil
- },
- expected: makeTamperedVirtualService(),
- },
- "different spec - update fails": {
- list: func(_ runtimeClient.Client, _ context.Context, _ *runtimeClient.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
- l := obj.(*istiov1alpha3.VirtualServiceList)
- l.Items = []istiov1alpha3.VirtualService{
- {
- ObjectMeta: metav1.ObjectMeta{
- OwnerReferences: []metav1.OwnerReference{
- {
- Controller: &truePointer,
- UID: channelUID,
- },
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Gateways: []string{"set in get"},
- },
- },
- }
- return controllertesting.Handled, nil
- },
- update: func(_ runtimeClient.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errTestInduced
- },
- err: errTestInduced,
- },
- "different spec - update succeeds": {
- list: func(_ runtimeClient.Client, _ context.Context, _ *runtimeClient.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
- l := obj.(*istiov1alpha3.VirtualServiceList)
- l.Items = []istiov1alpha3.VirtualService{
- {
- ObjectMeta: metav1.ObjectMeta{
- OwnerReferences: []metav1.OwnerReference{
- {
- Controller: &truePointer,
- UID: channelUID,
- },
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Gateways: []string{"set in get"},
- },
- },
- }
- return controllertesting.Handled, nil
- },
- update: func(_ runtimeClient.Client, _ context.Context, obj runtime.Object) (controllertesting.MockHandled, error) {
- vs := obj.(*istiov1alpha3.VirtualService)
- makeTamperedVirtualService().DeepCopyInto(vs)
- return controllertesting.Handled, nil
- },
- expected: makeTamperedVirtualService(),
- },
- "found doesn't need altering": {
- list: func(_ runtimeClient.Client, _ context.Context, _ *runtimeClient.ListOptions, obj runtime.Object) (controllertesting.MockHandled, error) {
- l := obj.(*istiov1alpha3.VirtualServiceList)
- l.Items = []istiov1alpha3.VirtualService{*makeVirtualService()}
- return controllertesting.Handled, nil
- },
- create: func(_ runtimeClient.Client, _ context.Context, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errors.New("create should not have been called")
- },
- update: func(_ runtimeClient.Client, _ context.Context, _ runtime.Object) (controllertesting.MockHandled, error) {
- return controllertesting.Handled, errors.New("update should not have been called")
- },
- expected: makeVirtualService(),
- },
- }
- for n, tc := range testCases {
- t.Run(n, func(t *testing.T) {
- mocks := controllertesting.Mocks{}
- if tc.list != nil {
- mocks.MockLists = []controllertesting.MockList{tc.list}
- }
- if tc.create != nil {
- mocks.MockCreates = []controllertesting.MockCreate{tc.create}
- }
- if tc.update != nil {
- mocks.MockUpdates = []controllertesting.MockUpdate{tc.update}
- }
- client := controllertesting.NewMockClient(fake.NewFakeClient(), mocks)
- vs, err := CreateVirtualService(context.TODO(), client, getNewChannel(), makeK8sService())
- if tc.err != err {
- t.Fatalf("Unexpected error. Expected '%s', actual '%v'", tc.err, err)
- }
- if diff := cmp.Diff(tc.expected, vs); diff != "" {
- t.Fatalf("Unexpected virtual service (-want +got): %s", diff)
- }
- })
- }
-}
-
func TestAddFinalizer(t *testing.T) {
testCases := map[string]struct {
alreadyPresent bool
@@ -439,12 +289,6 @@ func TestChannelNames(t *testing.T) {
F func() string
Want string
}{{
- Name: "channelVirtualServiceName",
- F: func() string {
- return channelVirtualServiceName("foo")
- },
- Want: "foo-channel-",
- }, {
Name: "channelServiceName",
F: func() string {
return channelServiceName("foo")
@@ -642,57 +486,3 @@ func makeTamperedK8sService() *corev1.Service {
}
return svc
}
-
-func makeVirtualService() *istiov1alpha3.VirtualService {
- return &istiov1alpha3.VirtualService{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: fmt.Sprintf("%s-channel-", channelName),
- Namespace: testNS,
- Labels: map[string]string{
- EventingChannelLabel: channelName,
- OldEventingChannelLabel: channelName,
- EventingProvisionerLabel: clusterChannelProvisionerName,
- OldEventingProvisionerLabel: clusterChannelProvisionerName,
- },
- OwnerReferences: []metav1.OwnerReference{
- {
- APIVersion: eventingv1alpha1.SchemeGroupVersion.String(),
- Kind: "Channel",
- Name: channelName,
- UID: channelUID,
- Controller: &truePointer,
- BlockOwnerDeletion: &truePointer,
- },
- },
- },
- Spec: istiov1alpha3.VirtualServiceSpec{
- Hosts: []string{
- // The fake client doesn't fill in a Name when GeneratedName is used, so the
- // Channel's Name will be the empty string.
- fmt.Sprintf("%s.%s.svc.%s", "", testNS, utils.GetClusterDomainName()),
- fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- HTTP: []istiov1alpha3.HTTPRoute{{
- Rewrite: &istiov1alpha3.HTTPRewrite{
- Authority: fmt.Sprintf("%s.%s.channels.%s", channelName, testNS, utils.GetClusterDomainName()),
- },
- Route: []istiov1alpha3.HTTPRouteDestination{{
- Destination: istiov1alpha3.Destination{
- Host: fmt.Sprintf("%s-dispatcher.knative-testing.svc.%s", clusterChannelProvisionerName, utils.GetClusterDomainName()),
- Port: istiov1alpha3.PortSelector{
- Number: PortNumber,
- },
- }},
- }},
- },
- },
- }
-}
-
-func makeTamperedVirtualService() *istiov1alpha3.VirtualService {
- vs := makeVirtualService()
- vs.Spec = istiov1alpha3.VirtualServiceSpec{
- Gateways: []string{"tamped by the unit tests"},
- }
- return vs
-}
diff --git a/pkg/reconciler/containersource/containersource_test.go b/pkg/reconciler/containersource/containersource_test.go
index eb5a9d8b02b..f27363ab259 100644
--- a/pkg/reconciler/containersource/containersource_test.go
+++ b/pkg/reconciler/containersource/containersource_test.go
@@ -444,13 +444,6 @@ func makeDeployment(source *sourcesv1alpha1.ContainerSource, replicas int32, lab
args := append(source.Spec.Args, fmt.Sprintf("--sink=%s", sinkURI))
env := append(source.Spec.Env, corev1.EnvVar{Name: "SINK", Value: sinkURI})
- annos := map[string]string{
- "sidecar.istio.io/inject": "true",
- }
- for k, v := range annotations {
- annos[k] = v
- }
-
labs := map[string]string{
"eventing.knative.dev/source": source.Name,
}
@@ -476,7 +469,7 @@ func makeDeployment(source *sourcesv1alpha1.ContainerSource, replicas int32, lab
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: annos,
+ Annotations: annotations,
Labels: labs,
},
Spec: corev1.PodSpec{
diff --git a/pkg/reconciler/containersource/resources/deployment.go b/pkg/reconciler/containersource/resources/deployment.go
index e84854bdfe3..efefb490ec4 100644
--- a/pkg/reconciler/containersource/resources/deployment.go
+++ b/pkg/reconciler/containersource/resources/deployment.go
@@ -61,9 +61,6 @@ func MakeDeployment(args ContainerArguments) *appsv1.Deployment {
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
Labels: map[string]string{
sourceLabelKey: args.Name,
},
@@ -84,9 +81,11 @@ func MakeDeployment(args ContainerArguments) *appsv1.Deployment {
},
}
- // Then wire through any annotations from the source. Not a bug by allowing
- // the container to override Istio injection.
+ // Then wire through any annotations from the source.
if args.Annotations != nil {
+ if deploy.Spec.Template.ObjectMeta.Annotations == nil {
+ deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string, len(args.Annotations))
+ }
for k, v := range args.Annotations {
deploy.Spec.Template.ObjectMeta.Annotations[k] = v
}
diff --git a/pkg/reconciler/containersource/resources/deployment_test.go b/pkg/reconciler/containersource/resources/deployment_test.go
index f0f544187f2..833ad7ec2b3 100644
--- a/pkg/reconciler/containersource/resources/deployment_test.go
+++ b/pkg/reconciler/containersource/resources/deployment_test.go
@@ -185,9 +185,6 @@ func TestMakeDeployment_sink(t *testing.T) {
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
Labels: map[string]string{
"eventing.knative.dev/source": "test-name",
},
@@ -255,7 +252,6 @@ func TestMakeDeployment_sinkinargs(t *testing.T) {
ServiceAccountName: "test-service-account",
SinkInArgs: true,
Labels: map[string]string{"eventing.knative.dev/source": "test-name"},
- Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
})
want := &appsv1.Deployment{
@@ -283,9 +279,6 @@ func TestMakeDeployment_sinkinargs(t *testing.T) {
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
Labels: map[string]string{
"eventing.knative.dev/source": "test-name",
},
diff --git a/pkg/reconciler/cronjobsource/resources/receive_adapter.go b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
index f3aca93f50f..7dfee230ab8 100644
--- a/pkg/reconciler/cronjobsource/resources/receive_adapter.go
+++ b/pkg/reconciler/cronjobsource/resources/receive_adapter.go
@@ -57,9 +57,6 @@ func MakeReceiveAdapter(args *ReceiveAdapterArgs) *v1.Deployment {
Replicas: &replicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true", // TODO this might be removed.
- },
Labels: args.Labels,
},
Spec: corev1.PodSpec{
diff --git a/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go b/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
index e8b2c5fd1df..4c077bcd909 100644
--- a/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
+++ b/pkg/reconciler/cronjobsource/resources/receive_adapter_test.go
@@ -78,9 +78,6 @@ func TestMakeReceiveAdapter(t *testing.T) {
Replicas: &one,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
Labels: map[string]string{
"test-key1": "test-value1",
"test-key2": "test-value2",
diff --git a/pkg/reconciler/testing/listers.go b/pkg/reconciler/testing/listers.go
index 7f7b896b6ab..9048d1f570c 100644
--- a/pkg/reconciler/testing/listers.go
+++ b/pkg/reconciler/testing/listers.go
@@ -22,9 +22,7 @@ import (
fakeeventingclientset "github.com/knative/eventing/pkg/client/clientset/versioned/fake"
eventinglisters "github.com/knative/eventing/pkg/client/listers/eventing/v1alpha1"
sourcelisters "github.com/knative/eventing/pkg/client/listers/sources/v1alpha1"
- istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
fakesharedclientset "github.com/knative/pkg/client/clientset/versioned/fake"
- istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3"
"github.com/knative/pkg/reconciler/testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -118,10 +116,6 @@ func (l *Listers) GetChannelLister() eventinglisters.ChannelLister {
return eventinglisters.NewChannelLister(l.indexerFor(&eventingv1alpha1.Channel{}))
}
-func (l *Listers) GetVirtualServiceLister() istiolisters.VirtualServiceLister {
- return istiolisters.NewVirtualServiceLister(l.indexerFor(&istiov1alpha3.VirtualService{}))
-}
-
func (l *Listers) GetCronJobSourceLister() sourcelisters.CronJobSourceLister {
return sourcelisters.NewCronJobSourceLister(l.indexerFor(&sourcesv1alpha1.CronJobSource{}))
}
@@ -134,11 +128,6 @@ func (l *Listers) GetContainerSourceLister() sourcelisters.ContainerSourceLister
return sourcelisters.NewContainerSourceLister(l.indexerFor(&sourcesv1alpha1.ContainerSource{}))
}
-// GetGatewayLister gets lister for Istio Gateway resource.
-func (l *Listers) GetGatewayLister() istiolisters.GatewayLister {
- return istiolisters.NewGatewayLister(l.indexerFor(&istiov1alpha3.Gateway{}))
-}
-
func (l *Listers) GetDeploymentLister() appsv1listers.DeploymentLister {
return appsv1listers.NewDeploymentLister(l.indexerFor(&appsv1.Deployment{}))
}
diff --git a/pkg/reconciler/v1alpha1/broker/broker_test.go b/pkg/reconciler/v1alpha1/broker/broker_test.go
index 524ce825420..9d260c9bf78 100644
--- a/pkg/reconciler/v1alpha1/broker/broker_test.go
+++ b/pkg/reconciler/v1alpha1/broker/broker_test.go
@@ -189,7 +189,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
},
@@ -222,7 +221,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, "some-other-image", envVars(filterContainerName), nil)),
},
@@ -240,7 +238,6 @@ func TestReconcile(t *testing.T) {
Object: NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
}},
@@ -266,7 +263,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
},
@@ -308,7 +304,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -354,7 +349,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -369,7 +363,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080)),
),
@@ -404,7 +397,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -414,7 +406,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(9090))),
},
@@ -425,7 +416,6 @@ func TestReconcile(t *testing.T) {
Object: NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
}},
@@ -459,7 +449,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -469,7 +458,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
},
@@ -512,7 +500,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -522,7 +509,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -569,7 +555,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -579,7 +564,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -630,7 +614,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -640,7 +623,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -702,7 +684,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -712,7 +693,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -776,7 +756,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -786,7 +765,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -858,7 +836,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(filterDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.FilterLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(filterSA),
WithDeploymentContainer(filterContainerName, filterImage, envVars(filterContainerName), nil)),
NewService(filterServiceName, testNS,
@@ -868,7 +845,6 @@ func TestReconcile(t *testing.T) {
NewDeployment(ingressDeploymentName, testNS,
WithDeploymentOwnerReferences(ownerReferences()),
WithDeploymentLabels(resources.IngressLabels(brokerName)),
- WithDeploymentAnnotations(annotations()),
WithDeploymentServiceAccount(ingressSA),
WithDeploymentContainer(ingressContainerName, ingressImage, envVars(ingressContainerName), containerPorts(8080))),
NewService(ingressServiceName, testNS,
@@ -939,13 +915,6 @@ func channelProvisioner(name string) *corev1.ObjectReference {
}
}
-// TODO remove this once we get rid of istio.
-func annotations() map[string]string {
- return map[string]string{
- "sidecar.istio.io/inject": "true",
- }
-}
-
func envVars(containerName string) []corev1.EnvVar {
switch containerName {
case filterContainerName:
diff --git a/pkg/reconciler/v1alpha1/broker/resources/filter.go b/pkg/reconciler/v1alpha1/broker/resources/filter.go
index 74e641badd1..5f64e1370f7 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/filter.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/filter.go
@@ -57,10 +57,6 @@ func MakeFilterDeployment(args *FilterArgs) *appsv1.Deployment {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: FilterLabels(args.Broker.Name),
- // TODO remove this once we get rid of istio.
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
},
Spec: corev1.PodSpec{
ServiceAccountName: args.ServiceAccountName,
diff --git a/pkg/reconciler/v1alpha1/broker/resources/ingress.go b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
index 5721d5654a2..ccd38a58fd1 100644
--- a/pkg/reconciler/v1alpha1/broker/resources/ingress.go
+++ b/pkg/reconciler/v1alpha1/broker/resources/ingress.go
@@ -58,11 +58,6 @@ func MakeIngress(args *IngressArgs) *appsv1.Deployment {
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: IngressLabels(args.Broker.Name),
- // TODO: Remove this annotation once all channels stop using istio virtual service
- // https://github.com/knative/eventing/issues/294
- Annotations: map[string]string{
- "sidecar.istio.io/inject": "true",
- },
},
Spec: corev1.PodSpec{
ServiceAccountName: args.ServiceAccountName,
diff --git a/test/crd.go b/test/crd.go
index 046532f5f7b..6d0fd111ad1 100644
--- a/test/crd.go
+++ b/test/crd.go
@@ -128,9 +128,8 @@ func EventSenderPod(name string, namespace string, sink string, event *CloudEven
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Name: name,
+ Namespace: namespace,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
@@ -162,10 +161,9 @@ func EventSenderPod(name string, namespace string, sink string, event *CloudEven
func EventLoggerPod(name string, namespace string, selector map[string]string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- Labels: selector,
- Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Name: name,
+ Namespace: namespace,
+ Labels: selector,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
@@ -182,10 +180,9 @@ func EventLoggerPod(name string, namespace string, selector map[string]string) *
func EventTransformationPod(name string, namespace string, selector map[string]string, msgPostfix string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
- Name: name,
- Namespace: namespace,
- Labels: selector,
- Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Name: name,
+ Namespace: namespace,
+ Labels: selector,
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{{
diff --git a/test/e2e/single_event_test.go b/test/e2e/single_event_test.go
index c6158cee871..9537f8a538b 100644
--- a/test/e2e/single_event_test.go
+++ b/test/e2e/single_event_test.go
@@ -85,7 +85,6 @@ func singleEvent(t *testing.T, encoding string) {
if err := pkgTest.WaitForLogContent(clients.Kube, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, body); err != nil {
logPodLogsForDebugging(clients, loggerPodName, loggerPod.Spec.Containers[0].Name, ns, t.Logf)
logPodLogsForDebugging(clients, senderName, "sendevent", ns, t.Logf)
- logPodLogsForDebugging(clients, senderName, "istio-proxy", ns, t.Logf)
t.Fatalf("String %q not found in logs of logger pod %q: %v", body, loggerPodName, err)
}
}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
deleted file mode 100644
index ec1ff75565a..00000000000
--- a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// DestinationRuleLister helps list DestinationRules.
-type DestinationRuleLister interface {
- // List lists all DestinationRules in the indexer.
- List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error)
- // DestinationRules returns an object that can list and get DestinationRules.
- DestinationRules(namespace string) DestinationRuleNamespaceLister
- DestinationRuleListerExpansion
-}
-
-// destinationRuleLister implements the DestinationRuleLister interface.
-type destinationRuleLister struct {
- indexer cache.Indexer
-}
-
-// NewDestinationRuleLister returns a new DestinationRuleLister.
-func NewDestinationRuleLister(indexer cache.Indexer) DestinationRuleLister {
- return &destinationRuleLister{indexer: indexer}
-}
-
-// List lists all DestinationRules in the indexer.
-func (s *destinationRuleLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.DestinationRule))
- })
- return ret, err
-}
-
-// DestinationRules returns an object that can list and get DestinationRules.
-func (s *destinationRuleLister) DestinationRules(namespace string) DestinationRuleNamespaceLister {
- return destinationRuleNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// DestinationRuleNamespaceLister helps list and get DestinationRules.
-type DestinationRuleNamespaceLister interface {
- // List lists all DestinationRules in the indexer for a given namespace.
- List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error)
- // Get retrieves the DestinationRule from the indexer for a given namespace and name.
- Get(name string) (*v1alpha3.DestinationRule, error)
- DestinationRuleNamespaceListerExpansion
-}
-
-// destinationRuleNamespaceLister implements the DestinationRuleNamespaceLister
-// interface.
-type destinationRuleNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all DestinationRules in the indexer for a given namespace.
-func (s destinationRuleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.DestinationRule))
- })
- return ret, err
-}
-
-// Get retrieves the DestinationRule from the indexer for a given namespace and name.
-func (s destinationRuleNamespaceLister) Get(name string) (*v1alpha3.DestinationRule, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1alpha3.Resource("destinationrule"), name)
- }
- return obj.(*v1alpha3.DestinationRule), nil
-}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
deleted file mode 100644
index f3e2ec937f7..00000000000
--- a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// DestinationRuleListerExpansion allows custom methods to be added to
-// DestinationRuleLister.
-type DestinationRuleListerExpansion interface{}
-
-// DestinationRuleNamespaceListerExpansion allows custom methods to be added to
-// DestinationRuleNamespaceLister.
-type DestinationRuleNamespaceListerExpansion interface{}
-
-// GatewayListerExpansion allows custom methods to be added to
-// GatewayLister.
-type GatewayListerExpansion interface{}
-
-// GatewayNamespaceListerExpansion allows custom methods to be added to
-// GatewayNamespaceLister.
-type GatewayNamespaceListerExpansion interface{}
-
-// VirtualServiceListerExpansion allows custom methods to be added to
-// VirtualServiceLister.
-type VirtualServiceListerExpansion interface{}
-
-// VirtualServiceNamespaceListerExpansion allows custom methods to be added to
-// VirtualServiceNamespaceLister.
-type VirtualServiceNamespaceListerExpansion interface{}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
deleted file mode 100644
index 62a78893a63..00000000000
--- a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// GatewayLister helps list Gateways.
-type GatewayLister interface {
- // List lists all Gateways in the indexer.
- List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error)
- // Gateways returns an object that can list and get Gateways.
- Gateways(namespace string) GatewayNamespaceLister
- GatewayListerExpansion
-}
-
-// gatewayLister implements the GatewayLister interface.
-type gatewayLister struct {
- indexer cache.Indexer
-}
-
-// NewGatewayLister returns a new GatewayLister.
-func NewGatewayLister(indexer cache.Indexer) GatewayLister {
- return &gatewayLister{indexer: indexer}
-}
-
-// List lists all Gateways in the indexer.
-func (s *gatewayLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.Gateway))
- })
- return ret, err
-}
-
-// Gateways returns an object that can list and get Gateways.
-func (s *gatewayLister) Gateways(namespace string) GatewayNamespaceLister {
- return gatewayNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// GatewayNamespaceLister helps list and get Gateways.
-type GatewayNamespaceLister interface {
- // List lists all Gateways in the indexer for a given namespace.
- List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error)
- // Get retrieves the Gateway from the indexer for a given namespace and name.
- Get(name string) (*v1alpha3.Gateway, error)
- GatewayNamespaceListerExpansion
-}
-
-// gatewayNamespaceLister implements the GatewayNamespaceLister
-// interface.
-type gatewayNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all Gateways in the indexer for a given namespace.
-func (s gatewayNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.Gateway))
- })
- return ret, err
-}
-
-// Get retrieves the Gateway from the indexer for a given namespace and name.
-func (s gatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1alpha3.Resource("gateway"), name)
- }
- return obj.(*v1alpha3.Gateway), nil
-}
diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
deleted file mode 100644
index 3284cda81bd..00000000000
--- a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
-Copyright 2019 The Knative Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha3
-
-import (
- v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// VirtualServiceLister helps list VirtualServices.
-type VirtualServiceLister interface {
- // List lists all VirtualServices in the indexer.
- List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error)
- // VirtualServices returns an object that can list and get VirtualServices.
- VirtualServices(namespace string) VirtualServiceNamespaceLister
- VirtualServiceListerExpansion
-}
-
-// virtualServiceLister implements the VirtualServiceLister interface.
-type virtualServiceLister struct {
- indexer cache.Indexer
-}
-
-// NewVirtualServiceLister returns a new VirtualServiceLister.
-func NewVirtualServiceLister(indexer cache.Indexer) VirtualServiceLister {
- return &virtualServiceLister{indexer: indexer}
-}
-
-// List lists all VirtualServices in the indexer.
-func (s *virtualServiceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.VirtualService))
- })
- return ret, err
-}
-
-// VirtualServices returns an object that can list and get VirtualServices.
-func (s *virtualServiceLister) VirtualServices(namespace string) VirtualServiceNamespaceLister {
- return virtualServiceNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// VirtualServiceNamespaceLister helps list and get VirtualServices.
-type VirtualServiceNamespaceLister interface {
- // List lists all VirtualServices in the indexer for a given namespace.
- List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error)
- // Get retrieves the VirtualService from the indexer for a given namespace and name.
- Get(name string) (*v1alpha3.VirtualService, error)
- VirtualServiceNamespaceListerExpansion
-}
-
-// virtualServiceNamespaceLister implements the VirtualServiceNamespaceLister
-// interface.
-type virtualServiceNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all VirtualServices in the indexer for a given namespace.
-func (s virtualServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1alpha3.VirtualService))
- })
- return ret, err
-}
-
-// Get retrieves the VirtualService from the indexer for a given namespace and name.
-func (s virtualServiceNamespaceLister) Get(name string) (*v1alpha3.VirtualService, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1alpha3.Resource("virtualservice"), name)
- }
- return obj.(*v1alpha3.VirtualService), nil
-}
From 4253a4a0e4d296fa36ea2f7b47d97ff20bdceb91 Mon Sep 17 00:00:00 2001
From: Adam Harwayne
Date: Wed, 1 May 2019 14:39:33 -0700
Subject: [PATCH 75/76] Remove unneeded log keys. (#1135)
* Remove unneeded log keys.
knative.dev/controller is set by pkg/reconciler just before calling our reconciler, so adding a second one in main is redundant and produced entries with:
{
"knative.dev/controller": "controller",
"knative.dev/controller": "trigger-controller",
}
The reconcile key is already saved as 'knative.dev/key' by pkg/reconciler, so we don't need to add 'key' as well.
* Remove explict adds of key.
* PR comments.
---
cmd/controller/main.go | 2 --
pkg/reconciler/channel/channel.go | 8 ++++----
pkg/reconciler/eventtype/eventtype.go | 1 -
pkg/reconciler/namespace/namespace.go | 6 +++---
pkg/reconciler/subscription/subscription.go | 2 +-
pkg/reconciler/trigger/trigger.go | 2 --
pkg/reconciler/v1alpha1/broker/broker.go | 1 -
7 files changed, 8 insertions(+), 14 deletions(-)
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index 37afc799c65..ffc61e60a64 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -40,7 +40,6 @@ import (
"github.com/knative/eventing/pkg/reconciler/v1alpha1/broker"
"github.com/knative/pkg/configmap"
kncontroller "github.com/knative/pkg/controller"
- "github.com/knative/pkg/logging/logkey"
"github.com/knative/pkg/signals"
"go.uber.org/zap"
controllerruntime "sigs.k8s.io/controller-runtime/pkg/client/config"
@@ -57,7 +56,6 @@ func main() {
logger, atomicLevel := setupLogger()
defer logger.Sync()
- logger = logger.With(zap.String(logkey.ControllerType, logconfig.Controller))
// set up signals so we handle the first shutdown signal gracefully
stopCh := signals.SetupSignalHandler()
diff --git a/pkg/reconciler/channel/channel.go b/pkg/reconciler/channel/channel.go
index 9ea395ee854..f437eb5cbfc 100644
--- a/pkg/reconciler/channel/channel.go
+++ b/pkg/reconciler/channel/channel.go
@@ -86,7 +86,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
original, err := r.channelLister.Channels(namespace).Get(name)
if apierrs.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
- logging.FromContext(ctx).Error("channel key in work queue no longer exists", zap.Any("key", key))
+ logging.FromContext(ctx).Error("Channel key in work queue no longer exists")
return nil
} else if err != nil {
return err
@@ -104,14 +104,14 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
// updates regardless of whether the reconcile error out.
err = r.reconcile(ctx, channel)
if err != nil {
- logging.FromContext(ctx).Warn("Error reconciling Channel", zap.Error(err))
+ logging.FromContext(ctx).Error("Error reconciling Channel", zap.Error(err))
} else {
- logging.FromContext(ctx).Debug("Successfully reconciled Channel", zap.Any("key", key))
+ logging.FromContext(ctx).Debug("Successfully reconciled Channel")
r.Recorder.Eventf(channel, corev1.EventTypeNormal, channelReconciled, "Channel reconciled: %s", key)
}
if _, updateStatusErr := r.updateStatus(ctx, channel.DeepCopy()); updateStatusErr != nil {
- logging.FromContext(ctx).Warn("Error updating Channel status", zap.Any("key", key), zap.Error(updateStatusErr))
+ logging.FromContext(ctx).Warn("Error updating Channel status", zap.Error(updateStatusErr))
r.Recorder.Eventf(channel, corev1.EventTypeWarning, channelUpdateStatusFailed, "Failed to update channel status: %s", key)
return updateStatusErr
}
diff --git a/pkg/reconciler/eventtype/eventtype.go b/pkg/reconciler/eventtype/eventtype.go
index a1aa28113a8..0b614732133 100644
--- a/pkg/reconciler/eventtype/eventtype.go
+++ b/pkg/reconciler/eventtype/eventtype.go
@@ -106,7 +106,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
r.Logger.Errorf("invalid resource key: %s", key)
return nil
}
- ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
// Get the EventType resource with this namespace/name
original, err := r.eventTypeLister.EventTypes(namespace).Get(name)
diff --git a/pkg/reconciler/namespace/namespace.go b/pkg/reconciler/namespace/namespace.go
index d696fb8d4f0..11df3a5e2b2 100644
--- a/pkg/reconciler/namespace/namespace.go
+++ b/pkg/reconciler/namespace/namespace.go
@@ -130,7 +130,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
original, err := r.namespaceLister.Get(name)
if apierrs.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
- logging.FromContext(ctx).Error("namespace key in work queue no longer exists", zap.Any("key", key))
+ logging.FromContext(ctx).Error("namespace key in work queue no longer exists")
return nil
} else if err != nil {
return err
@@ -149,9 +149,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
// whether the reconcile error out.
err = r.reconcile(ctx, ns)
if err != nil {
- logging.FromContext(ctx).Error("Error reconciling Namespace", zap.Error(err), zap.Any("key", key))
+ logging.FromContext(ctx).Error("Error reconciling Namespace", zap.Error(err))
} else {
- logging.FromContext(ctx).Debug("Namespace reconciled", zap.Any("key", key))
+ logging.FromContext(ctx).Debug("Namespace reconciled")
}
// Requeue if the resource is not ready:
diff --git a/pkg/reconciler/subscription/subscription.go b/pkg/reconciler/subscription/subscription.go
index 4444539cc8a..ba99c76b5f7 100644
--- a/pkg/reconciler/subscription/subscription.go
+++ b/pkg/reconciler/subscription/subscription.go
@@ -107,7 +107,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
original, err := r.subscriptionLister.Subscriptions(namespace).Get(name)
if apierrs.IsNotFound(err) {
// The resource may no longer exist, in which case we stop processing.
- logging.FromContext(ctx).Error("subscription key in work queue no longer exists", zap.Any("key", key))
+ logging.FromContext(ctx).Error("subscription key in work queue no longer exists")
return nil
} else if err != nil {
return err
diff --git a/pkg/reconciler/trigger/trigger.go b/pkg/reconciler/trigger/trigger.go
index d046ce10a9f..ac6d9927dea 100644
--- a/pkg/reconciler/trigger/trigger.go
+++ b/pkg/reconciler/trigger/trigger.go
@@ -135,8 +135,6 @@ func NewController(
// converge the two. It then updates the Status block of the Trigger resource
// with the current status of the resource.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
- ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
-
// Convert the namespace/name string into a distinct namespace and name.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
diff --git a/pkg/reconciler/v1alpha1/broker/broker.go b/pkg/reconciler/v1alpha1/broker/broker.go
index d0751b786e1..f9abad9183e 100644
--- a/pkg/reconciler/v1alpha1/broker/broker.go
+++ b/pkg/reconciler/v1alpha1/broker/broker.go
@@ -146,7 +146,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
r.Logger.Errorf("invalid resource key: %s", key)
return nil
}
- ctx = logging.WithLogger(ctx, r.Logger.Desugar().With(zap.String("key", key)))
// Get the Broker resource with this namespace/name
original, err := r.brokerLister.Brokers(namespace).Get(name)
From 15bf0ea26b15d2d96ee6e1795b4b728fffd982c2 Mon Sep 17 00:00:00 2001
From: Grant Rodgers
Date: Wed, 1 May 2019 15:33:34 -0700
Subject: [PATCH 76/76] Upgrade knative/pkg for new stackdriver exporter
(#1139)
The latest knative/pkg upgrades its dependency on the stackdriver
exporter to v0.9.2, which is now compatible with protobuf 1.3.
---
Gopkg.lock | 33 +-
Gopkg.toml | 10 +-
third_party/VENDOR-LICENSE | 208 +
.../container/apiv1/cluster_manager_client.go | 676 ++
.../go/container/apiv1/doc.go | 49 +
.../exporter/stackdriver/metrics.go | 12 +-
.../monitoredresource/gcp_metadata_config.go | 27 +
.../monitoredresource/monitored_resources.go | 37 +-
.../exporter/stackdriver/resource.go | 109 +
.../exporter/stackdriver/stackdriver.go | 69 +-
.../exporter/stackdriver/stats.go | 30 +-
.../exporter/stackdriver/trace_proto.go | 8 +
.../contrib.go.opencensus.io/resource/AUTHORS | 1 +
.../contrib.go.opencensus.io/resource/LICENSE | 202 +
.../resource/resourcekeys/const.go | 49 +
.../knative/pkg/reconciler/testing/table.go | 5 +-
vendor/go.opencensus.io/exemplar/exemplar.go | 78 -
.../exporter/prometheus/prometheus.go | 59 +-
vendor/go.opencensus.io/internal/internal.go | 4 +-
.../internal/tagencoding/tagencoding.go | 5 +-
.../internal/traceinternals.go | 1 +
.../metricdata/doc.go} | 19 +-
.../metric/metricdata/exemplar.go | 33 +
.../metricdata/label.go} | 35 +-
.../metric/metricdata/metric.go | 46 +
.../metric/metricdata/point.go | 193 +
.../metric/metricdata/type_string.go | 16 +
.../metric/metricdata/unit.go | 27 +
.../metric/metricproducer/manager.go | 78 +
.../metric/metricproducer/producer.go | 28 +
vendor/go.opencensus.io/opencensus.go | 2 +-
.../plugin/ocgrpc/stats_common.go | 6 +-
.../plugin/ochttp/client_stats.go | 32 +-
.../plugin/ochttp/propagation/b3/b3.go | 2 +-
.../go.opencensus.io/plugin/ochttp/route.go | 10 +
.../go.opencensus.io/plugin/ochttp/server.go | 4 +-
.../go.opencensus.io/plugin/ochttp/stats.go | 61 +-
.../go.opencensus.io/plugin/ochttp/trace.go | 19 +-
.../plugin/ochttp/wrapped_body.go | 44 +
vendor/go.opencensus.io/resource/resource.go | 164 +
.../go.opencensus.io/stats/internal/record.go | 2 +-
vendor/go.opencensus.io/stats/measure.go | 20 +-
.../go.opencensus.io/stats/measure_float64.go | 23 +-
.../go.opencensus.io/stats/measure_int64.go | 23 +-
vendor/go.opencensus.io/stats/record.go | 6 +-
.../stats/view/aggregation_data.go | 138 +-
.../go.opencensus.io/stats/view/collector.go | 7 +-
vendor/go.opencensus.io/stats/view/view.go | 62 +-
.../stats/view/view_to_metric.go | 131 +
vendor/go.opencensus.io/stats/view/worker.go | 62 +-
.../stats/view/worker_commands.go | 19 +-
vendor/go.opencensus.io/tag/context.go | 24 -
vendor/go.opencensus.io/tag/map_codec.go | 3 +
vendor/go.opencensus.io/trace/basetypes.go | 9 +-
vendor/go.opencensus.io/trace/config.go | 38 +
vendor/go.opencensus.io/trace/evictedqueue.go | 38 +
vendor/go.opencensus.io/trace/export.go | 11 +-
.../trace/internal/internal.go | 1 +
vendor/go.opencensus.io/trace/lrumap.go | 37 +
vendor/go.opencensus.io/trace/trace.go | 114 +-
.../container/v1/cluster_service.pb.go | 7176 +++++++++++++++++
61 files changed, 10056 insertions(+), 379 deletions(-)
create mode 100644 vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go
create mode 100644 vendor/cloud.google.com/go/container/apiv1/doc.go
create mode 100644 vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go
create mode 100644 vendor/contrib.go.opencensus.io/resource/AUTHORS
create mode 100644 vendor/contrib.go.opencensus.io/resource/LICENSE
create mode 100644 vendor/contrib.go.opencensus.io/resource/resourcekeys/const.go
delete mode 100644 vendor/go.opencensus.io/exemplar/exemplar.go
rename vendor/go.opencensus.io/{stats/internal/validation.go => metric/metricdata/doc.go} (72%)
create mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go
rename vendor/go.opencensus.io/{trace/exemplar.go => metric/metricdata/label.go} (52%)
create mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go
create mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go
create mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go
create mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go
create mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
create mode 100644 vendor/go.opencensus.io/resource/resource.go
create mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go
create mode 100644 vendor/go.opencensus.io/trace/evictedqueue.go
create mode 100644 vendor/go.opencensus.io/trace/lrumap.go
create mode 100644 vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go
diff --git a/Gopkg.lock b/Gopkg.lock
index cb31b28128c..a8a559434e9 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,10 +2,11 @@
[[projects]]
- digest = "1:04a6834455a0a97cdd9bccc7394576ddc7a8a75f69fdc1431303c543df03bcac"
+ digest = "1:59c7473cb534fcc77eeb614ebdff78d0be3a50dad9beacb49123b1e2a0f480e8"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
+ "container/apiv1",
"iam",
"internal/optional",
"internal/version",
@@ -20,14 +21,23 @@
version = "v0.34.0"
[[projects]]
- digest = "1:b6eb7c2538ec2999a072c0e372a18d7b7e3aedac249f26e159586fa5f892909f"
+ digest = "1:43fbf05ea84c860a4e86b557d156b1e72511cd29375d3f71adb522362710aea7"
name = "contrib.go.opencensus.io/exporter/stackdriver"
packages = [
".",
"monitoredresource",
]
pruneopts = "NUT"
- revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
+ revision = "ab5a58af316a529613aadf9f50eeed1b6f044b2f"
+ version = "v0.9.2"
+
+[[projects]]
+ branch = "master"
+ digest = "1:cef70b547ce62d12ea8e5dcb9905bccb57ea1bb253ee6809fd79a17c29ca3cd5"
+ name = "contrib.go.opencensus.io/resource"
+ packages = ["resourcekeys"]
+ pruneopts = "NUT"
+ revision = "21591786a5e0c21806209b266cc6dfdfa85b3cdb"
[[projects]]
digest = "1:a074ae0f4788ea4c4c7045ab37f21943920bc20cf6ff8afcb2d971154cfa87ab"
@@ -433,7 +443,7 @@
version = "v1.3.0"
[[projects]]
- digest = "1:8cd04a6109f3a15c2481ff5cffcb15a57eb6e19dc14eb94006d1a20d8db2b20c"
+ digest = "1:7537d6dc68f7d48e6d7f0c9696e026eae9b5cc4986c7d4654f2d499bf9963d82"
name = "github.com/knative/pkg"
packages = [
"apis",
@@ -475,7 +485,7 @@
"webhook",
]
pruneopts = "NUT"
- revision = "6916051a96ae4d87d0e0e557a3c3b887ee4a3cea"
+ revision = "f95c27ee8cdceb4545b57c7d4cf283a94d303dca"
[[projects]]
branch = "master"
@@ -708,17 +718,19 @@
version = "v1.0.3"
[[projects]]
- digest = "1:bce7c290509e40fd1c73d700305c1961004d08c9a1812e47533416a8742893a7"
+ digest = "1:69a97603fe8952de86ee1e74a065f7974ec7d7d1d2301d3f5da6d342156363f4"
name = "go.opencensus.io"
packages = [
".",
- "exemplar",
"exporter/prometheus",
"internal",
"internal/tagencoding",
+ "metric/metricdata",
+ "metric/metricproducer",
"plugin/ocgrpc",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
+ "resource",
"stats",
"stats/internal",
"stats/view",
@@ -729,8 +741,8 @@
"trace/tracestate",
]
pruneopts = "NUT"
- revision = "b7bf3cdb64150a8c8c53b769fdeb2ba581bd4d4b"
- version = "v0.18.0"
+ revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38"
+ version = "v0.20.2"
[[projects]]
digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
@@ -919,7 +931,7 @@
[[projects]]
branch = "master"
- digest = "1:dba8c4bdaeca16a46cf1a667fe54260501055ab54ebe020baef12ac6477b9e73"
+ digest = "1:7d224bd384f6fbb88bb34c0a939911edda870cbc83719545184e90daaad60ac3"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
@@ -927,6 +939,7 @@
"googleapis/api/label",
"googleapis/api/metric",
"googleapis/api/monitoredres",
+ "googleapis/container/v1",
"googleapis/devtools/cloudtrace/v2",
"googleapis/iam/v1",
"googleapis/monitoring/v3",
diff --git a/Gopkg.toml b/Gopkg.toml
index 0a1e6fde97d..cac1ba20f29 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -73,8 +73,8 @@ required = [
# This controls when we upgrade apis independently of Serving.
[[override]]
name = "github.com/knative/pkg"
- # HEAD as of 2019-04-23
- revision = "6916051a96ae4d87d0e0e557a3c3b887ee4a3cea"
+ # HEAD as of 2019-05-01
+ revision = "f95c27ee8cdceb4545b57c7d4cf283a94d303dca"
# TODO why is this overridden?
[[override]]
@@ -105,12 +105,6 @@ required = [
name = "github.com/cloudevents/sdk-go"
version = "=0.6.0"
-[[override]]
- name = "contrib.go.opencensus.io/exporter/stackdriver"
- # HEAD as of 2019-02-11
- # Needed because this includes a fix to support Stackdriver built-in metrics
- revision = "c06c82c832edca4eaf7b0241bd655560a1be0346"
-
# needed because pkg upgraded
[[override]]
name = "go.uber.org/zap"
diff --git a/third_party/VENDOR-LICENSE b/third_party/VENDOR-LICENSE
index 2a4f2a03a7b..80614ea4a35 100644
--- a/third_party/VENDOR-LICENSE
+++ b/third_party/VENDOR-LICENSE
@@ -415,6 +415,214 @@ Import: github.com/knative/eventing/vendor/contrib.go.opencensus.io/exporter/sta
+===========================================================
+Import: github.com/knative/eventing/vendor/contrib.go.opencensus.io/resource
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+
===========================================================
Import: github.com/knative/eventing/vendor/github.com/Shopify/sarama
diff --git a/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go
new file mode 100644
index 00000000000..bb3011c37d8
--- /dev/null
+++ b/vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go
@@ -0,0 +1,676 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package container
+
+import (
+ "context"
+ "time"
+
+ "cloud.google.com/go/internal/version"
+ gax "github.com/googleapis/gax-go"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ containerpb "google.golang.org/genproto/googleapis/container/v1"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient.
+type ClusterManagerCallOptions struct {
+ ListClusters []gax.CallOption
+ GetCluster []gax.CallOption
+ CreateCluster []gax.CallOption
+ UpdateCluster []gax.CallOption
+ UpdateNodePool []gax.CallOption
+ SetNodePoolAutoscaling []gax.CallOption
+ SetLoggingService []gax.CallOption
+ SetMonitoringService []gax.CallOption
+ SetAddonsConfig []gax.CallOption
+ SetLocations []gax.CallOption
+ UpdateMaster []gax.CallOption
+ SetMasterAuth []gax.CallOption
+ DeleteCluster []gax.CallOption
+ ListOperations []gax.CallOption
+ GetOperation []gax.CallOption
+ CancelOperation []gax.CallOption
+ GetServerConfig []gax.CallOption
+ ListNodePools []gax.CallOption
+ GetNodePool []gax.CallOption
+ CreateNodePool []gax.CallOption
+ DeleteNodePool []gax.CallOption
+ RollbackNodePoolUpgrade []gax.CallOption
+ SetNodePoolManagement []gax.CallOption
+ SetLabels []gax.CallOption
+ SetLegacyAbac []gax.CallOption
+ StartIPRotation []gax.CallOption
+ CompleteIPRotation []gax.CallOption
+ SetNodePoolSize []gax.CallOption
+ SetNetworkPolicy []gax.CallOption
+ SetMaintenancePolicy []gax.CallOption
+}
+
+func defaultClusterManagerClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("container.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ }
+}
+
+func defaultClusterManagerCallOptions() *ClusterManagerCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &ClusterManagerCallOptions{
+ ListClusters: retry[[2]string{"default", "idempotent"}],
+ GetCluster: retry[[2]string{"default", "idempotent"}],
+ CreateCluster: retry[[2]string{"default", "non_idempotent"}],
+ UpdateCluster: retry[[2]string{"default", "non_idempotent"}],
+ UpdateNodePool: retry[[2]string{"default", "non_idempotent"}],
+ SetNodePoolAutoscaling: retry[[2]string{"default", "non_idempotent"}],
+ SetLoggingService: retry[[2]string{"default", "non_idempotent"}],
+ SetMonitoringService: retry[[2]string{"default", "non_idempotent"}],
+ SetAddonsConfig: retry[[2]string{"default", "non_idempotent"}],
+ SetLocations: retry[[2]string{"default", "non_idempotent"}],
+ UpdateMaster: retry[[2]string{"default", "non_idempotent"}],
+ SetMasterAuth: retry[[2]string{"default", "non_idempotent"}],
+ DeleteCluster: retry[[2]string{"default", "idempotent"}],
+ ListOperations: retry[[2]string{"default", "idempotent"}],
+ GetOperation: retry[[2]string{"default", "idempotent"}],
+ CancelOperation: retry[[2]string{"default", "non_idempotent"}],
+ GetServerConfig: retry[[2]string{"default", "idempotent"}],
+ ListNodePools: retry[[2]string{"default", "idempotent"}],
+ GetNodePool: retry[[2]string{"default", "idempotent"}],
+ CreateNodePool: retry[[2]string{"default", "non_idempotent"}],
+ DeleteNodePool: retry[[2]string{"default", "idempotent"}],
+ RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}],
+ SetNodePoolManagement: retry[[2]string{"default", "non_idempotent"}],
+ SetLabels: retry[[2]string{"default", "non_idempotent"}],
+ SetLegacyAbac: retry[[2]string{"default", "non_idempotent"}],
+ StartIPRotation: retry[[2]string{"default", "non_idempotent"}],
+ CompleteIPRotation: retry[[2]string{"default", "non_idempotent"}],
+ SetNodePoolSize: retry[[2]string{"default", "non_idempotent"}],
+ SetNetworkPolicy: retry[[2]string{"default", "non_idempotent"}],
+ SetMaintenancePolicy: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// ClusterManagerClient is a client for interacting with Google Container Engine API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type ClusterManagerClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ clusterManagerClient containerpb.ClusterManagerClient
+
+ // The call options for this service.
+ CallOptions *ClusterManagerCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewClusterManagerClient creates a new cluster manager client.
+//
+// Google Container Engine Cluster Manager v1
+func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &ClusterManagerClient{
+ conn: conn,
+ CallOptions: defaultClusterManagerCallOptions(),
+
+ clusterManagerClient: containerpb.NewClusterManagerClient(conn),
+ }
+ c.setGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *ClusterManagerClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *ClusterManagerClient) Close() error {
+ return c.conn.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", version.Go()}, keyval...)
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// ListClusters lists all clusters owned by a project in either the specified zone or all
+// zones.
+func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...)
+ var resp *containerpb.ListClustersResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetCluster gets the details of a specific cluster.
+func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...)
+ var resp *containerpb.Cluster
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateCluster creates a cluster, consisting of the specified number and type of Google
+// Compute Engine instances.
+//
+// By default, the cluster is created in the project's
+// default network (at /compute/docs/networks-and-firewalls#networks).
+//
+// One firewall is added for the cluster. After cluster creation,
+// the cluster creates routes for each node to allow the containers
+// on that node to communicate with all other instances in the
+// cluster.
+//
+// Finally, an entry is added to the project's global metadata indicating
+// which CIDR range is being used by the cluster.
+func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateCluster updates the settings of a specific cluster.
+func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateNodePool updates the version and/or image type of a specific node pool.
+func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool.
+func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLoggingService sets the logging service of a specific cluster.
+func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetMonitoringService sets the monitoring service of a specific cluster.
+func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetAddonsConfig sets the addons of a specific cluster.
+func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLocations sets the locations of a specific cluster.
+func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateMaster updates the master of a specific cluster.
+func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetMasterAuth used to set master auth materials. Currently supports :-
+// Changing the admin password of a specific cluster.
+// This can be either via password generation or explicitly set the password.
+func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
+// nodes.
+//
+// Firewalls and routes that were configured during cluster creation
+// are also deleted.
+//
+// Other Google Compute Engine resources that might be in use by the cluster
+// (e.g. load balancer resources) will not be deleted if they weren't present
+// at the initial create time.
+func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListOperations lists all operations in a project in a specific zone or all zones.
+func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
+ var resp *containerpb.ListOperationsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetOperation gets the specified operation.
+func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CancelOperation cancels the specified operation.
+func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// GetServerConfig returns configuration info about the Container Engine service.
+func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...)
+ var resp *containerpb.ServerConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListNodePools lists the node pools for a cluster.
+func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...)
+ var resp *containerpb.ListNodePoolsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetNodePool retrieves the node pool requested.
+func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...)
+ var resp *containerpb.NodePool
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateNodePool creates a node pool for a cluster.
+func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteNodePool deletes a node pool from a cluster.
+func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// RollbackNodePoolUpgrade roll back the previously Aborted or Failed NodePool upgrade.
+// This will be an no-op if the last upgrade successfully completed.
+func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNodePoolManagement sets the NodeManagement options for a node pool.
+func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLabels sets labels on a cluster.
+func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
+func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// StartIPRotation start master IP rotation.
+func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CompleteIPRotation completes master IP rotation.
+func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNodePoolSize sets the size of a specific node pool.
+func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetNetworkPolicy enables/Disables Network Policy for a cluster.
+func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SetMaintenancePolicy sets the maintenance policy for a cluster.
+func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
+ var resp *containerpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/container/apiv1/doc.go b/vendor/cloud.google.com/go/container/apiv1/doc.go
new file mode 100644
index 00000000000..0f995054c2d
--- /dev/null
+++ b/vendor/cloud.google.com/go/container/apiv1/doc.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package container is an auto-generated package for the
+// Google Container Engine API.
+//
+// NOTE: This package is in alpha. It is not stable, and is likely to change.
+//
+// The Google Kubernetes Engine API is used for building and managing
+// container
+// based applications, powered by the open source Kubernetes technology.
+package container // import "cloud.google.com/go/container/apiv1"
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ }
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
index 9b3b7bf19ec..dc29bf79084 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics.go
@@ -363,7 +363,7 @@ func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []
}
func metricProseFromProto(metric *metricspb.Metric) (name, description, unit string, ok bool) {
- mname := metric.GetName()
+ mname := metric.GetMetricDescriptor().GetName()
if mname != "" {
name = mname
return
@@ -461,22 +461,28 @@ func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
Count: dv.Count,
Mean: mean,
SumOfSquaredDeviation: dv.SumOfSquaredDeviation,
- BucketCounts: bucketCounts(dv.Buckets),
},
}
+ insertZeroBound := false
if bopts := dv.BucketOptions; bopts != nil && bopts.Type != nil {
bexp, ok := bopts.Type.(*metricspb.DistributionValue_BucketOptions_Explicit_)
if ok && bexp != nil && bexp.Explicit != nil {
+ insertZeroBound = shouldInsertZeroBound(bexp.Explicit.Bounds...)
mv.DistributionValue.BucketOptions = &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
- Bounds: bexp.Explicit.Bounds[:],
+ // The first bucket bound should be 0.0 because the Metrics first bucket is
+ // [0, first_bound) but Stackdriver monitoring bucket bounds begin with -infinity
+ // (first bucket is (-infinity, 0))
+ Bounds: addZeroBoundOnCondition(insertZeroBound, bexp.Explicit.Bounds...),
},
},
}
}
}
+ mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts(dv.Buckets)...)
+
}
tval = &monitoringpb.TypedValue{Value: mv}
}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
index ceb754e5156..412e347724e 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp_metadata_config.go
@@ -15,11 +15,15 @@
package monitoredresource
import (
+ "context"
+ "fmt"
"log"
"os"
"strings"
"cloud.google.com/go/compute/metadata"
+ "cloud.google.com/go/container/apiv1"
+ containerpb "google.golang.org/genproto/googleapis/container/v1"
)
// gcpMetadata represents metadata retrieved from GCP (GKE and GCE) environment.
@@ -45,6 +49,8 @@ type gcpMetadata struct {
// zone is the Compute Engine zone in which the VM is running.
zone string
+
+ monitoringV2 bool
}
// retrieveGCPMetadata retrieves value of each Attribute from Metadata Server
@@ -70,6 +76,9 @@ func retrieveGCPMetadata() *gcpMetadata {
logError(err)
gcpMetadata.clusterName = strings.TrimSpace(clusterName)
+ clusterLocation, err := metadata.InstanceAttributeValue("cluster-location")
+ logError(err)
+
// Following attributes are derived from environment variables. They are configured
// via yaml file. For details refer to:
// https://cloud.google.com/kubernetes-engine/docs/tutorials/custom-metrics-autoscaling#exporting_metrics_from_the_application
@@ -77,6 +86,24 @@ func retrieveGCPMetadata() *gcpMetadata {
gcpMetadata.containerName = os.Getenv("CONTAINER_NAME")
gcpMetadata.podID = os.Getenv("HOSTNAME")
+ // Monitoring API version can be obtained from cluster info.
+ if gcpMetadata.clusterName != "" {
+ ctx := context.Background()
+ c, err := container.NewClusterManagerClient(ctx)
+ logError(err)
+ if c != nil {
+ req := &containerpb.GetClusterRequest{
+ Name: fmt.Sprintf("projects/%s/locations/%s/clusters/%s", gcpMetadata.projectID, strings.TrimSpace(clusterLocation), gcpMetadata.clusterName),
+ }
+ resp, err := c.GetCluster(ctx, req)
+ logError(err)
+ if resp != nil && resp.GetMonitoringService() == "monitoring.googleapis.com/kubernetes" &&
+ resp.GetLoggingService() == "logging.googleapis.com/kubernetes" {
+ gcpMetadata.monitoringV2 = true
+ }
+ }
+ }
+
return &gcpMetadata
}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
index c07e55ce07c..98dc3f5e52b 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/monitored_resources.go
@@ -52,20 +52,32 @@ type GKEContainer struct {
// Zone is the Compute Engine zone in which the VM is running.
Zone string
+
+ // LoggingMonitoringV2Enabled is the identifier if user enabled V2 logging and monitoring for GKE
+ LoggingMonitoringV2Enabled bool
}
// MonitoredResource returns resource type and resource labels for GKEContainer
func (gke *GKEContainer) MonitoredResource() (resType string, labels map[string]string) {
labels = map[string]string{
"project_id": gke.ProjectID,
- "instance_id": gke.InstanceID,
- "zone": gke.Zone,
"cluster_name": gke.ClusterName,
"container_name": gke.ContainerName,
- "namespace_id": gke.NamespaceID,
- "pod_id": gke.PodID,
}
- return "gke_container", labels
+ var typ string
+ if gke.LoggingMonitoringV2Enabled {
+ typ = "k8s_container"
+ labels["pod_name"] = gke.PodID
+ labels["namespace_name"] = gke.NamespaceID
+ labels["location"] = gke.Zone
+ } else {
+ typ = "gke_container"
+ labels["pod_id"] = gke.PodID
+ labels["namespace_id"] = gke.NamespaceID
+ labels["zone"] = gke.Zone
+ labels["instance_id"] = gke.InstanceID
+ }
+ return typ, labels
}
// GCEInstance represents gce_instance type monitored resource.
@@ -187,13 +199,14 @@ func createGCEInstanceMonitoredResource(gcpMetadata *gcpMetadata) *GCEInstance {
// gcpMetadata contains GCP (GKE or GCE) specific attributes.
func createGKEContainerMonitoredResource(gcpMetadata *gcpMetadata) *GKEContainer {
gkeContainer := GKEContainer{
- ProjectID: gcpMetadata.projectID,
- InstanceID: gcpMetadata.instanceID,
- Zone: gcpMetadata.zone,
- ContainerName: gcpMetadata.containerName,
- ClusterName: gcpMetadata.clusterName,
- NamespaceID: gcpMetadata.namespaceID,
- PodID: gcpMetadata.podID,
+ ProjectID: gcpMetadata.projectID,
+ InstanceID: gcpMetadata.instanceID,
+ Zone: gcpMetadata.zone,
+ ContainerName: gcpMetadata.containerName,
+ ClusterName: gcpMetadata.clusterName,
+ NamespaceID: gcpMetadata.namespaceID,
+ PodID: gcpMetadata.podID,
+ LoggingMonitoringV2Enabled: gcpMetadata.monitoringV2,
}
return &gkeContainer
}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go
new file mode 100644
index 00000000000..957693169ee
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/resource.go
@@ -0,0 +1,109 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stackdriver // import "contrib.go.opencensus.io/exporter/stackdriver"
+
+import (
+ "contrib.go.opencensus.io/resource/resourcekeys"
+ "go.opencensus.io/resource"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+type resourceMap struct {
+ // Mapping from the input resource type to the monitored resource type in Stackdriver.
+ srcType, dstType string
+ // Mapping from Stackdriver monitored resource label to an OpenCensus resource label.
+ labels map[string]string
+}
+
+// Resource labels that are generally internal to the exporter.
+// Consider exposing these labels and a type identifier in the future to allow
+// for customization.
+const (
+ stackdriverLocation = "contrib.opencensus.io/exporter/stackdriver/location"
+ stackdriverProjectID = "contrib.opencensus.io/exporter/stackdriver/project_id"
+ stackdriverGenericTaskNamespace = "contrib.opencensus.io/exporter/stackdriver/generic_task/namespace"
+ stackdriverGenericTaskJob = "contrib.opencensus.io/exporter/stackdriver/generic_task/job"
+ stackdriverGenericTaskID = "contrib.opencensus.io/exporter/stackdriver/generic_task/task_id"
+)
+
+// Mappings for the well-known OpenCensus resources to applicable Stackdriver resources.
+var resourceMappings = []resourceMap{
+ {
+ srcType: resourcekeys.K8STypeContainer,
+ dstType: "k8s_container",
+ labels: map[string]string{
+ "project_id": stackdriverProjectID,
+ "location": stackdriverLocation,
+ "cluster_name": resourcekeys.K8SKeyClusterName,
+ "namespace_name": resourcekeys.K8SKeyNamespaceName,
+ "pod_name": resourcekeys.K8SKeyPodName,
+ "container_name": resourcekeys.K8SKeyContainerName,
+ },
+ },
+ {
+ srcType: resourcekeys.GCPTypeGCEInstance,
+ dstType: "gce_instance",
+ labels: map[string]string{
+ "project_id": resourcekeys.GCPKeyGCEProjectID,
+ "instance_id": resourcekeys.GCPKeyGCEInstanceID,
+ "zone": resourcekeys.GCPKeyGCEZone,
+ },
+ },
+ {
+ srcType: resourcekeys.AWSTypeEC2Instance,
+ dstType: "aws_ec2_instance",
+ labels: map[string]string{
+ "project_id": stackdriverProjectID,
+ "instance_id": resourcekeys.AWSKeyEC2InstanceID,
+ "region": resourcekeys.AWSKeyEC2Region,
+ "aws_account": resourcekeys.AWSKeyEC2AccountID,
+ },
+ },
+ // Fallback to generic task resource.
+ {
+ srcType: "",
+ dstType: "generic_task",
+ labels: map[string]string{
+ "project_id": stackdriverProjectID,
+ "location": stackdriverLocation,
+ "namespace": stackdriverGenericTaskNamespace,
+ "job": stackdriverGenericTaskJob,
+ "task_id": stackdriverGenericTaskID,
+ },
+ },
+}
+
+func DefaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResource {
+Outer:
+ for _, rm := range resourceMappings {
+ if res.Type != rm.srcType {
+ continue
+ }
+ result := &monitoredrespb.MonitoredResource{
+ Type: rm.dstType,
+ Labels: make(map[string]string, len(rm.labels)),
+ }
+ for dst, src := range rm.labels {
+ if v, ok := res.Labels[src]; ok {
+ result.Labels[dst] = v
+ } else {
+ // A required label wasn't filled at all. Try subsequent mappings.
+ continue Outer
+ }
+ }
+ return result
+ }
+ return nil
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
index 595377368e2..a6e774887c9 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stackdriver.go
@@ -52,10 +52,14 @@ import (
"errors"
"fmt"
"log"
+ "os"
+ "path"
"time"
+ metadataapi "cloud.google.com/go/compute/metadata"
traceapi "cloud.google.com/go/trace/apiv2"
"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
+ "go.opencensus.io/resource"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
@@ -73,9 +77,22 @@ type Options struct {
// ProjectID is the identifier of the Stackdriver
// project the user is uploading the stats data to.
// If not set, this will default to your "Application Default Credentials".
- // For details see: https://developers.google.com/accounts/docs/application-default-credentials
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+ //
+ // It will be used in the project_id label of a Stackdriver monitored
+ // resource if the resource does not inherently belong to a specific
+ // project, e.g. on-premise resource like k8s_container or generic_task.
ProjectID string
+ // Location is the identifier of the GCP or AWS cloud region/zone in which
+ // the data for a resource is stored.
+ // If not set, it will default to the location provided by the metadata server.
+ //
+ // It will be used in the location label of a Stackdriver monitored resource
+ // if the resource does not inherently belong to a specific project, e.g.
+ // on-premise resource like k8s_container or generic_task.
+ Location string
+
// OnError is the hook to be called when there is
// an error uploading the stats or tracing data.
// If no custom hook is set, errors are logged.
@@ -153,6 +170,21 @@ type Options struct {
// Optional, but encouraged.
MonitoredResource monitoredresource.Interface
+ // ResourceDetector provides a hook to discover arbitrary resource information.
+ //
+ // The translation function provided in MapResource must be able to convert
+ // the resource information to a Stackdriver monitored resource.
+ //
+ // If this field is unset, resource type and tags will automatically be discovered through
+ // the OC_RESOURCE_TYPE and OC_RESOURCE_LABELS environment variables.
+ ResourceDetector resource.Detector
+
+ // MapResource converts a OpenCensus resource to a Stackdriver monitored resource.
+ //
+ // If this field is unset, DefaultMapResource will be used which encodes a set of default
+ // conversions from auto-detected resources to well-known Stackdriver monitored resources.
+ MapResource func(*resource.Resource) *monitoredrespb.MonitoredResource
+
// MetricPrefix overrides the prefix of a Stackdriver metric display names.
// Optional. If unset defaults to "OpenCensus/".
// Deprecated: Provide GetMetricDisplayName to change the display name of
@@ -253,10 +285,45 @@ func NewExporter(o Options) (*Exporter, error) {
}
o.ProjectID = creds.ProjectID
}
+ if o.Location == "" {
+ ctx := o.Context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ zone, err := metadataapi.Zone()
+ if err != nil {
+ log.Printf("Setting Stackdriver default location failed: %s", err)
+ } else {
+ log.Printf("Setting Stackdriver default location to %q", zone)
+ o.Location = zone
+ }
+ }
if o.MonitoredResource != nil {
o.Resource = convertMonitoredResourceToPB(o.MonitoredResource)
}
+ if o.MapResource == nil {
+ o.MapResource = DefaultMapResource
+ }
+ if o.ResourceDetector != nil {
+ // For backwards-compatibility we still respect the deprecated resource field.
+ if o.Resource != nil {
+ return nil, errors.New("stackdriver: ResourceDetector must not be used in combination with deprecated resource fields")
+ }
+ res, err := o.ResourceDetector(o.Context)
+ if err != nil {
+ return nil, fmt.Errorf("stackdriver: detect resource: %s", err)
+ }
+ // Populate internal resource labels for defaulting project_id, location, and
+ // generic resource labels of applicable monitored resources.
+ res.Labels[stackdriverProjectID] = o.ProjectID
+ res.Labels[stackdriverLocation] = o.Location
+ res.Labels[stackdriverGenericTaskNamespace] = "default"
+ res.Labels[stackdriverGenericTaskJob] = path.Base(os.Args[0])
+ res.Labels[stackdriverGenericTaskID] = getTaskValue()
+
+ o.Resource = o.MapResource(res)
+ }
se, err := newStatsExporter(o)
if err != nil {
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
index ca82ca71b40..9a404b832e9 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/stats.go
@@ -25,13 +25,13 @@ import (
"sync"
"time"
- "go.opencensus.io"
+ opencensus "go.opencensus.io"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
"go.opencensus.io/trace"
- "cloud.google.com/go/monitoring/apiv3"
+ monitoring "cloud.google.com/go/monitoring/apiv3"
"github.com/golang/protobuf/ptypes/timestamp"
"google.golang.org/api/option"
"google.golang.org/api/support/bundler"
@@ -423,6 +423,7 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
}}
}
case *view.DistributionData:
+ insertZeroBound := shouldInsertZeroBound(vd.Aggregation.Buckets...)
return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{
DistributionValue: &distributionpb.Distribution{
Count: v.Count,
@@ -436,11 +437,11 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
BucketOptions: &distributionpb.Distribution_BucketOptions{
Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{
ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{
- Bounds: vd.Aggregation.Buckets,
+ Bounds: addZeroBoundOnCondition(insertZeroBound, vd.Aggregation.Buckets...),
},
},
},
- BucketCounts: v.CountPerBucket,
+ BucketCounts: addZeroBucketCountOnCondition(insertZeroBound, v.CountPerBucket...),
},
}}
case *view.LastValueData:
@@ -458,6 +459,27 @@ func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue {
return nil
}
+func shouldInsertZeroBound(bounds ...float64) bool {
+ if len(bounds) > 0 && bounds[0] != 0.0 {
+ return true
+ }
+ return false
+}
+
+func addZeroBucketCountOnCondition(insert bool, counts ...int64) []int64 {
+ if insert {
+ return append([]int64{0}, counts...)
+ }
+ return counts
+}
+
+func addZeroBoundOnCondition(insert bool, bounds ...float64) []float64 {
+ if insert {
+ return append([]float64{0.0}, bounds...)
+ }
+ return bounds
+}
+
func (e *statsExporter) metricType(v *view.View) string {
if formatter := e.o.GetMetricType; formatter != nil {
return formatter(v)
diff --git a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
index 2d259cf3c39..f64ab1eec4a 100644
--- a/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
+++ b/vendor/contrib.go.opencensus.io/exporter/stackdriver/trace_proto.go
@@ -17,6 +17,7 @@ package stackdriver
import (
"fmt"
"math"
+ "strconv"
"time"
"unicode/utf8"
@@ -234,6 +235,13 @@ func attributeValue(v interface{}) *tracepb.AttributeValue {
return &tracepb.AttributeValue{
Value: &tracepb.AttributeValue_IntValue{IntValue: value},
}
+ case float64:
+ // TODO: set double value if Stackdriver Trace support it in the future.
+ return &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_StringValue{
+ StringValue: trunc(strconv.FormatFloat(value, 'f', -1, 64),
+ maxAttributeStringValue)},
+ }
case string:
return &tracepb.AttributeValue{
Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)},
diff --git a/vendor/contrib.go.opencensus.io/resource/AUTHORS b/vendor/contrib.go.opencensus.io/resource/AUTHORS
new file mode 100644
index 00000000000..e491a9e7f78
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/resource/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/contrib.go.opencensus.io/resource/LICENSE b/vendor/contrib.go.opencensus.io/resource/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/resource/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/resource/resourcekeys/const.go b/vendor/contrib.go.opencensus.io/resource/resourcekeys/const.go
new file mode 100644
index 00000000000..46c72969357
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/resource/resourcekeys/const.go
@@ -0,0 +1,49 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resourcekeys contains well known type and label keys for resources.
+package resourcekeys // import "contrib.go.opencensus.io/resource/resourcekeys"
+
+// Constants for Kubernetes resources.
+const (
+ K8STypeContainer = "k8s.io/container"
+
+ // A uniquely identifying name for the Kubernetes cluster. Kubernetes
+ // does not have cluster names as an internal concept so this may be
+ // set to any meaningful value within the environment. For example,
+ // GKE clusters have a name which can be used for this label.
+ K8SKeyClusterName = "k8s.io/cluster/name"
+ K8SKeyNamespaceName = "k8s.io/namespace/name"
+ K8SKeyPodName = "k8s.io/pod/name"
+ K8SKeyContainerName = "k8s.io/container/name"
+)
+
+// Constants for AWS resources.
+const (
+ AWSTypeEC2Instance = "aws.com/ec2/instance"
+
+ AWSKeyEC2AccountID = "aws.com/ec2/account_id"
+ AWSKeyEC2Region = "aws.com/ec2/region"
+ AWSKeyEC2InstanceID = "aws.com/ec2/instance_id"
+)
+
+// Constants for GCP resources.
+const (
+ GCPTypeGCEInstance = "cloud.google.com/gce/instance"
+
+ // ProjectID of the GCE VM. This is not the project ID of the used client credentials.
+ GCPKeyGCEProjectID = "cloud.google.com/gce/project_id"
+ GCPKeyGCEZone = "cloud.google.com/gce/zone"
+ GCPKeyGCEInstanceID = "cloud.google.com/gce/instance_id"
+)
diff --git a/vendor/github.com/knative/pkg/reconciler/testing/table.go b/vendor/github.com/knative/pkg/reconciler/testing/table.go
index 0a0a3a9bff0..e2aaf6a13cf 100644
--- a/vendor/github.com/knative/pkg/reconciler/testing/table.go
+++ b/vendor/github.com/knative/pkg/reconciler/testing/table.go
@@ -264,9 +264,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
if got, want := got.GetListRestrictions().Labels, want.GetListRestrictions().Labels; (got != nil) != (want != nil) || got.String() != want.String() {
t.Errorf("Unexpected delete-collection[%d].Labels = %v, wanted %v", i, got, want)
}
- // TODO(mattmoor): Add this if/when we need support.
- if got := got.GetListRestrictions().Fields; got.String() != "" {
- t.Errorf("Unexpected delete-collection[%d].Fields = %v, wanted ''", i, got)
+ if got, want := got.GetListRestrictions().Fields, want.GetListRestrictions().Fields; (got != nil) != (want != nil) || got.String() != want.String() {
+ t.Errorf("Unexpected delete-collection[%d].Fields = %v, wanted %v", i, got, want)
}
if !r.SkipNamespaceValidation && got.GetNamespace() != expectedNamespace {
t.Errorf("Unexpected delete-collection[%d]: %#v, wanted %s", i, got, expectedNamespace)
diff --git a/vendor/go.opencensus.io/exemplar/exemplar.go b/vendor/go.opencensus.io/exemplar/exemplar.go
deleted file mode 100644
index e676df837fb..00000000000
--- a/vendor/go.opencensus.io/exemplar/exemplar.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package exemplar implements support for exemplars. Exemplars are additional
-// data associated with each measurement.
-//
-// Their purpose it to provide an example of the kind of thing
-// (request, RPC, trace span, etc.) that resulted in that measurement.
-package exemplar
-
-import (
- "context"
- "time"
-)
-
-const (
- KeyTraceID = "trace_id"
- KeySpanID = "span_id"
- KeyPrefixTag = "tag:"
-)
-
-// Exemplar is an example data point associated with each bucket of a
-// distribution type aggregation.
-type Exemplar struct {
- Value float64 // the value that was recorded
- Timestamp time.Time // the time the value was recorded
- Attachments Attachments // attachments (if any)
-}
-
-// Attachments is a map of extra values associated with a recorded data point.
-// The map should only be mutated from AttachmentExtractor functions.
-type Attachments map[string]string
-
-// AttachmentExtractor is a function capable of extracting exemplar attachments
-// from the context used to record measurements.
-// The map passed to the function should be mutated and returned. It will
-// initially be nil: the first AttachmentExtractor that would like to add keys to the
-// map is responsible for initializing it.
-type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments
-
-var extractors []AttachmentExtractor
-
-// RegisterAttachmentExtractor registers the given extractor associated with the exemplar
-// type name.
-//
-// Extractors will be used to attempt to extract exemplars from the context
-// associated with each recorded measurement.
-//
-// Packages that support exemplars should register their extractor functions on
-// initialization.
-//
-// RegisterAttachmentExtractor should not be called after any measurements have
-// been recorded.
-func RegisterAttachmentExtractor(e AttachmentExtractor) {
- extractors = append(extractors, e)
-}
-
-// NewFromContext extracts exemplars from the given context.
-// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an
-// unspecified order to add attachments to the exemplar.
-func AttachmentsFromContext(ctx context.Context) Attachments {
- var a Attachments
- for _, extractor := range extractors {
- a = extractor(ctx, a)
- }
- return a
-}
diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go
index 50665dcb1ee..203bd38adfe 100644
--- a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go
+++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go
@@ -21,7 +21,6 @@ import (
"fmt"
"log"
"net/http"
- "sort"
"sync"
"go.opencensus.io/internal"
@@ -44,9 +43,10 @@ type Exporter struct {
// Options contains options for configuring the exporter.
type Options struct {
- Namespace string
- Registry *prometheus.Registry
- OnError func(err error)
+ Namespace string
+ Registry *prometheus.Registry
+ OnError func(err error)
+ ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views.
}
// NewExporter returns an exporter that exports stats to Prometheus.
@@ -80,7 +80,7 @@ func (c *collector) registerViews(views ...*view.View) {
viewName(c.opts.Namespace, view),
view.Description,
tagKeysToLabels(view.TagKeys),
- nil,
+ c.opts.ConstLabels,
)
c.registeredViewsMu.Lock()
c.registeredViews[sig] = desc
@@ -207,40 +207,24 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) {
switch data := row.Data.(type) {
case *view.CountData:
- return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...)
+ return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...)
case *view.DistributionData:
points := make(map[float64]uint64)
// Histograms are cumulative in Prometheus.
- // 1. Sort buckets in ascending order but, retain
- // their indices for reverse lookup later on.
- // TODO: If there is a guarantee that distribution elements
- // are always sorted, then skip the sorting.
- indicesMap := make(map[float64]int)
- buckets := make([]float64, 0, len(v.Aggregation.Buckets))
- for i, b := range v.Aggregation.Buckets {
- if _, ok := indicesMap[b]; !ok {
- indicesMap[b] = i
- buckets = append(buckets, b)
- }
- }
- sort.Float64s(buckets)
-
- // 2. Now that the buckets are sorted by magnitude
- // we can create cumulative indicesmap them back by reverse index
+ // Get cumulative bucket counts.
cumCount := uint64(0)
- for _, b := range buckets {
- i := indicesMap[b]
+ for i, b := range v.Aggregation.Buckets {
cumCount += uint64(data.CountPerBucket[i])
points[b] = cumCount
}
- return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...)
+ return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...)
case *view.SumData:
- return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...)
+ return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...)
case *view.LastValueData:
- return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags)...)
+ return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...)
default:
return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation)
@@ -254,14 +238,6 @@ func tagKeysToLabels(keys []tag.Key) (labels []string) {
return labels
}
-func tagsToLabels(tags []tag.Tag) []string {
- var names []string
- for _, tag := range tags {
- names = append(names, internal.Sanitize(tag.Key.Name()))
- }
- return names
-}
-
func newCollector(opts Options, registrar *prometheus.Registry) *collector {
return &collector{
reg: registrar,
@@ -271,10 +247,21 @@ func newCollector(opts Options, registrar *prometheus.Registry) *collector {
}
}
-func tagValues(t []tag.Tag) []string {
+func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string {
var values []string
+ // Add empty string for all missing keys in the tags map.
+ idx := 0
for _, t := range t {
+ for t.Key != expectedKeys[idx] {
+ idx++
+ values = append(values, "")
+ }
values = append(values, t.Value)
+ idx++
+ }
+ for idx < len(expectedKeys) {
+ idx++
+ values = append(values, "")
}
return values
}
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
index e1d1238d011..9a638781cf1 100644
--- a/vendor/go.opencensus.io/internal/internal.go
+++ b/vendor/go.opencensus.io/internal/internal.go
@@ -18,12 +18,12 @@ import (
"fmt"
"time"
- "go.opencensus.io"
+ opencensus "go.opencensus.io"
)
// UserAgent is the user agent to be added to the outgoing
// requests from the exporters.
-var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version())
+var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
// MonotonicEndTime returns the end time at present
// but offset from start, monotonically.
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
index 3b1af8b4b8b..41b2c3fc038 100644
--- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
+++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
@@ -17,6 +17,7 @@
// used interally by the stats collector.
package tagencoding // import "go.opencensus.io/internal/tagencoding"
+// Values represent the encoded buffer for the values.
type Values struct {
Buffer []byte
WriteIndex int
@@ -31,6 +32,7 @@ func (vb *Values) growIfRequired(expected int) {
}
}
+// WriteValue is the helper method to encode Values from map[Key][]byte.
func (vb *Values) WriteValue(v []byte) {
length := len(v) & 0xff
vb.growIfRequired(1 + length)
@@ -49,7 +51,7 @@ func (vb *Values) WriteValue(v []byte) {
vb.WriteIndex += length
}
-// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte.
+// ReadValue is the helper method to decode Values to a map[Key][]byte.
func (vb *Values) ReadValue() []byte {
// read length of v
length := int(vb.Buffer[vb.ReadIndex])
@@ -67,6 +69,7 @@ func (vb *Values) ReadValue() []byte {
return v
}
+// Bytes returns a reference to already written bytes in the Buffer.
func (vb *Values) Bytes() []byte {
return vb.Buffer[:vb.WriteIndex]
}
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
index 553ca68dc4b..073af7b473a 100644
--- a/vendor/go.opencensus.io/internal/traceinternals.go
+++ b/vendor/go.opencensus.io/internal/traceinternals.go
@@ -22,6 +22,7 @@ import (
// TODO(#412): remove this
var Trace interface{}
+// LocalSpanStoreEnabled true if the local span store is enabled.
var LocalSpanStoreEnabled bool
// BucketConfiguration stores the number of samples to store for span buckets
diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/metric/metricdata/doc.go
similarity index 72%
rename from vendor/go.opencensus.io/stats/internal/validation.go
rename to vendor/go.opencensus.io/metric/metricdata/doc.go
index b946667f961..52a7b3bf850 100644
--- a/vendor/go.opencensus.io/stats/internal/validation.go
+++ b/vendor/go.opencensus.io/metric/metricdata/doc.go
@@ -12,17 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package internal // import "go.opencensus.io/stats/internal"
-
-const (
- MaxNameLength = 255
-)
-
-func IsPrintable(str string) bool {
- for _, r := range str {
- if !(r >= ' ' && r <= '~') {
- return false
- }
- }
- return true
-}
+// Package metricdata contains the metrics data model.
+//
+// This is an EXPERIMENTAL package, and may change in arbitrary ways without
+// notice.
+package metricdata // import "go.opencensus.io/metric/metricdata"
diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
new file mode 100644
index 00000000000..cdbeef0586a
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
@@ -0,0 +1,33 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+ "time"
+)
+
+// Exemplar is an example data point associated with each bucket of a
+// distribution type aggregation.
+//
+// Their purpose is to provide an example of the kind of thing
+// (request, RPC, trace span, etc.) that resulted in that measurement.
+type Exemplar struct {
+ Value float64 // the value that was recorded
+ Timestamp time.Time // the time the value was recorded
+ Attachments Attachments // attachments (if any)
+}
+
+// Attachments is a map of extra values associated with a recorded data point.
+type Attachments map[string]interface{}
diff --git a/vendor/go.opencensus.io/trace/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/label.go
similarity index 52%
rename from vendor/go.opencensus.io/trace/exemplar.go
rename to vendor/go.opencensus.io/metric/metricdata/label.go
index 416d80590db..87c55b9c86e 100644
--- a/vendor/go.opencensus.io/trace/exemplar.go
+++ b/vendor/go.opencensus.io/metric/metricdata/label.go
@@ -12,32 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package trace
+package metricdata
-import (
- "context"
- "encoding/hex"
-
- "go.opencensus.io/exemplar"
-)
-
-func init() {
- exemplar.RegisterAttachmentExtractor(attachSpanContext)
+// LabelValue represents the value of a label.
+// The zero value represents a missing label value, which may be treated
+// differently to an empty string value by some back ends.
+type LabelValue struct {
+ Value string // string value of the label
+ Present bool // flag that indicates whether a value is present or not
}
-func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments {
- span := FromContext(ctx)
- if span == nil {
- return a
- }
- sc := span.SpanContext()
- if !sc.IsSampled() {
- return a
- }
- if a == nil {
- a = make(exemplar.Attachments)
- }
- a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:])
- a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:])
- return a
+// NewLabelValue creates a new non-nil LabelValue that represents the given string.
+func NewLabelValue(val string) LabelValue {
+ return LabelValue{Value: val, Present: true}
}
diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go
new file mode 100644
index 00000000000..6ccdec58372
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/metric.go
@@ -0,0 +1,46 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+ "time"
+
+ "go.opencensus.io/resource"
+)
+
+// Descriptor holds metadata about a metric.
+type Descriptor struct {
+ Name string // full name of the metric
+ Description string // human-readable description
+ Unit Unit // units for the measure
+ Type Type // type of measure
+ LabelKeys []string // label keys
+}
+
+// Metric represents a quantity measured against a resource with different
+// label value combinations.
+type Metric struct {
+ Descriptor Descriptor // metric descriptor
+ Resource *resource.Resource // resource against which this was measured
+ TimeSeries []*TimeSeries // one time series for each combination of label values
+}
+
+// TimeSeries is a sequence of points associated with a combination of label
+// values.
+type TimeSeries struct {
+ LabelValues []LabelValue // label values, same order as keys in the metric descriptor
+ Points []Point // points sequence
+ StartTime time.Time // time we started recording this time series
+}
diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go
new file mode 100644
index 00000000000..7fe057b19cf
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/point.go
@@ -0,0 +1,193 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+ "time"
+)
+
+// Point is a single data point of a time series.
+type Point struct {
+ // Time is the point in time that this point represents in a time series.
+ Time time.Time
+ // Value is the value of this point. Prefer using ReadValue to switching on
+ // the value type, since new value types might be added.
+ Value interface{}
+}
+
+//go:generate stringer -type ValueType
+
+// NewFloat64Point creates a new Point holding a float64 value.
+func NewFloat64Point(t time.Time, val float64) Point {
+ return Point{
+ Value: val,
+ Time: t,
+ }
+}
+
+// NewInt64Point creates a new Point holding an int64 value.
+func NewInt64Point(t time.Time, val int64) Point {
+ return Point{
+ Value: val,
+ Time: t,
+ }
+}
+
+// NewDistributionPoint creates a new Point holding a Distribution value.
+func NewDistributionPoint(t time.Time, val *Distribution) Point {
+ return Point{
+ Value: val,
+ Time: t,
+ }
+}
+
+// NewSummaryPoint creates a new Point holding a Summary value.
+func NewSummaryPoint(t time.Time, val *Summary) Point {
+ return Point{
+ Value: val,
+ Time: t,
+ }
+}
+
+// ValueVisitor allows reading the value of a point.
+type ValueVisitor interface {
+ VisitFloat64Value(float64)
+ VisitInt64Value(int64)
+ VisitDistributionValue(*Distribution)
+ VisitSummaryValue(*Summary)
+}
+
+// ReadValue accepts a ValueVisitor and calls the appropriate method with the
+// value of this point.
+// Consumers of Point should use this in preference to switching on the type
+// of the value directly, since new value types may be added.
+func (p Point) ReadValue(vv ValueVisitor) {
+ switch v := p.Value.(type) {
+ case int64:
+ vv.VisitInt64Value(v)
+ case float64:
+ vv.VisitFloat64Value(v)
+ case *Distribution:
+ vv.VisitDistributionValue(v)
+ case *Summary:
+ vv.VisitSummaryValue(v)
+ default:
+ panic("unexpected value type")
+ }
+}
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type Distribution struct {
+ // Count is the number of values in the population. Must be non-negative. This value
+ // must equal the sum of the values in bucket_counts if a histogram is
+ // provided.
+ Count int64
+ // Sum is the sum of the values in the population. If count is zero then this field
+ // must be zero.
+ Sum float64
+ // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If count is zero then this field must be zero.
+ SumOfSquaredDeviation float64
+ // BucketOptions describes the bounds of the histogram buckets in this
+ // distribution.
+ //
+ // A Distribution may optionally contain a histogram of the values in the
+ // population.
+ //
+ // If nil, there is no associated histogram.
+ BucketOptions *BucketOptions
+ // Buckets: if the distribution does not have a histogram, then omit this field.
+ // If there is a histogram, then the sum of the values in the Bucket counts
+ // must equal the value in the count field of the distribution.
+ Buckets []Bucket
+}
+
+// BucketOptions describes the bounds of the histogram buckets in this
+// distribution.
+type BucketOptions struct {
+ // Bounds specifies a set of bucket upper bounds.
+ // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
+ // index i are:
+ //
+ // [0, Bounds[i]) for i == 0
+ // [Bounds[i-1], Bounds[i]) for 0 < i < N-1
+ // [Bounds[i-1], +infinity) for i == N-1
+ Bounds []float64
+}
+
+// Bucket represents a single bucket (value range) in a distribution.
+type Bucket struct {
+ // Count is the number of values in each bucket of the histogram, as described in
+ // bucket_bounds.
+ Count int64
+ // Exemplar associated with this bucket (if any).
+ Exemplar *Exemplar
+}
+
+// Summary is a representation of percentiles.
+type Summary struct {
+ // Count is the cumulative count (if available).
+ Count int64
+ // Sum is the cumulative sum of values (if available).
+ Sum float64
+ // HasCountAndSum is true if Count and Sum are available.
+ HasCountAndSum bool
+ // Snapshot represents percentiles calculated over an arbitrary time window.
+ // The values in this struct can be reset at arbitrary unknown times, with
+ // the requirement that all of them are reset at the same time.
+ Snapshot Snapshot
+}
+
+// Snapshot represents percentiles over an arbitrary time.
+// The values in this struct can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type Snapshot struct {
+ // Count is the number of values in the snapshot. Optional since some systems don't
+ // expose this. Set to 0 if not available.
+ Count int64
+ // Sum is the sum of values in the snapshot. Optional since some systems don't
+ // expose this. If count is 0 then this field must be zero.
+ Sum float64
+ // Percentiles is a map from percentile (range (0-100.0]) to the value of
+ // the percentile.
+ Percentiles map[float64]float64
+}
+
+//go:generate stringer -type Type
+
+// Type is the overall type of metric, including its value type and whether it
+// represents a cumulative total (since the start time) or if it represents a
+// gauge value.
+type Type int
+
+// Metric types.
+const (
+ TypeGaugeInt64 Type = iota
+ TypeGaugeFloat64
+ TypeGaugeDistribution
+ TypeCumulativeInt64
+ TypeCumulativeFloat64
+ TypeCumulativeDistribution
+ TypeSummary
+)
diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go
new file mode 100644
index 00000000000..c3f8ec27b53
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type Type"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
+
+var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
+
+func (i Type) String() string {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go
new file mode 100644
index 00000000000..b483a1371b0
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/unit.go
@@ -0,0 +1,27 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// Unit is a string encoded according to the case-sensitive abbreviations from the
+// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
+type Unit string
+
+// Predefined units. To record against a unit not represented here, create your
+// own Unit type constant from a string.
+const (
+ UnitDimensionless Unit = "1"
+ UnitBytes Unit = "By"
+ UnitMilliseconds Unit = "ms"
+)
diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go
new file mode 100644
index 00000000000..ca1f3904938
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go
@@ -0,0 +1,78 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+ "sync"
+)
+
+// Manager maintains a list of active producers. Producers can register
+// with the manager to allow readers to read all metrics provided by them.
+// Readers can retrieve all producers registered with the manager,
+// read metrics from the producers and export them.
+type Manager struct {
+ mu sync.RWMutex
+ producers map[Producer]struct{}
+}
+
+var prodMgr *Manager
+var once sync.Once
+
+// GlobalManager is a single instance of producer manager
+// that is used by all producers and all readers.
+func GlobalManager() *Manager {
+ once.Do(func() {
+ prodMgr = &Manager{}
+ prodMgr.producers = make(map[Producer]struct{})
+ })
+ return prodMgr
+}
+
+// AddProducer adds the producer to the Manager if it is not already present.
+func (pm *Manager) AddProducer(producer Producer) {
+ if producer == nil {
+ return
+ }
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
+ pm.producers[producer] = struct{}{}
+}
+
+// DeleteProducer deletes the producer from the Manager if it is present.
+func (pm *Manager) DeleteProducer(producer Producer) {
+ if producer == nil {
+ return
+ }
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
+ delete(pm.producers, producer)
+}
+
+// GetAll returns a slice of all producers currently registered with
+// the Manager. For each call it generates a new slice. The slice
+// should not be cached as registration may change at any time. It is
+// typically called periodically by exporter to read metrics from
+// the producers.
+func (pm *Manager) GetAll() []Producer {
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
+ producers := make([]Producer, len(pm.producers))
+ i := 0
+ for producer := range pm.producers {
+ producers[i] = producer
+ i++
+ }
+ return producers
+}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go
new file mode 100644
index 00000000000..6cee9ed1783
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go
@@ -0,0 +1,28 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+ "go.opencensus.io/metric/metricdata"
+)
+
+// Producer is a source of metrics.
+type Producer interface {
+ // Read should return the current values of all metrics supported by this
+ // metric provider.
+ // The returned metrics should be unique for each combination of name and
+ // resource.
+ Read() []*metricdata.Metric
+}
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
index 62f03486a2e..d2565f1e2bf 100644
--- a/vendor/go.opencensus.io/opencensus.go
+++ b/vendor/go.opencensus.io/opencensus.go
@@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
- return "0.18.0"
+ return "0.21.0"
}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
index 1737809e721..e9991fe0fb1 100644
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
@@ -51,9 +51,9 @@ type rpcData struct {
// The following variables define the default hard-coded auxiliary data used by
// both the default GRPC client and GRPC server metrics.
var (
- DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
- DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
+ DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+ DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+ DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
)
// Server tags are applied to the context used to process each RPC, as well as
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
index 066ebb87f85..17142aabe00 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
@@ -34,8 +34,8 @@ type statsTransport struct {
// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
ctx, _ := tag.New(req.Context(),
- tag.Upsert(KeyClientHost, req.URL.Host),
- tag.Upsert(Host, req.URL.Host),
+ tag.Upsert(KeyClientHost, req.Host),
+ tag.Upsert(Host, req.Host),
tag.Upsert(KeyClientPath, req.URL.Path),
tag.Upsert(Path, req.URL.Path),
tag.Upsert(KeyClientMethod, req.Method),
@@ -61,11 +61,14 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
track.end()
} else {
track.statusCode = resp.StatusCode
+ if req.Method != "HEAD" {
+ track.respContentLength = resp.ContentLength
+ }
if resp.Body == nil {
track.end()
} else {
track.body = resp.Body
- resp.Body = track
+ resp.Body = wrappedBody(track, resp.Body)
}
}
return resp, err
@@ -82,13 +85,14 @@ func (t statsTransport) CancelRequest(req *http.Request) {
}
type tracker struct {
- ctx context.Context
- respSize int64
- reqSize int64
- start time.Time
- body io.ReadCloser
- statusCode int
- endOnce sync.Once
+ ctx context.Context
+ respSize int64
+ respContentLength int64
+ reqSize int64
+ start time.Time
+ body io.ReadCloser
+ statusCode int
+ endOnce sync.Once
}
var _ io.ReadCloser = (*tracker)(nil)
@@ -96,9 +100,13 @@ var _ io.ReadCloser = (*tracker)(nil)
func (t *tracker) end() {
t.endOnce.Do(func() {
latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
+ respSize := t.respSize
+ if t.respSize == 0 && t.respContentLength > 0 {
+ respSize = t.respContentLength
+ }
m := []stats.Measurement{
ClientSentBytes.M(t.reqSize),
- ClientReceivedBytes.M(t.respSize),
+ ClientReceivedBytes.M(respSize),
ClientRoundtripLatency.M(latencyMs),
ClientLatency.M(latencyMs),
ClientResponseBytes.M(t.respSize),
@@ -116,9 +124,9 @@ func (t *tracker) end() {
func (t *tracker) Read(b []byte) (int, error) {
n, err := t.body.Read(b)
+ t.respSize += int64(n)
switch err {
case nil:
- t.respSize += int64(n)
return n, nil
case io.EOF:
t.end()
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
index f777772ec93..2f1c7f0063e 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
@@ -38,7 +38,7 @@ const (
// because there are additional fields not represented in the
// OpenCensus span context. Spans created from the incoming
// header will be the direct children of the client-side span.
-// Similarly, reciever of the outgoing spans should use client-side
+// Similarly, receiver of the outgoing spans should use client-side
// span created by OpenCensus as the parent.
type HTTPFormat struct{}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go
index dbe22d58611..5e6a3430760 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/route.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/route.go
@@ -15,11 +15,21 @@
package ochttp
import (
+ "context"
"net/http"
"go.opencensus.io/tag"
)
+// SetRoute sets the http_server_route tag to the given value.
+// It's useful when an HTTP framework does not support the http.Handler interface
+// and using WithRouteTag is not an option, but provides a way to hook into the request flow.
+func SetRoute(ctx context.Context, route string) {
+ if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
+ a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
+ }
+}
+
// WithRouteTag returns an http.Handler that records stats with the
// http_server_route tag set to the given value.
func WithRouteTag(handler http.Handler, route string) http.Handler {
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
index ff72de97a8c..5fe15e89ff1 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -118,7 +118,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ
span.AddLink(trace.Link{
TraceID: sc.TraceID,
SpanID: sc.SpanID,
- Type: trace.LinkTypeChild,
+ Type: trace.LinkTypeParent,
Attributes: nil,
})
}
@@ -136,7 +136,7 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool)
func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
ctx, _ := tag.New(r.Context(),
- tag.Upsert(Host, r.URL.Host),
+ tag.Upsert(Host, r.Host),
tag.Upsert(Path, r.URL.Path),
tag.Upsert(Method, r.Method))
track := &trackingResponseWriter{
diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go
index 46dcc8e57eb..63bbcda5e33 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/stats.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go
@@ -20,19 +20,31 @@ import (
"go.opencensus.io/tag"
)
-// The following client HTTP measures are supported for use in custom views.
+// Deprecated: client HTTP measures.
var (
// Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
- ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
+ ClientRequestCount = stats.Int64(
+ "opencensus.io/http/client/request_count",
+ "Number of HTTP requests started",
+ stats.UnitDimensionless)
// Deprecated: Use ClientSentBytes.
- ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
+ ClientRequestBytes = stats.Int64(
+ "opencensus.io/http/client/request_bytes",
+ "HTTP request body size if set as ContentLength (uncompressed)",
+ stats.UnitBytes)
// Deprecated: Use ClientReceivedBytes.
- ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
+ ClientResponseBytes = stats.Int64(
+ "opencensus.io/http/client/response_bytes",
+ "HTTP response body size (uncompressed)",
+ stats.UnitBytes)
// Deprecated: Use ClientRoundtripLatency.
- ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds)
+ ClientLatency = stats.Float64(
+ "opencensus.io/http/client/latency",
+ "End-to-end latency",
+ stats.UnitMilliseconds)
)
-// Client measures supported for use in custom views.
+// The following client HTTP measures are supported for use in custom views.
var (
ClientSentBytes = stats.Int64(
"opencensus.io/http/client/sent_bytes",
@@ -53,10 +65,22 @@ var (
// The following server HTTP measures are supported for use in custom views:
var (
- ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless)
- ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes)
- ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes)
- ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds)
+ ServerRequestCount = stats.Int64(
+ "opencensus.io/http/server/request_count",
+ "Number of HTTP requests started",
+ stats.UnitDimensionless)
+ ServerRequestBytes = stats.Int64(
+ "opencensus.io/http/server/request_bytes",
+ "HTTP request body size if set as ContentLength (uncompressed)",
+ stats.UnitBytes)
+ ServerResponseBytes = stats.Int64(
+ "opencensus.io/http/server/response_bytes",
+ "HTTP response body size (uncompressed)",
+ stats.UnitBytes)
+ ServerLatency = stats.Float64(
+ "opencensus.io/http/server/latency",
+ "End-to-end latency",
+ stats.UnitMilliseconds)
)
// The following tags are applied to stats recorded by this package. Host, Path
@@ -104,11 +128,11 @@ var (
// Default distributions used by views in this package.
var (
- DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
+ DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
+ DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
)
-// Package ochttp provides some convenience views.
+// Package ochttp provides some convenience views for client measures.
// You still need to register these views for data to actually be collected.
var (
ClientSentBytesDistribution = &view.View{
@@ -144,6 +168,7 @@ var (
}
)
+// Deprecated: Old client Views.
var (
// Deprecated: No direct replacement, but see ClientCompletedCount.
ClientRequestCountView = &view.View{
@@ -161,7 +186,7 @@ var (
Aggregation: DefaultSizeDistribution,
}
- // Deprecated: Use ClientReceivedBytesDistribution.
+ // Deprecated: Use ClientReceivedBytesDistribution instead.
ClientResponseBytesView = &view.View{
Name: "opencensus.io/http/client/response_bytes",
Description: "Size distribution of HTTP response body",
@@ -169,7 +194,7 @@ var (
Aggregation: DefaultSizeDistribution,
}
- // Deprecated: Use ClientRoundtripLatencyDistribution.
+ // Deprecated: Use ClientRoundtripLatencyDistribution instead.
ClientLatencyView = &view.View{
Name: "opencensus.io/http/client/latency",
Description: "Latency distribution of HTTP requests",
@@ -177,7 +202,7 @@ var (
Aggregation: DefaultLatencyDistribution,
}
- // Deprecated: Use ClientCompletedCount.
+ // Deprecated: Use ClientCompletedCount instead.
ClientRequestCountByMethod = &view.View{
Name: "opencensus.io/http/client/request_count_by_method",
Description: "Client request count by HTTP method",
@@ -186,7 +211,7 @@ var (
Aggregation: view.Count(),
}
- // Deprecated: Use ClientCompletedCount.
+ // Deprecated: Use ClientCompletedCount instead.
ClientResponseCountByStatusCode = &view.View{
Name: "opencensus.io/http/client/response_count_by_status_code",
Description: "Client response count by status code",
@@ -196,6 +221,8 @@ var (
}
)
+// Package ochttp provides some convenience views for server measures.
+// You still need to register these views for data to actually be collected.
var (
ServerRequestCountView = &view.View{
Name: "opencensus.io/http/server/request_count",
diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go
index 819a2d5ff9a..c23b97fb1fe 100644
--- a/vendor/go.opencensus.io/plugin/ochttp/trace.go
+++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go
@@ -34,6 +34,7 @@ const (
HostAttribute = "http.host"
MethodAttribute = "http.method"
PathAttribute = "http.path"
+ URLAttribute = "http.url"
UserAgentAttribute = "http.user_agent"
StatusCodeAttribute = "http.status_code"
)
@@ -93,7 +94,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
// span.End() will be invoked after
// a read from resp.Body returns io.EOF or when
// resp.Body.Close() is invoked.
- resp.Body = &bodyTracker{rc: resp.Body, span: span}
+ bt := &bodyTracker{rc: resp.Body, span: span}
+ resp.Body = wrappedBody(bt, resp.Body)
return resp, err
}
@@ -149,12 +151,21 @@ func spanNameFromURL(req *http.Request) string {
}
func requestAttrs(r *http.Request) []trace.Attribute {
- return []trace.Attribute{
+ userAgent := r.UserAgent()
+
+ attrs := make([]trace.Attribute, 0, 5)
+ attrs = append(attrs,
trace.StringAttribute(PathAttribute, r.URL.Path),
- trace.StringAttribute(HostAttribute, r.URL.Host),
+ trace.StringAttribute(URLAttribute, r.URL.String()),
+ trace.StringAttribute(HostAttribute, r.Host),
trace.StringAttribute(MethodAttribute, r.Method),
- trace.StringAttribute(UserAgentAttribute, r.UserAgent()),
+ )
+
+ if userAgent != "" {
+ attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
}
+
+ return attrs
}
func responseAttrs(resp *http.Response) []trace.Attribute {
diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
new file mode 100644
index 00000000000..7d75cae2b18
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
@@ -0,0 +1,44 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+ "io"
+)
+
+// wrappedBody returns a wrapped version of the original
+// Body and only implements the same combination of additional
+// interfaces as the original.
+func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
+ var (
+ wr, i0 = body.(io.Writer)
+ )
+ switch {
+ case !i0:
+ return struct {
+ io.ReadCloser
+ }{wrapper}
+
+ case i0:
+ return struct {
+ io.ReadCloser
+ io.Writer
+ }{wrapper, wr}
+ default:
+ return struct {
+ io.ReadCloser
+ }{wrapper}
+ }
+}
diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go
new file mode 100644
index 00000000000..b1764e1d3b9
--- /dev/null
+++ b/vendor/go.opencensus.io/resource/resource.go
@@ -0,0 +1,164 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides functionality for resources, which capture
+// identifying information about the entities for which signals are exported.
+package resource
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Environment variables used by FromEnv to decode a resource.
+const (
+ EnvVarType = "OC_RESOURCE_TYPE"
+ EnvVarLabels = "OC_RESOURCE_LABELS"
+)
+
+// Resource describes an entity about which identifying information and metadata is exposed.
+// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
+type Resource struct {
+ Type string
+ Labels map[string]string
+}
+
+// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
+func EncodeLabels(labels map[string]string) string {
+ sortedKeys := make([]string, 0, len(labels))
+ for k := range labels {
+ sortedKeys = append(sortedKeys, k)
+ }
+ sort.Strings(sortedKeys)
+
+ s := ""
+ for i, k := range sortedKeys {
+ if i > 0 {
+ s += ","
+ }
+ s += k + "=" + strconv.Quote(labels[k])
+ }
+ return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `key1="value1",key2="value2",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+ m := map[string]string{}
+ // Ensure a trailing comma, which allows us to keep the regex simpler
+ s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+ for len(s) > 0 {
+ match := labelRegex.FindStringSubmatch(s)
+ if len(match) == 0 {
+ return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+ }
+ v := match[2]
+ if v == "" {
+ v = match[3]
+ } else {
+ var err error
+ if v, err = strconv.Unquote(v); err != nil {
+ return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+ }
+ }
+ m[match[1]] = v
+
+ s = s[len(match[0]):]
+ }
+ return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+ res := &Resource{
+ Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+ }
+ labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+ if labels == "" {
+ return res, nil
+ }
+ var err error
+ if res.Labels, err = DecodeLabels(labels); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ res := &Resource{
+ Type: a.Type,
+ Labels: map[string]string{},
+ }
+ if res.Type == "" {
+ res.Type = b.Type
+ }
+ for k, v := range b.Labels {
+ res.Labels[k] = v
+ }
+ // Labels from resource a overwrite labels from resource b.
+ for k, v := range a.Labels {
+ res.Labels[k] = v
+ }
+ return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type or label key is already set,
+// the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+ return func(ctx context.Context) (*Resource, error) {
+ return detectAll(ctx, detectors...)
+ }
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ var res *Resource
+ for _, d := range detectors {
+ r, err := d(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res = merge(res, r)
+ }
+ return res, nil
+}
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go
index ed545520542..36935e629b6 100644
--- a/vendor/go.opencensus.io/stats/internal/record.go
+++ b/vendor/go.opencensus.io/stats/internal/record.go
@@ -19,7 +19,7 @@ import (
)
// DefaultRecorder will be called for each Record call.
-var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string)
+var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
// SubscriptionReporter reports when a view subscribed with a measure.
var SubscriptionReporter func(measure string)
diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go
index 64d02b19618..1ffd3cefc73 100644
--- a/vendor/go.opencensus.io/stats/measure.go
+++ b/vendor/go.opencensus.io/stats/measure.go
@@ -68,21 +68,6 @@ func (m *measureDescriptor) subscribed() bool {
return atomic.LoadInt32(&m.subs) == 1
}
-// Name returns the name of the measure.
-func (m *measureDescriptor) Name() string {
- return m.name
-}
-
-// Description returns the description of the measure.
-func (m *measureDescriptor) Description() string {
- return m.description
-}
-
-// Unit returns the unit of the measure.
-func (m *measureDescriptor) Unit() string {
- return m.unit
-}
-
var (
mu sync.RWMutex
measures = make(map[string]*measureDescriptor)
@@ -108,8 +93,9 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
// provides methods to create measurements of their kind. For example, Int64Measure
// provides M to convert an int64 into a measurement.
type Measurement struct {
- v float64
- m *measureDescriptor
+ v float64
+ m Measure
+ desc *measureDescriptor
}
// Value returns the value of the Measurement as a float64.
diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go
index acedb21c44c..f02c1eda845 100644
--- a/vendor/go.opencensus.io/stats/measure_float64.go
+++ b/vendor/go.opencensus.io/stats/measure_float64.go
@@ -17,13 +17,17 @@ package stats
// Float64Measure is a measure for float64 values.
type Float64Measure struct {
- *measureDescriptor
+ desc *measureDescriptor
}
// M creates a new float64 measurement.
// Use Record to record measurements.
func (m *Float64Measure) M(v float64) Measurement {
- return Measurement{m: m.measureDescriptor, v: v}
+ return Measurement{
+ m: m,
+ desc: m.desc,
+ v: v,
+ }
}
// Float64 creates a new measure for float64 values.
@@ -34,3 +38,18 @@ func Float64(name, description, unit string) *Float64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Float64Measure{mi}
}
+
+// Name returns the name of the measure.
+func (m *Float64Measure) Name() string {
+ return m.desc.name
+}
+
+// Description returns the description of the measure.
+func (m *Float64Measure) Description() string {
+ return m.desc.description
+}
+
+// Unit returns the unit of the measure.
+func (m *Float64Measure) Unit() string {
+ return m.desc.unit
+}
diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go
index c4243ba7494..d101d797358 100644
--- a/vendor/go.opencensus.io/stats/measure_int64.go
+++ b/vendor/go.opencensus.io/stats/measure_int64.go
@@ -17,13 +17,17 @@ package stats
// Int64Measure is a measure for int64 values.
type Int64Measure struct {
- *measureDescriptor
+ desc *measureDescriptor
}
// M creates a new int64 measurement.
// Use Record to record measurements.
func (m *Int64Measure) M(v int64) Measurement {
- return Measurement{m: m.measureDescriptor, v: float64(v)}
+ return Measurement{
+ m: m,
+ desc: m.desc,
+ v: float64(v),
+ }
}
// Int64 creates a new measure for int64 values.
@@ -34,3 +38,18 @@ func Int64(name, description, unit string) *Int64Measure {
mi := registerMeasureHandle(name, description, unit)
return &Int64Measure{mi}
}
+
+// Name returns the name of the measure.
+func (m *Int64Measure) Name() string {
+ return m.desc.name
+}
+
+// Description returns the description of the measure.
+func (m *Int64Measure) Description() string {
+ return m.desc.description
+}
+
+// Unit returns the unit of the measure.
+func (m *Int64Measure) Unit() string {
+ return m.desc.unit
+}
diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go
index 0aced02c304..d2af0a60dab 100644
--- a/vendor/go.opencensus.io/stats/record.go
+++ b/vendor/go.opencensus.io/stats/record.go
@@ -18,7 +18,6 @@ package stats
import (
"context"
- "go.opencensus.io/exemplar"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
@@ -43,7 +42,7 @@ func Record(ctx context.Context, ms ...Measurement) {
}
record := false
for _, m := range ms {
- if m.m.subscribed() {
+ if m.desc.subscribed() {
record = true
break
}
@@ -51,7 +50,8 @@ func Record(ctx context.Context, ms ...Measurement) {
if !record {
return
}
- recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx))
+ // TODO(songy23): fix attachments.
+ recorder(tag.FromContext(ctx), ms, map[string]interface{}{})
}
// RecordWithTags records one or multiple measurements at once.
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
index 960b94601ff..d500e67f733 100644
--- a/vendor/go.opencensus.io/stats/view/aggregation_data.go
+++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go
@@ -17,8 +17,9 @@ package view
import (
"math"
+ "time"
- "go.opencensus.io/exemplar"
+ "go.opencensus.io/metric/metricdata"
)
// AggregationData represents an aggregated value from a collection.
@@ -26,9 +27,10 @@ import (
// Mosts users won't directly access aggregration data.
type AggregationData interface {
isAggregationData() bool
- addSample(e *exemplar.Exemplar)
+ addSample(v float64, attachments map[string]interface{}, t time.Time)
clone() AggregationData
equal(other AggregationData) bool
+ toPoint(t metricdata.Type, time time.Time) metricdata.Point
}
const epsilon = 1e-9
@@ -43,7 +45,7 @@ type CountData struct {
func (a *CountData) isAggregationData() bool { return true }
-func (a *CountData) addSample(_ *exemplar.Exemplar) {
+func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
a.Value = a.Value + 1
}
@@ -60,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool {
return a.Value == a2.Value
}
+func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+ switch metricType {
+ case metricdata.TypeCumulativeInt64:
+ return metricdata.NewInt64Point(t, a.Value)
+ default:
+ panic("unsupported metricdata.Type")
+ }
+}
+
// SumData is the aggregated data for the Sum aggregation.
// A sum aggregation processes data and sums up the recordings.
//
@@ -70,8 +81,8 @@ type SumData struct {
func (a *SumData) isAggregationData() bool { return true }
-func (a *SumData) addSample(e *exemplar.Exemplar) {
- a.Value += e.Value
+func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+ a.Value += v
}
func (a *SumData) clone() AggregationData {
@@ -86,6 +97,17 @@ func (a *SumData) equal(other AggregationData) bool {
return math.Pow(a.Value-a2.Value, 2) < epsilon
}
+func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+ switch metricType {
+ case metricdata.TypeCumulativeInt64:
+ return metricdata.NewInt64Point(t, int64(a.Value))
+ case metricdata.TypeCumulativeFloat64:
+ return metricdata.NewFloat64Point(t, a.Value)
+ default:
+ panic("unsupported metricdata.Type")
+ }
+}
+
// DistributionData is the aggregated data for the
// Distribution aggregation.
//
@@ -102,7 +124,7 @@ type DistributionData struct {
CountPerBucket []int64 // number of occurrences per bucket
// ExemplarsPerBucket is slice the same length as CountPerBucket containing
// an exemplar for the associated bucket, or nil.
- ExemplarsPerBucket []*exemplar.Exemplar
+ ExemplarsPerBucket []*metricdata.Exemplar
bounds []float64 // histogram distribution of the values
}
@@ -110,7 +132,7 @@ func newDistributionData(bounds []float64) *DistributionData {
bucketCount := len(bounds) + 1
return &DistributionData{
CountPerBucket: make([]int64, bucketCount),
- ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount),
+ ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
bounds: bounds,
Min: math.MaxFloat64,
Max: math.SmallestNonzeroFloat64,
@@ -129,64 +151,62 @@ func (a *DistributionData) variance() float64 {
func (a *DistributionData) isAggregationData() bool { return true }
-func (a *DistributionData) addSample(e *exemplar.Exemplar) {
- f := e.Value
- if f < a.Min {
- a.Min = f
+// TODO(songy23): support exemplar attachments.
+func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
+ if v < a.Min {
+ a.Min = v
}
- if f > a.Max {
- a.Max = f
+ if v > a.Max {
+ a.Max = v
}
a.Count++
- a.addToBucket(e)
+ a.addToBucket(v, attachments, t)
if a.Count == 1 {
- a.Mean = f
+ a.Mean = v
return
}
oldMean := a.Mean
- a.Mean = a.Mean + (f-a.Mean)/float64(a.Count)
- a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean)
+ a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
+ a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
}
-func (a *DistributionData) addToBucket(e *exemplar.Exemplar) {
+func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
var count *int64
- var ex **exemplar.Exemplar
- for i, b := range a.bounds {
- if e.Value < b {
+ var i int
+ var b float64
+ for i, b = range a.bounds {
+ if v < b {
count = &a.CountPerBucket[i]
- ex = &a.ExemplarsPerBucket[i]
break
}
}
- if count == nil {
- count = &a.CountPerBucket[len(a.bounds)]
- ex = &a.ExemplarsPerBucket[len(a.bounds)]
+ if count == nil { // Last bucket.
+ i = len(a.bounds)
+ count = &a.CountPerBucket[i]
}
*count++
- *ex = maybeRetainExemplar(*ex, e)
+ if exemplar := getExemplar(v, attachments, t); exemplar != nil {
+ a.ExemplarsPerBucket[i] = exemplar
+ }
}
-func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar {
- if old == nil {
- return cur
+func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
+ if len(attachments) == 0 {
+ return nil
}
-
- // Heuristic to pick the "better" exemplar: first keep the one with a
- // sampled trace attachment, if neither have a trace attachment, pick the
- // one with more attachments.
- _, haveTraceID := cur.Attachments[exemplar.KeyTraceID]
- if haveTraceID || len(cur.Attachments) >= len(old.Attachments) {
- return cur
+ return &metricdata.Exemplar{
+ Value: v,
+ Timestamp: t,
+ Attachments: attachments,
}
- return old
}
func (a *DistributionData) clone() AggregationData {
c := *a
c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
- c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...)
+ c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
return &c
}
@@ -209,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool {
return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
}
+func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+ switch metricType {
+ case metricdata.TypeCumulativeDistribution:
+ buckets := []metricdata.Bucket{}
+ for i := 0; i < len(a.CountPerBucket); i++ {
+ buckets = append(buckets, metricdata.Bucket{
+ Count: a.CountPerBucket[i],
+ Exemplar: a.ExemplarsPerBucket[i],
+ })
+ }
+ bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}
+
+ val := &metricdata.Distribution{
+ Count: a.Count,
+ Sum: a.Sum(),
+ SumOfSquaredDeviation: a.SumOfSquaredDev,
+ BucketOptions: bucketOptions,
+ Buckets: buckets,
+ }
+ return metricdata.NewDistributionPoint(t, val)
+
+ default:
+ // TODO: [rghetia] when we have a use case for TypeGaugeDistribution.
+ panic("unsupported metricdata.Type")
+ }
+}
+
// LastValueData returns the last value recorded for LastValue aggregation.
type LastValueData struct {
Value float64
@@ -218,8 +265,8 @@ func (l *LastValueData) isAggregationData() bool {
return true
}
-func (l *LastValueData) addSample(e *exemplar.Exemplar) {
- l.Value = e.Value
+func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+ l.Value = v
}
func (l *LastValueData) clone() AggregationData {
@@ -233,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool {
}
return l.Value == a2.Value
}
+
+func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+ switch metricType {
+ case metricdata.TypeGaugeInt64:
+ return metricdata.NewInt64Point(t, int64(l.Value))
+ case metricdata.TypeGaugeFloat64:
+ return metricdata.NewFloat64Point(t, l.Value)
+ default:
+ panic("unsupported metricdata.Type")
+ }
+}
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
index 32415d48597..8a6a2c0fdc9 100644
--- a/vendor/go.opencensus.io/stats/view/collector.go
+++ b/vendor/go.opencensus.io/stats/view/collector.go
@@ -17,8 +17,7 @@ package view
import (
"sort"
-
- "go.opencensus.io/exemplar"
+ "time"
"go.opencensus.io/internal/tagencoding"
"go.opencensus.io/tag"
@@ -33,13 +32,13 @@ type collector struct {
a *Aggregation
}
-func (c *collector) addSample(s string, e *exemplar.Exemplar) {
+func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
aggregator, ok := c.signatures[s]
if !ok {
aggregator = c.a.newData()
c.signatures[s] = aggregator
}
- aggregator.addSample(e)
+ aggregator.addSample(v, attachments, t)
}
// collectRows returns a snapshot of the collected Row values.
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
index c2a08af6784..37f88e1d9fa 100644
--- a/vendor/go.opencensus.io/stats/view/view.go
+++ b/vendor/go.opencensus.io/stats/view/view.go
@@ -17,16 +17,15 @@ package view
import (
"bytes"
+ "errors"
"fmt"
"reflect"
"sort"
"sync/atomic"
"time"
- "go.opencensus.io/exemplar"
-
+ "go.opencensus.io/metric/metricdata"
"go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
)
@@ -69,6 +68,11 @@ func (v *View) same(other *View) bool {
v.Measure.Name() == other.Measure.Name()
}
+// ErrNegativeBucketBounds error returned if histogram contains negative bounds.
+//
+// Deprecated: this should not be public.
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")
+
// canonicalize canonicalizes v by setting explicit
// defaults for Name and Description and sorting the TagKeys
func (v *View) canonicalize() error {
@@ -90,20 +94,40 @@ func (v *View) canonicalize() error {
sort.Slice(v.TagKeys, func(i, j int) bool {
return v.TagKeys[i].Name() < v.TagKeys[j].Name()
})
+ sort.Float64s(v.Aggregation.Buckets)
+ for _, b := range v.Aggregation.Buckets {
+ if b < 0 {
+ return ErrNegativeBucketBounds
+ }
+ }
+ // drop 0 bucket silently.
+ v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)
+
return nil
}
+func dropZeroBounds(bounds ...float64) []float64 {
+ for i, bound := range bounds {
+ if bound > 0 {
+ return bounds[i:]
+ }
+ }
+ return []float64{}
+}
+
// viewInternal is the internal representation of a View.
type viewInternal struct {
- view *View // view is the canonicalized View definition associated with this view.
- subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
- collector *collector
+ view *View // view is the canonicalized View definition associated with this view.
+ subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access
+ collector *collector
+ metricDescriptor *metricdata.Descriptor
}
func newViewInternal(v *View) (*viewInternal, error) {
return &viewInternal{
- view: v,
- collector: &collector{make(map[string]AggregationData), v.Aggregation},
+ view: v,
+ collector: &collector{make(map[string]AggregationData), v.Aggregation},
+ metricDescriptor: viewToMetricDescriptor(v),
}, nil
}
@@ -129,12 +153,12 @@ func (v *viewInternal) collectedRows() []*Row {
return v.collector.collectedRows(v.view.TagKeys)
}
-func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) {
+func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
if !v.isSubscribed() {
return
}
sig := string(encodeWithKeys(m, v.view.TagKeys))
- v.collector.addSample(sig, e)
+ v.collector.addSample(sig, val, attachments, t)
}
// A Data is a set of rows about usage of the single measure associated
@@ -174,11 +198,23 @@ func (r *Row) Equal(other *Row) bool {
return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
}
+const maxNameLength = 255
+
+// Returns true if the given string contains only printable characters.
+func isPrintable(str string) bool {
+ for _, r := range str {
+ if !(r >= ' ' && r <= '~') {
+ return false
+ }
+ }
+ return true
+}
+
func checkViewName(name string) error {
- if len(name) > internal.MaxNameLength {
- return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength)
+ if len(name) > maxNameLength {
+ return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
}
- if !internal.IsPrintable(name) {
+ if !isPrintable(name) {
return fmt.Errorf("view name needs to be an ASCII string")
}
return nil
diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go
new file mode 100644
index 00000000000..284299fafa2
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go
@@ -0,0 +1,131 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+ "time"
+
+ "go.opencensus.io/metric/metricdata"
+ "go.opencensus.io/stats"
+)
+
+func getUnit(unit string) metricdata.Unit {
+ switch unit {
+ case "1":
+ return metricdata.UnitDimensionless
+ case "ms":
+ return metricdata.UnitMilliseconds
+ case "By":
+ return metricdata.UnitBytes
+ }
+ return metricdata.UnitDimensionless
+}
+
+func getType(v *View) metricdata.Type {
+ m := v.Measure
+ agg := v.Aggregation
+
+ switch agg.Type {
+ case AggTypeSum:
+ switch m.(type) {
+ case *stats.Int64Measure:
+ return metricdata.TypeCumulativeInt64
+ case *stats.Float64Measure:
+ return metricdata.TypeCumulativeFloat64
+ default:
+ panic("unexpected measure type")
+ }
+ case AggTypeDistribution:
+ return metricdata.TypeCumulativeDistribution
+ case AggTypeLastValue:
+ switch m.(type) {
+ case *stats.Int64Measure:
+ return metricdata.TypeGaugeInt64
+ case *stats.Float64Measure:
+ return metricdata.TypeGaugeFloat64
+ default:
+ panic("unexpected measure type")
+ }
+ case AggTypeCount:
+ switch m.(type) {
+ case *stats.Int64Measure:
+ return metricdata.TypeCumulativeInt64
+ case *stats.Float64Measure:
+ return metricdata.TypeCumulativeInt64
+ default:
+ panic("unexpected measure type")
+ }
+ default:
+ panic("unexpected aggregation type")
+ }
+}
+
+func getLableKeys(v *View) []string {
+ labelKeys := []string{}
+ for _, k := range v.TagKeys {
+ labelKeys = append(labelKeys, k.Name())
+ }
+ return labelKeys
+}
+
+func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
+ return &metricdata.Descriptor{
+ Name: v.Name,
+ Description: v.Description,
+ Unit: getUnit(v.Measure.Unit()),
+ Type: getType(v),
+ LabelKeys: getLableKeys(v),
+ }
+}
+
+func toLabelValues(row *Row) []metricdata.LabelValue {
+ labelValues := []metricdata.LabelValue{}
+ for _, tag := range row.Tags {
+ labelValues = append(labelValues, metricdata.NewLabelValue(tag.Value))
+ }
+ return labelValues
+}
+
+func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries {
+ return &metricdata.TimeSeries{
+ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
+ LabelValues: toLabelValues(row),
+ StartTime: startTime,
+ }
+}
+
+func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric {
+ if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
+ v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
+ startTime = time.Time{}
+ }
+
+ rows := v.collectedRows()
+ if len(rows) == 0 {
+ return nil
+ }
+
+ ts := []*metricdata.TimeSeries{}
+ for _, row := range rows {
+ ts = append(ts, rowToTimeseries(v, row, now, startTime))
+ }
+
+ m := &metricdata.Metric{
+ Descriptor: *v.metricDescriptor,
+ TimeSeries: ts,
+ }
+ return m
+}
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
index 63b0ee3cc37..37279b39e9d 100644
--- a/vendor/go.opencensus.io/stats/view/worker.go
+++ b/vendor/go.opencensus.io/stats/view/worker.go
@@ -17,8 +17,11 @@ package view
import (
"fmt"
+ "sync"
"time"
+ "go.opencensus.io/metric/metricdata"
+ "go.opencensus.io/metric/metricproducer"
"go.opencensus.io/stats"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
@@ -43,6 +46,7 @@ type worker struct {
timer *time.Ticker
c chan command
quit, done chan bool
+ mu sync.RWMutex
}
var defaultWorker *worker
@@ -64,11 +68,6 @@ func Find(name string) (v *View) {
// Register begins collecting data for the given views.
// Once a view is registered, it reports data to the registered exporters.
func Register(views ...*View) error {
- for _, v := range views {
- if err := v.canonicalize(); err != nil {
- return err
- }
- }
 req := &registerViewReq{
views: views,
err: make(chan error),
@@ -107,7 +106,7 @@ func RetrieveData(viewName string) ([]*Row, error) {
return resp.rows, resp.err
}
-func record(tags *tag.Map, ms interface{}, attachments map[string]string) {
+func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
req := &recordReq{
tm: tags,
ms: ms.([]stats.Measurement),
@@ -148,6 +147,9 @@ func newWorker() *worker {
}
func (w *worker) start() {
+ prodMgr := metricproducer.GlobalManager()
+ prodMgr.AddProducer(w)
+
for {
select {
case cmd := <-w.c:
@@ -164,6 +166,9 @@ func (w *worker) start() {
}
func (w *worker) stop() {
+ prodMgr := metricproducer.GlobalManager()
+ prodMgr.DeleteProducer(w)
+
w.quit <- true
<-w.done
}
@@ -181,6 +186,8 @@ func (w *worker) getMeasureRef(name string) *measureRef {
}
func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
vi, err := newViewInternal(v)
if err != nil {
return nil, err
@@ -200,6 +207,12 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
return vi, nil
}
+func (w *worker) unregisterView(viewName string) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ delete(w.views, viewName)
+}
+
func (w *worker) reportView(v *viewInternal, now time.Time) {
if !v.isSubscribed() {
return
@@ -227,3 +240,40 @@ func (w *worker) reportUsage(now time.Time) {
w.reportView(v, now)
}
}
+
+func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric {
+ if !v.isSubscribed() {
+ return nil
+ }
+
+ _, ok := w.startTimes[v]
+ if !ok {
+ w.startTimes[v] = now
+ }
+
+ var startTime time.Time
+ if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 ||
+ v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 {
+ startTime = time.Time{}
+ } else {
+ startTime = w.startTimes[v]
+ }
+
+ return viewToMetric(v, now, startTime)
+}
+
+// Read reads all view data and returns them as metrics.
+// It is typically invoked by metric reader to export stats in metric format.
+func (w *worker) Read() []*metricdata.Metric {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ now := time.Now()
+ metrics := make([]*metricdata.Metric, 0, len(w.views))
+ for _, v := range w.views {
+ metric := w.toMetric(v, now)
+ if metric != nil {
+ metrics = append(metrics, metric)
+ }
+ }
+ return metrics
+}
diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go
index b38f26f4241..ba6203a50b7 100644
--- a/vendor/go.opencensus.io/stats/view/worker_commands.go
+++ b/vendor/go.opencensus.io/stats/view/worker_commands.go
@@ -21,8 +21,6 @@ import (
"strings"
"time"
- "go.opencensus.io/exemplar"
-
"go.opencensus.io/stats"
"go.opencensus.io/stats/internal"
"go.opencensus.io/tag"
@@ -58,6 +56,12 @@ type registerViewReq struct {
}
func (cmd *registerViewReq) handleCommand(w *worker) {
+ for _, v := range cmd.views {
+ if err := v.canonicalize(); err != nil {
+ cmd.err <- err
+ return
+ }
+ }
var errstr []string
for _, view := range cmd.views {
vi, err := w.tryRegisterView(view)
@@ -99,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
// The collected data can be cleared.
vi.clearRows()
}
- delete(w.views, name)
+ w.unregisterView(name)
}
cmd.done <- struct{}{}
}
@@ -144,7 +148,7 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) {
type recordReq struct {
tm *tag.Map
ms []stats.Measurement
- attachments map[string]string
+ attachments map[string]interface{}
t time.Time
}
@@ -155,12 +159,7 @@ func (cmd *recordReq) handleCommand(w *worker) {
}
ref := w.getMeasureRef(m.Measure().Name())
for v := range ref.views {
- e := &exemplar.Exemplar{
- Value: m.Value(),
- Timestamp: cmd.t,
- Attachments: cmd.attachments,
- }
- v.addSample(cmd.tm, e)
+ v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now())
}
}
}
diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go
index dcc13f49878..b27d1b26b13 100644
--- a/vendor/go.opencensus.io/tag/context.go
+++ b/vendor/go.opencensus.io/tag/context.go
@@ -17,8 +17,6 @@ package tag
import (
"context"
-
- "go.opencensus.io/exemplar"
)
// FromContext returns the tag map stored in the context.
@@ -43,25 +41,3 @@ func NewContext(ctx context.Context, m *Map) context.Context {
type ctxKey struct{}
var mapCtxKey = ctxKey{}
-
-func init() {
- exemplar.RegisterAttachmentExtractor(extractTagsAttachments)
-}
-
-func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments {
- m := FromContext(ctx)
- if m == nil {
- return a
- }
- if len(m.m) == 0 {
- return a
- }
- if a == nil {
- a = make(map[string]string)
- }
-
- for k, v := range m.m {
- a[exemplar.KeyPrefixTag+k.Name()] = v
- }
- return a
-}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
index 3e998950c3e..e88e727775b 100644
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ b/vendor/go.opencensus.io/tag/map_codec.go
@@ -162,6 +162,9 @@ func (eg *encoderGRPC) bytes() []byte {
// Encode encodes the tag map into a []byte. It is useful to propagate
// the tag maps on wire in binary format.
func Encode(m *Map) []byte {
+ if m == nil {
+ return nil
+ }
eg := &encoderGRPC{
buf: make([]byte, len(m.m)),
}
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
index 01f0f908319..0c54492a2b1 100644
--- a/vendor/go.opencensus.io/trace/basetypes.go
+++ b/vendor/go.opencensus.io/trace/basetypes.go
@@ -59,6 +59,11 @@ func Int64Attribute(key string, value int64) Attribute {
return Attribute{key: key, value: value}
}
+// Float64Attribute returns a float64-valued attribute.
+func Float64Attribute(key string, value float64) Attribute {
+ return Attribute{key: key, value: value}
+}
+
// StringAttribute returns a string-valued attribute.
func StringAttribute(key string, value string) Attribute {
return Attribute{key: key, value: value}
@@ -71,8 +76,8 @@ type LinkType int32
// LinkType values.
const (
LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
- LinkTypeChild // The current span is a child of the linked span.
- LinkTypeParent // The current span is the parent of the linked span.
+ LinkTypeChild // The linked span is a child of the current span.
+ LinkTypeParent // The linked span is the parent of the current span.
)
// Link represents a reference from one span to another span.
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
index 0816892ea1c..775f8274faa 100644
--- a/vendor/go.opencensus.io/trace/config.go
+++ b/vendor/go.opencensus.io/trace/config.go
@@ -27,10 +27,36 @@ type Config struct {
// IDGenerator is for internal use only.
IDGenerator internal.IDGenerator
+
+ // MaxAnnotationEventsPerSpan is max number of annotation events per span
+ MaxAnnotationEventsPerSpan int
+
+ // MaxMessageEventsPerSpan is max number of message events per span
+ MaxMessageEventsPerSpan int
+
+ // MaxAnnotationEventsPerSpan is max number of attributes per span
+ MaxAttributesPerSpan int
+
+ // MaxLinksPerSpan is max number of links per span
+ MaxLinksPerSpan int
}
var configWriteMu sync.Mutex
+const (
+ // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
+ DefaultMaxAnnotationEventsPerSpan = 32
+
+ // DefaultMaxMessageEventsPerSpan is default max number of message events per span
+ DefaultMaxMessageEventsPerSpan = 128
+
+ // DefaultMaxAttributesPerSpan is default max number of attributes per span
+ DefaultMaxAttributesPerSpan = 32
+
+ // DefaultMaxLinksPerSpan is default max number of links per span
+ DefaultMaxLinksPerSpan = 32
+)
+
// ApplyConfig applies changes to the global tracing configuration.
//
// Fields not provided in the given config are going to be preserved.
@@ -44,5 +70,17 @@ func ApplyConfig(cfg Config) {
if cfg.IDGenerator != nil {
c.IDGenerator = cfg.IDGenerator
}
+ if cfg.MaxAnnotationEventsPerSpan > 0 {
+ c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
+ }
+ if cfg.MaxMessageEventsPerSpan > 0 {
+ c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
+ }
+ if cfg.MaxAttributesPerSpan > 0 {
+ c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
+ }
+ if cfg.MaxLinksPerSpan > 0 {
+ c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
+ }
config.Store(&c)
}
diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go
new file mode 100644
index 00000000000..ffc264f23d2
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/evictedqueue.go
@@ -0,0 +1,38 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+type evictedQueue struct {
+ queue []interface{}
+ capacity int
+ droppedCount int
+}
+
+func newEvictedQueue(capacity int) *evictedQueue {
+ eq := &evictedQueue{
+ capacity: capacity,
+ queue: make([]interface{}, 0),
+ }
+
+ return eq
+}
+
+func (eq *evictedQueue) add(value interface{}) {
+ if len(eq.queue) == eq.capacity {
+ eq.queue = eq.queue[1:]
+ eq.droppedCount++
+ }
+ eq.queue = append(eq.queue, value)
+}
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
index 77a8c73575f..e0d9a4b99e9 100644
--- a/vendor/go.opencensus.io/trace/export.go
+++ b/vendor/go.opencensus.io/trace/export.go
@@ -85,6 +85,13 @@ type SpanData struct {
Annotations []Annotation
MessageEvents []MessageEvent
Status
- Links []Link
- HasRemoteParent bool
+ Links []Link
+ HasRemoteParent bool
+ DroppedAttributeCount int
+ DroppedAnnotationCount int
+ DroppedMessageEventCount int
+ DroppedLinkCount int
+
+ // ChildSpanCount holds the number of child span created for this span.
+ ChildSpanCount int
}
diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go
index 1c8b9b34b2a..7e808d8f30e 100644
--- a/vendor/go.opencensus.io/trace/internal/internal.go
+++ b/vendor/go.opencensus.io/trace/internal/internal.go
@@ -15,6 +15,7 @@
// Package internal provides trace internals.
package internal
+// IDGenerator allows custom generators for TraceId and SpanId.
type IDGenerator interface {
NewTraceID() [16]byte
NewSpanID() [8]byte
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go
new file mode 100644
index 00000000000..3f80a336813
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/lrumap.go
@@ -0,0 +1,37 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+type lruMap struct {
+ simpleLruMap *simplelru.LRU
+ droppedCount int
+}
+
+func newLruMap(size int) *lruMap {
+ lm := &lruMap{}
+ lm.simpleLruMap, _ = simplelru.NewLRU(size, nil)
+ return lm
+}
+
+func (lm *lruMap) add(key, value interface{}) {
+ evicted := lm.simpleLruMap.Add(key, value)
+ if evicted {
+ lm.droppedCount++
+ }
+}
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
index 9e5e5f03313..38ead7bf0ad 100644
--- a/vendor/go.opencensus.io/trace/trace.go
+++ b/vendor/go.opencensus.io/trace/trace.go
@@ -42,6 +42,20 @@ type Span struct {
data *SpanData
mu sync.Mutex // protects the contents of *data (but not the pointer value.)
spanContext SpanContext
+
+ // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry
+ // is removed to create room for a new entry.
+ lruAttributes *lruMap
+
+ // annotations are stored in FIFO queue capped by configured limit.
+ annotations *evictedQueue
+
+ // messageEvents are stored in FIFO queue capped by configured limit.
+ messageEvents *evictedQueue
+
+ // links are stored in FIFO queue capped by configured limit.
+ links *evictedQueue
+
// spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
*spanStore
endOnce sync.Once
@@ -156,6 +170,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont
var opts StartOptions
var parent SpanContext
if p := FromContext(ctx); p != nil {
+ p.addChild()
parent = p.spanContext
}
for _, op := range o {
@@ -226,6 +241,11 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa
Name: name,
HasRemoteParent: remoteParent,
}
+ span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
+ span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
+ span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
+ span.links = newEvictedQueue(cfg.MaxLinksPerSpan)
+
if hasParent {
span.data.ParentSpanID = parent.SpanID
}
@@ -276,11 +296,21 @@ func (s *Span) makeSpanData() *SpanData {
var sd SpanData
s.mu.Lock()
sd = *s.data
- if s.data.Attributes != nil {
- sd.Attributes = make(map[string]interface{})
- for k, v := range s.data.Attributes {
- sd.Attributes[k] = v
- }
+ if s.lruAttributes.simpleLruMap.Len() > 0 {
+ sd.Attributes = s.lruAttributesToAttributeMap()
+ sd.DroppedAttributeCount = s.lruAttributes.droppedCount
+ }
+ if len(s.annotations.queue) > 0 {
+ sd.Annotations = s.interfaceArrayToAnnotationArray()
+ sd.DroppedAnnotationCount = s.annotations.droppedCount
+ }
+ if len(s.messageEvents.queue) > 0 {
+ sd.MessageEvents = s.interfaceArrayToMessageEventArray()
+ sd.DroppedMessageEventCount = s.messageEvents.droppedCount
+ }
+ if len(s.links.queue) > 0 {
+ sd.Links = s.interfaceArrayToLinksArray()
+ sd.DroppedLinkCount = s.links.droppedCount
}
s.mu.Unlock()
return &sd
@@ -314,6 +344,57 @@ func (s *Span) SetStatus(status Status) {
s.mu.Unlock()
}
+func (s *Span) interfaceArrayToLinksArray() []Link {
+ linksArr := make([]Link, 0)
+ for _, value := range s.links.queue {
+ linksArr = append(linksArr, value.(Link))
+ }
+ return linksArr
+}
+
+func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent {
+ messageEventArr := make([]MessageEvent, 0)
+ for _, value := range s.messageEvents.queue {
+ messageEventArr = append(messageEventArr, value.(MessageEvent))
+ }
+ return messageEventArr
+}
+
+func (s *Span) interfaceArrayToAnnotationArray() []Annotation {
+ annotationArr := make([]Annotation, 0)
+ for _, value := range s.annotations.queue {
+ annotationArr = append(annotationArr, value.(Annotation))
+ }
+ return annotationArr
+}
+
+func (s *Span) lruAttributesToAttributeMap() map[string]interface{} {
+ attributes := make(map[string]interface{})
+ for _, key := range s.lruAttributes.simpleLruMap.Keys() {
+ value, ok := s.lruAttributes.simpleLruMap.Get(key)
+ if ok {
+ keyStr := key.(string)
+ attributes[keyStr] = value
+ }
+ }
+ return attributes
+}
+
+func (s *Span) copyToCappedAttributes(attributes []Attribute) {
+ for _, a := range attributes {
+ s.lruAttributes.add(a.key, a.value)
+ }
+}
+
+func (s *Span) addChild() {
+ if !s.IsRecordingEvents() {
+ return
+ }
+ s.mu.Lock()
+ s.data.ChildSpanCount++
+ s.mu.Unlock()
+}
+
// AddAttributes sets attributes in the span.
//
// Existing attributes whose keys appear in the attributes parameter are overwritten.
@@ -322,10 +403,7 @@ func (s *Span) AddAttributes(attributes ...Attribute) {
return
}
s.mu.Lock()
- if s.data.Attributes == nil {
- s.data.Attributes = make(map[string]interface{})
- }
- copyAttributes(s.data.Attributes, attributes)
+ s.copyToCappedAttributes(attributes)
s.mu.Unlock()
}
@@ -345,7 +423,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in
m = make(map[string]interface{})
copyAttributes(m, attributes)
}
- s.data.Annotations = append(s.data.Annotations, Annotation{
+ s.annotations.add(Annotation{
Time: now,
Message: msg,
Attributes: m,
@@ -361,7 +439,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) {
a = make(map[string]interface{})
copyAttributes(a, attributes)
}
- s.data.Annotations = append(s.data.Annotations, Annotation{
+ s.annotations.add(Annotation{
Time: now,
Message: str,
Attributes: a,
@@ -398,7 +476,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy
}
now := time.Now()
s.mu.Lock()
- s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
+ s.messageEvents.add(MessageEvent{
Time: now,
EventType: MessageEventTypeSent,
MessageID: messageID,
@@ -420,7 +498,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse
}
now := time.Now()
s.mu.Lock()
- s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{
+ s.messageEvents.add(MessageEvent{
Time: now,
EventType: MessageEventTypeRecv,
MessageID: messageID,
@@ -436,7 +514,7 @@ func (s *Span) AddLink(l Link) {
return
}
s.mu.Lock()
- s.data.Links = append(s.data.Links, l)
+ s.links.add(l)
s.mu.Unlock()
}
@@ -468,8 +546,12 @@ func init() {
gen.spanIDInc |= 1
config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
- IDGenerator: gen,
+ DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
+ IDGenerator: gen,
+ MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
+ MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
+ MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan,
+ MaxLinksPerSpan: DefaultMaxLinksPerSpan,
})
}
diff --git a/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go b/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go
new file mode 100644
index 00000000000..431bdde6787
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/container/v1/cluster_service.pb.go
@@ -0,0 +1,7176 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/container/v1/cluster_service.proto
+
+package container
+
+import (
+ context "context"
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ empty "github.com/golang/protobuf/ptypes/empty"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// Allowed Network Policy providers.
+type NetworkPolicy_Provider int32
+
+const (
+ // Not set
+ NetworkPolicy_PROVIDER_UNSPECIFIED NetworkPolicy_Provider = 0
+ // Tigera (Calico Felix).
+ NetworkPolicy_CALICO NetworkPolicy_Provider = 1
+)
+
+var NetworkPolicy_Provider_name = map[int32]string{
+ 0: "PROVIDER_UNSPECIFIED",
+ 1: "CALICO",
+}
+
+var NetworkPolicy_Provider_value = map[string]int32{
+ "PROVIDER_UNSPECIFIED": 0,
+ "CALICO": 1,
+}
+
+func (x NetworkPolicy_Provider) String() string {
+ return proto.EnumName(NetworkPolicy_Provider_name, int32(x))
+}
+
+func (NetworkPolicy_Provider) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{11, 0}
+}
+
+// The current status of the cluster.
+type Cluster_Status int32
+
+const (
+ // Not set.
+ Cluster_STATUS_UNSPECIFIED Cluster_Status = 0
+ // The PROVISIONING state indicates the cluster is being created.
+ Cluster_PROVISIONING Cluster_Status = 1
+ // The RUNNING state indicates the cluster has been created and is fully
+ // usable.
+ Cluster_RUNNING Cluster_Status = 2
+ // The RECONCILING state indicates that some work is actively being done on
+ // the cluster, such as upgrading the master or node software. Details can
+ // be found in the `statusMessage` field.
+ Cluster_RECONCILING Cluster_Status = 3
+ // The STOPPING state indicates the cluster is being deleted.
+ Cluster_STOPPING Cluster_Status = 4
+ // The ERROR state indicates the cluster may be unusable. Details
+ // can be found in the `statusMessage` field.
+ Cluster_ERROR Cluster_Status = 5
+ // The DEGRADED state indicates the cluster requires user action to restore
+ // full functionality. Details can be found in the `statusMessage` field.
+ Cluster_DEGRADED Cluster_Status = 6
+)
+
+var Cluster_Status_name = map[int32]string{
+ 0: "STATUS_UNSPECIFIED",
+ 1: "PROVISIONING",
+ 2: "RUNNING",
+ 3: "RECONCILING",
+ 4: "STOPPING",
+ 5: "ERROR",
+ 6: "DEGRADED",
+}
+
+var Cluster_Status_value = map[string]int32{
+ "STATUS_UNSPECIFIED": 0,
+ "PROVISIONING": 1,
+ "RUNNING": 2,
+ "RECONCILING": 3,
+ "STOPPING": 4,
+ "ERROR": 5,
+ "DEGRADED": 6,
+}
+
+func (x Cluster_Status) String() string {
+ return proto.EnumName(Cluster_Status_name, int32(x))
+}
+
+func (Cluster_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{13, 0}
+}
+
+// Current status of the operation.
+type Operation_Status int32
+
+const (
+ // Not set.
+ Operation_STATUS_UNSPECIFIED Operation_Status = 0
+ // The operation has been created.
+ Operation_PENDING Operation_Status = 1
+ // The operation is currently running.
+ Operation_RUNNING Operation_Status = 2
+ // The operation is done, either cancelled or completed.
+ Operation_DONE Operation_Status = 3
+ // The operation is aborting.
+ Operation_ABORTING Operation_Status = 4
+)
+
+var Operation_Status_name = map[int32]string{
+ 0: "STATUS_UNSPECIFIED",
+ 1: "PENDING",
+ 2: "RUNNING",
+ 3: "DONE",
+ 4: "ABORTING",
+}
+
+var Operation_Status_value = map[string]int32{
+ "STATUS_UNSPECIFIED": 0,
+ "PENDING": 1,
+ "RUNNING": 2,
+ "DONE": 3,
+ "ABORTING": 4,
+}
+
+func (x Operation_Status) String() string {
+ return proto.EnumName(Operation_Status_name, int32(x))
+}
+
+func (Operation_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{15, 0}
+}
+
+// Operation type.
+type Operation_Type int32
+
+const (
+ // Not set.
+ Operation_TYPE_UNSPECIFIED Operation_Type = 0
+ // Cluster create.
+ Operation_CREATE_CLUSTER Operation_Type = 1
+ // Cluster delete.
+ Operation_DELETE_CLUSTER Operation_Type = 2
+ // A master upgrade.
+ Operation_UPGRADE_MASTER Operation_Type = 3
+ // A node upgrade.
+ Operation_UPGRADE_NODES Operation_Type = 4
+ // Cluster repair.
+ Operation_REPAIR_CLUSTER Operation_Type = 5
+ // Cluster update.
+ Operation_UPDATE_CLUSTER Operation_Type = 6
+ // Node pool create.
+ Operation_CREATE_NODE_POOL Operation_Type = 7
+ // Node pool delete.
+ Operation_DELETE_NODE_POOL Operation_Type = 8
+ // Set node pool management.
+ Operation_SET_NODE_POOL_MANAGEMENT Operation_Type = 9
+ // Automatic node pool repair.
+ Operation_AUTO_REPAIR_NODES Operation_Type = 10
+ // Automatic node upgrade.
+ Operation_AUTO_UPGRADE_NODES Operation_Type = 11
+ // Set labels.
+ Operation_SET_LABELS Operation_Type = 12
+ // Set/generate master auth materials
+ Operation_SET_MASTER_AUTH Operation_Type = 13
+ // Set node pool size.
+ Operation_SET_NODE_POOL_SIZE Operation_Type = 14
+ // Updates network policy for a cluster.
+ Operation_SET_NETWORK_POLICY Operation_Type = 15
+ // Set the maintenance policy.
+ Operation_SET_MAINTENANCE_POLICY Operation_Type = 16
+)
+
+var Operation_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "CREATE_CLUSTER",
+ 2: "DELETE_CLUSTER",
+ 3: "UPGRADE_MASTER",
+ 4: "UPGRADE_NODES",
+ 5: "REPAIR_CLUSTER",
+ 6: "UPDATE_CLUSTER",
+ 7: "CREATE_NODE_POOL",
+ 8: "DELETE_NODE_POOL",
+ 9: "SET_NODE_POOL_MANAGEMENT",
+ 10: "AUTO_REPAIR_NODES",
+ 11: "AUTO_UPGRADE_NODES",
+ 12: "SET_LABELS",
+ 13: "SET_MASTER_AUTH",
+ 14: "SET_NODE_POOL_SIZE",
+ 15: "SET_NETWORK_POLICY",
+ 16: "SET_MAINTENANCE_POLICY",
+}
+
+var Operation_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "CREATE_CLUSTER": 1,
+ "DELETE_CLUSTER": 2,
+ "UPGRADE_MASTER": 3,
+ "UPGRADE_NODES": 4,
+ "REPAIR_CLUSTER": 5,
+ "UPDATE_CLUSTER": 6,
+ "CREATE_NODE_POOL": 7,
+ "DELETE_NODE_POOL": 8,
+ "SET_NODE_POOL_MANAGEMENT": 9,
+ "AUTO_REPAIR_NODES": 10,
+ "AUTO_UPGRADE_NODES": 11,
+ "SET_LABELS": 12,
+ "SET_MASTER_AUTH": 13,
+ "SET_NODE_POOL_SIZE": 14,
+ "SET_NETWORK_POLICY": 15,
+ "SET_MAINTENANCE_POLICY": 16,
+}
+
+func (x Operation_Type) String() string {
+ return proto.EnumName(Operation_Type_name, int32(x))
+}
+
+func (Operation_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{15, 1}
+}
+
+// Operation type: what type update to perform.
+type SetMasterAuthRequest_Action int32
+
+const (
+ // Operation is unknown and will error out.
+ SetMasterAuthRequest_UNKNOWN SetMasterAuthRequest_Action = 0
+ // Set the password to a user generated value.
+ SetMasterAuthRequest_SET_PASSWORD SetMasterAuthRequest_Action = 1
+ // Generate a new password and set it to that.
+ SetMasterAuthRequest_GENERATE_PASSWORD SetMasterAuthRequest_Action = 2
+ // Set the username. If an empty username is provided, basic authentication
+ // is disabled for the cluster. If a non-empty username is provided, basic
+ // authentication is enabled, with either a provided password or a generated
+ // one.
+ SetMasterAuthRequest_SET_USERNAME SetMasterAuthRequest_Action = 3
+)
+
+var SetMasterAuthRequest_Action_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "SET_PASSWORD",
+ 2: "GENERATE_PASSWORD",
+ 3: "SET_USERNAME",
+}
+
+var SetMasterAuthRequest_Action_value = map[string]int32{
+ "UNKNOWN": 0,
+ "SET_PASSWORD": 1,
+ "GENERATE_PASSWORD": 2,
+ "SET_USERNAME": 3,
+}
+
+func (x SetMasterAuthRequest_Action) String() string {
+ return proto.EnumName(SetMasterAuthRequest_Action_name, int32(x))
+}
+
+func (SetMasterAuthRequest_Action) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{26, 0}
+}
+
+// The current status of the node pool instance.
+type NodePool_Status int32
+
+const (
+ // Not set.
+ NodePool_STATUS_UNSPECIFIED NodePool_Status = 0
+ // The PROVISIONING state indicates the node pool is being created.
+ NodePool_PROVISIONING NodePool_Status = 1
+ // The RUNNING state indicates the node pool has been created
+ // and is fully usable.
+ NodePool_RUNNING NodePool_Status = 2
+ // The RUNNING_WITH_ERROR state indicates the node pool has been created
+ // and is partially usable. Some error state has occurred and some
+ // functionality may be impaired. Customer may need to reissue a request
+ // or trigger a new update.
+ NodePool_RUNNING_WITH_ERROR NodePool_Status = 3
+ // The RECONCILING state indicates that some work is actively being done on
+ // the node pool, such as upgrading node software. Details can
+ // be found in the `statusMessage` field.
+ NodePool_RECONCILING NodePool_Status = 4
+ // The STOPPING state indicates the node pool is being deleted.
+ NodePool_STOPPING NodePool_Status = 5
+ // The ERROR state indicates the node pool may be unusable. Details
+ // can be found in the `statusMessage` field.
+ NodePool_ERROR NodePool_Status = 6
+)
+
+var NodePool_Status_name = map[int32]string{
+ 0: "STATUS_UNSPECIFIED",
+ 1: "PROVISIONING",
+ 2: "RUNNING",
+ 3: "RUNNING_WITH_ERROR",
+ 4: "RECONCILING",
+ 5: "STOPPING",
+ 6: "ERROR",
+}
+
+var NodePool_Status_value = map[string]int32{
+ "STATUS_UNSPECIFIED": 0,
+ "PROVISIONING": 1,
+ "RUNNING": 2,
+ "RUNNING_WITH_ERROR": 3,
+ "RECONCILING": 4,
+ "STOPPING": 5,
+ "ERROR": 6,
+}
+
+func (x NodePool_Status) String() string {
+ return proto.EnumName(NodePool_Status_name, int32(x))
+}
+
+func (NodePool_Status) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{40, 0}
+}
+
+// Parameters that describe the nodes in a cluster.
+type NodeConfig struct {
+ // The name of a Google Compute Engine [machine
+ // type](/compute/docs/machine-types) (e.g.
+ // `n1-standard-1`).
+ //
+ // If unspecified, the default machine type is
+ // `n1-standard-1`.
+ MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machine_type,omitempty"`
+ // Size of the disk attached to each node, specified in GB.
+ // The smallest allowed disk size is 10GB.
+ //
+ // If unspecified, the default disk size is 100GB.
+ DiskSizeGb int32 `protobuf:"varint,2,opt,name=disk_size_gb,json=diskSizeGb,proto3" json:"disk_size_gb,omitempty"`
+ // The set of Google API scopes to be made available on all of the
+ // node VMs under the "default" service account.
+ //
+ // The following scopes are recommended, but not required, and by default are
+ // not included:
+ //
+ // * `https://www.googleapis.com/auth/compute` is required for mounting
+ // persistent storage on your nodes.
+ // * `https://www.googleapis.com/auth/devstorage.read_only` is required for
+ // communicating with **gcr.io**
+ // (the [Google Container Registry](/container-registry/)).
+ //
+ // If unspecified, no scopes are added, unless Cloud Logging or Cloud
+ // Monitoring are enabled, in which case their required scopes will be added.
+ OauthScopes []string `protobuf:"bytes,3,rep,name=oauth_scopes,json=oauthScopes,proto3" json:"oauth_scopes,omitempty"`
+ // The Google Cloud Platform Service Account to be used by the node VMs. If
+ // no Service Account is specified, the "default" service account is used.
+ ServiceAccount string `protobuf:"bytes,9,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
+ // The metadata key/value pairs assigned to instances in the cluster.
+ //
+ // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
+ // in length. These are reflected as part of a URL in the metadata server.
+ // Additionally, to avoid ambiguity, keys must not conflict with any other
+ // metadata keys for the project or be one of the reserved keys:
+ // "cluster-location"
+ // "cluster-name"
+ // "cluster-uid"
+ // "configure-sh"
+ // "enable-os-login"
+ // "gci-update-strategy"
+ // "gci-ensure-gke-docker"
+ // "instance-template"
+ // "kube-env"
+ // "startup-script"
+ // "user-data"
+ //
+ // Values are free-form strings, and only have meaning as interpreted by
+ // the image running in the instance. The only restriction placed on them is
+ // that each value's size must be less than or equal to 32 KB.
+ //
+ // The total size of all keys and values must be less than 512 KB.
+ Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The image type to use for this node. Note that for a given image type,
+ // the latest version of it will be used.
+ ImageType string `protobuf:"bytes,5,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"`
+ // The map of Kubernetes labels (key/value pairs) to be applied to each node.
+ // These will added in addition to any default label(s) that
+ // Kubernetes may apply to the node.
+ // In case of conflict in label keys, the applied set may differ depending on
+ // the Kubernetes version -- it's best to assume the behavior is undefined
+ // and conflicts should be avoided.
+ // For more information, including usage and the valid values, see:
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The number of local SSD disks to be attached to the node.
+ //
+ // The limit for this value is dependant upon the maximum number of
+ // disks available on a machine per zone. See:
+ // https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits
+ // for more information.
+ LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount,proto3" json:"local_ssd_count,omitempty"`
+ // The list of instance tags applied to all nodes. Tags are used to identify
+ // valid sources or targets for network firewalls and are specified by
+ // the client during cluster or node pool creation. Each tag within the list
+ // must comply with RFC1035.
+ Tags []string `protobuf:"bytes,8,rep,name=tags,proto3" json:"tags,omitempty"`
+ // Whether the nodes are created as preemptible VM instances. See:
+ // https://cloud.google.com/compute/docs/instances/preemptible for more
+ // information about preemptible VM instances.
+ Preemptible bool `protobuf:"varint,10,opt,name=preemptible,proto3" json:"preemptible,omitempty"`
+ // A list of hardware accelerators to be attached to each node.
+ // See https://cloud.google.com/compute/docs/gpus for more information about
+ // support for GPUs.
+ Accelerators []*AcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
+ // Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd')
+ //
+ // If unspecified, the default disk type is 'pd-standard'
+ DiskType string `protobuf:"bytes,12,opt,name=disk_type,json=diskType,proto3" json:"disk_type,omitempty"`
+ // Minimum CPU platform to be used by this instance. The instance may be
+ // scheduled on the specified or newer CPU platform. Applicable values are the
+ // friendly names of CPU platforms, such as
+ // minCpuPlatform: "Intel Haswell" or
+ // minCpuPlatform: "Intel Sandy Bridge". For more
+ // information, read [how to specify min CPU
+ // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
+ MinCpuPlatform string `protobuf:"bytes,13,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NodeConfig) Reset() { *m = NodeConfig{} }
+func (m *NodeConfig) String() string { return proto.CompactTextString(m) }
+func (*NodeConfig) ProtoMessage() {}
+func (*NodeConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{0}
+}
+
+func (m *NodeConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NodeConfig.Unmarshal(m, b)
+}
+func (m *NodeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NodeConfig.Marshal(b, m, deterministic)
+}
+func (m *NodeConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NodeConfig.Merge(m, src)
+}
+func (m *NodeConfig) XXX_Size() int {
+ return xxx_messageInfo_NodeConfig.Size(m)
+}
+func (m *NodeConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_NodeConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeConfig proto.InternalMessageInfo
+
+func (m *NodeConfig) GetMachineType() string {
+ if m != nil {
+ return m.MachineType
+ }
+ return ""
+}
+
+func (m *NodeConfig) GetDiskSizeGb() int32 {
+ if m != nil {
+ return m.DiskSizeGb
+ }
+ return 0
+}
+
+func (m *NodeConfig) GetOauthScopes() []string {
+ if m != nil {
+ return m.OauthScopes
+ }
+ return nil
+}
+
+func (m *NodeConfig) GetServiceAccount() string {
+ if m != nil {
+ return m.ServiceAccount
+ }
+ return ""
+}
+
+func (m *NodeConfig) GetMetadata() map[string]string {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+func (m *NodeConfig) GetImageType() string {
+ if m != nil {
+ return m.ImageType
+ }
+ return ""
+}
+
+func (m *NodeConfig) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *NodeConfig) GetLocalSsdCount() int32 {
+ if m != nil {
+ return m.LocalSsdCount
+ }
+ return 0
+}
+
+func (m *NodeConfig) GetTags() []string {
+ if m != nil {
+ return m.Tags
+ }
+ return nil
+}
+
+func (m *NodeConfig) GetPreemptible() bool {
+ if m != nil {
+ return m.Preemptible
+ }
+ return false
+}
+
+func (m *NodeConfig) GetAccelerators() []*AcceleratorConfig {
+ if m != nil {
+ return m.Accelerators
+ }
+ return nil
+}
+
+func (m *NodeConfig) GetDiskType() string {
+ if m != nil {
+ return m.DiskType
+ }
+ return ""
+}
+
+func (m *NodeConfig) GetMinCpuPlatform() string {
+ if m != nil {
+ return m.MinCpuPlatform
+ }
+ return ""
+}
+
+// The authentication information for accessing the master endpoint.
+// Authentication can be done using HTTP basic auth or using client
+// certificates.
+type MasterAuth struct {
+ // The username to use for HTTP basic authentication to the master endpoint.
+ // For clusters v1.6.0 and later, you can disable basic authentication by
+ // providing an empty username.
+ Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
+ // The password to use for HTTP basic authentication to the master endpoint.
+ // Because the master endpoint is open to the Internet, you should create a
+ // strong password. If a password is provided for cluster creation, username
+ // must be non-empty.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ // Configuration for client certificate authentication on the cluster. For
+ // clusters before v1.12, if no configuration is specified, a client
+ // certificate is issued.
+ ClientCertificateConfig *ClientCertificateConfig `protobuf:"bytes,3,opt,name=client_certificate_config,json=clientCertificateConfig,proto3" json:"client_certificate_config,omitempty"`
+ // [Output only] Base64-encoded public certificate that is the root of
+ // trust for the cluster.
+ ClusterCaCertificate string `protobuf:"bytes,100,opt,name=cluster_ca_certificate,json=clusterCaCertificate,proto3" json:"cluster_ca_certificate,omitempty"`
+ // [Output only] Base64-encoded public certificate used by clients to
+ // authenticate to the cluster endpoint.
+ ClientCertificate string `protobuf:"bytes,101,opt,name=client_certificate,json=clientCertificate,proto3" json:"client_certificate,omitempty"`
+ // [Output only] Base64-encoded private key used by clients to authenticate
+ // to the cluster endpoint.
+ ClientKey string `protobuf:"bytes,102,opt,name=client_key,json=clientKey,proto3" json:"client_key,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MasterAuth) Reset() { *m = MasterAuth{} }
+func (m *MasterAuth) String() string { return proto.CompactTextString(m) }
+func (*MasterAuth) ProtoMessage() {}
+func (*MasterAuth) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{1}
+}
+
+func (m *MasterAuth) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MasterAuth.Unmarshal(m, b)
+}
+func (m *MasterAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MasterAuth.Marshal(b, m, deterministic)
+}
+func (m *MasterAuth) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MasterAuth.Merge(m, src)
+}
+func (m *MasterAuth) XXX_Size() int {
+ return xxx_messageInfo_MasterAuth.Size(m)
+}
+func (m *MasterAuth) XXX_DiscardUnknown() {
+ xxx_messageInfo_MasterAuth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MasterAuth proto.InternalMessageInfo
+
+func (m *MasterAuth) GetUsername() string {
+ if m != nil {
+ return m.Username
+ }
+ return ""
+}
+
+func (m *MasterAuth) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+func (m *MasterAuth) GetClientCertificateConfig() *ClientCertificateConfig {
+ if m != nil {
+ return m.ClientCertificateConfig
+ }
+ return nil
+}
+
+func (m *MasterAuth) GetClusterCaCertificate() string {
+ if m != nil {
+ return m.ClusterCaCertificate
+ }
+ return ""
+}
+
+func (m *MasterAuth) GetClientCertificate() string {
+ if m != nil {
+ return m.ClientCertificate
+ }
+ return ""
+}
+
+func (m *MasterAuth) GetClientKey() string {
+ if m != nil {
+ return m.ClientKey
+ }
+ return ""
+}
+
+// Configuration for client certificates on the cluster.
+type ClientCertificateConfig struct {
+ // Issue a client certificate.
+ IssueClientCertificate bool `protobuf:"varint,1,opt,name=issue_client_certificate,json=issueClientCertificate,proto3" json:"issue_client_certificate,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClientCertificateConfig) Reset() { *m = ClientCertificateConfig{} }
+func (m *ClientCertificateConfig) String() string { return proto.CompactTextString(m) }
+func (*ClientCertificateConfig) ProtoMessage() {}
+func (*ClientCertificateConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{2}
+}
+
+func (m *ClientCertificateConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ClientCertificateConfig.Unmarshal(m, b)
+}
+func (m *ClientCertificateConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ClientCertificateConfig.Marshal(b, m, deterministic)
+}
+func (m *ClientCertificateConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientCertificateConfig.Merge(m, src)
+}
+func (m *ClientCertificateConfig) XXX_Size() int {
+ return xxx_messageInfo_ClientCertificateConfig.Size(m)
+}
+func (m *ClientCertificateConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientCertificateConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientCertificateConfig proto.InternalMessageInfo
+
+func (m *ClientCertificateConfig) GetIssueClientCertificate() bool {
+ if m != nil {
+ return m.IssueClientCertificate
+ }
+ return false
+}
+
+// Configuration for the addons that can be automatically spun up in the
+// cluster, enabling additional functionality.
+type AddonsConfig struct {
+ // Configuration for the HTTP (L7) load balancing controller addon, which
+ // makes it easy to set up HTTP load balancers for services in a cluster.
+ HttpLoadBalancing *HttpLoadBalancing `protobuf:"bytes,1,opt,name=http_load_balancing,json=httpLoadBalancing,proto3" json:"http_load_balancing,omitempty"`
+ // Configuration for the horizontal pod autoscaling feature, which
+ // increases or decreases the number of replica pods a replication controller
+ // has based on the resource usage of the existing pods.
+ HorizontalPodAutoscaling *HorizontalPodAutoscaling `protobuf:"bytes,2,opt,name=horizontal_pod_autoscaling,json=horizontalPodAutoscaling,proto3" json:"horizontal_pod_autoscaling,omitempty"`
+ // Configuration for the Kubernetes Dashboard.
+ KubernetesDashboard *KubernetesDashboard `protobuf:"bytes,3,opt,name=kubernetes_dashboard,json=kubernetesDashboard,proto3" json:"kubernetes_dashboard,omitempty"`
+ // Configuration for NetworkPolicy. This only tracks whether the addon
+ // is enabled or not on the Master, it does not track whether network policy
+ // is enabled for the nodes.
+ NetworkPolicyConfig *NetworkPolicyConfig `protobuf:"bytes,4,opt,name=network_policy_config,json=networkPolicyConfig,proto3" json:"network_policy_config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AddonsConfig) Reset() { *m = AddonsConfig{} }
+func (m *AddonsConfig) String() string { return proto.CompactTextString(m) }
+func (*AddonsConfig) ProtoMessage() {}
+func (*AddonsConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{3}
+}
+
+func (m *AddonsConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AddonsConfig.Unmarshal(m, b)
+}
+func (m *AddonsConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AddonsConfig.Marshal(b, m, deterministic)
+}
+func (m *AddonsConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AddonsConfig.Merge(m, src)
+}
+func (m *AddonsConfig) XXX_Size() int {
+ return xxx_messageInfo_AddonsConfig.Size(m)
+}
+func (m *AddonsConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_AddonsConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AddonsConfig proto.InternalMessageInfo
+
+func (m *AddonsConfig) GetHttpLoadBalancing() *HttpLoadBalancing {
+ if m != nil {
+ return m.HttpLoadBalancing
+ }
+ return nil
+}
+
+func (m *AddonsConfig) GetHorizontalPodAutoscaling() *HorizontalPodAutoscaling {
+ if m != nil {
+ return m.HorizontalPodAutoscaling
+ }
+ return nil
+}
+
+func (m *AddonsConfig) GetKubernetesDashboard() *KubernetesDashboard {
+ if m != nil {
+ return m.KubernetesDashboard
+ }
+ return nil
+}
+
+func (m *AddonsConfig) GetNetworkPolicyConfig() *NetworkPolicyConfig {
+ if m != nil {
+ return m.NetworkPolicyConfig
+ }
+ return nil
+}
+
+// Configuration options for the HTTP (L7) load balancing controller addon,
+// which makes it easy to set up HTTP load balancers for services in a cluster.
+type HttpLoadBalancing struct {
+ // Whether the HTTP Load Balancing controller is enabled in the cluster.
+ // When enabled, it runs a small pod in the cluster that manages the load
+ // balancers.
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HttpLoadBalancing) Reset() { *m = HttpLoadBalancing{} }
+func (m *HttpLoadBalancing) String() string { return proto.CompactTextString(m) }
+func (*HttpLoadBalancing) ProtoMessage() {}
+func (*HttpLoadBalancing) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{4}
+}
+
+func (m *HttpLoadBalancing) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_HttpLoadBalancing.Unmarshal(m, b)
+}
+func (m *HttpLoadBalancing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_HttpLoadBalancing.Marshal(b, m, deterministic)
+}
+func (m *HttpLoadBalancing) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HttpLoadBalancing.Merge(m, src)
+}
+func (m *HttpLoadBalancing) XXX_Size() int {
+ return xxx_messageInfo_HttpLoadBalancing.Size(m)
+}
+func (m *HttpLoadBalancing) XXX_DiscardUnknown() {
+ xxx_messageInfo_HttpLoadBalancing.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HttpLoadBalancing proto.InternalMessageInfo
+
+func (m *HttpLoadBalancing) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// Configuration options for the horizontal pod autoscaling feature, which
+// increases or decreases the number of replica pods a replication controller
+// has based on the resource usage of the existing pods.
+type HorizontalPodAutoscaling struct {
+ // Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
+ // When enabled, it ensures that a Heapster pod is running in the cluster,
+ // which is also used by the Cloud Monitoring service.
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HorizontalPodAutoscaling) Reset() { *m = HorizontalPodAutoscaling{} }
+func (m *HorizontalPodAutoscaling) String() string { return proto.CompactTextString(m) }
+func (*HorizontalPodAutoscaling) ProtoMessage() {}
+func (*HorizontalPodAutoscaling) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{5}
+}
+
+func (m *HorizontalPodAutoscaling) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_HorizontalPodAutoscaling.Unmarshal(m, b)
+}
+func (m *HorizontalPodAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_HorizontalPodAutoscaling.Marshal(b, m, deterministic)
+}
+func (m *HorizontalPodAutoscaling) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HorizontalPodAutoscaling.Merge(m, src)
+}
+func (m *HorizontalPodAutoscaling) XXX_Size() int {
+ return xxx_messageInfo_HorizontalPodAutoscaling.Size(m)
+}
+func (m *HorizontalPodAutoscaling) XXX_DiscardUnknown() {
+ xxx_messageInfo_HorizontalPodAutoscaling.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HorizontalPodAutoscaling proto.InternalMessageInfo
+
+func (m *HorizontalPodAutoscaling) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// Configuration for the Kubernetes Dashboard.
+type KubernetesDashboard struct {
+ // Whether the Kubernetes Dashboard is enabled for this cluster.
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *KubernetesDashboard) Reset() { *m = KubernetesDashboard{} }
+func (m *KubernetesDashboard) String() string { return proto.CompactTextString(m) }
+func (*KubernetesDashboard) ProtoMessage() {}
+func (*KubernetesDashboard) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{6}
+}
+
+func (m *KubernetesDashboard) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_KubernetesDashboard.Unmarshal(m, b)
+}
+func (m *KubernetesDashboard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_KubernetesDashboard.Marshal(b, m, deterministic)
+}
+func (m *KubernetesDashboard) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KubernetesDashboard.Merge(m, src)
+}
+func (m *KubernetesDashboard) XXX_Size() int {
+ return xxx_messageInfo_KubernetesDashboard.Size(m)
+}
+func (m *KubernetesDashboard) XXX_DiscardUnknown() {
+ xxx_messageInfo_KubernetesDashboard.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KubernetesDashboard proto.InternalMessageInfo
+
+func (m *KubernetesDashboard) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// Configuration for NetworkPolicy. This only tracks whether the addon
+// is enabled or not on the Master, it does not track whether network policy
+// is enabled for the nodes.
+type NetworkPolicyConfig struct {
+ // Whether NetworkPolicy is enabled for this cluster.
+ Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkPolicyConfig) Reset() { *m = NetworkPolicyConfig{} }
+func (m *NetworkPolicyConfig) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicyConfig) ProtoMessage() {}
+func (*NetworkPolicyConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{7}
+}
+
+func (m *NetworkPolicyConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NetworkPolicyConfig.Unmarshal(m, b)
+}
+func (m *NetworkPolicyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NetworkPolicyConfig.Marshal(b, m, deterministic)
+}
+func (m *NetworkPolicyConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyConfig.Merge(m, src)
+}
+func (m *NetworkPolicyConfig) XXX_Size() int {
+ return xxx_messageInfo_NetworkPolicyConfig.Size(m)
+}
+func (m *NetworkPolicyConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyConfig proto.InternalMessageInfo
+
+func (m *NetworkPolicyConfig) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// Configuration options for private clusters.
+type PrivateClusterConfig struct {
+ // Whether nodes have internal IP addresses only. If enabled, all nodes are
+ // given only RFC 1918 private addresses and communicate with the master via
+ // private networking.
+ EnablePrivateNodes bool `protobuf:"varint,1,opt,name=enable_private_nodes,json=enablePrivateNodes,proto3" json:"enable_private_nodes,omitempty"`
+ // Whether the master's internal IP address is used as the cluster endpoint.
+ EnablePrivateEndpoint bool `protobuf:"varint,2,opt,name=enable_private_endpoint,json=enablePrivateEndpoint,proto3" json:"enable_private_endpoint,omitempty"`
+ // The IP range in CIDR notation to use for the hosted master network. This
+ // range will be used for assigning internal IP addresses to the master or
+ // set of masters, as well as the ILB VIP. This range must not overlap with
+ // any other ranges in use within the cluster's network.
+ MasterIpv4CidrBlock string `protobuf:"bytes,3,opt,name=master_ipv4_cidr_block,json=masterIpv4CidrBlock,proto3" json:"master_ipv4_cidr_block,omitempty"`
+ // Output only. The internal IP address of this cluster's master endpoint.
+ PrivateEndpoint string `protobuf:"bytes,4,opt,name=private_endpoint,json=privateEndpoint,proto3" json:"private_endpoint,omitempty"`
+ // Output only. The external IP address of this cluster's master endpoint.
+ PublicEndpoint string `protobuf:"bytes,5,opt,name=public_endpoint,json=publicEndpoint,proto3" json:"public_endpoint,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PrivateClusterConfig) Reset() { *m = PrivateClusterConfig{} }
+func (m *PrivateClusterConfig) String() string { return proto.CompactTextString(m) }
+func (*PrivateClusterConfig) ProtoMessage() {}
+func (*PrivateClusterConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{8}
+}
+
+func (m *PrivateClusterConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PrivateClusterConfig.Unmarshal(m, b)
+}
+func (m *PrivateClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PrivateClusterConfig.Marshal(b, m, deterministic)
+}
+func (m *PrivateClusterConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PrivateClusterConfig.Merge(m, src)
+}
+func (m *PrivateClusterConfig) XXX_Size() int {
+ return xxx_messageInfo_PrivateClusterConfig.Size(m)
+}
+func (m *PrivateClusterConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_PrivateClusterConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PrivateClusterConfig proto.InternalMessageInfo
+
+func (m *PrivateClusterConfig) GetEnablePrivateNodes() bool {
+ if m != nil {
+ return m.EnablePrivateNodes
+ }
+ return false
+}
+
+func (m *PrivateClusterConfig) GetEnablePrivateEndpoint() bool {
+ if m != nil {
+ return m.EnablePrivateEndpoint
+ }
+ return false
+}
+
+func (m *PrivateClusterConfig) GetMasterIpv4CidrBlock() string {
+ if m != nil {
+ return m.MasterIpv4CidrBlock
+ }
+ return ""
+}
+
+func (m *PrivateClusterConfig) GetPrivateEndpoint() string {
+ if m != nil {
+ return m.PrivateEndpoint
+ }
+ return ""
+}
+
+func (m *PrivateClusterConfig) GetPublicEndpoint() string {
+ if m != nil {
+ return m.PublicEndpoint
+ }
+ return ""
+}
+
+// Configuration options for the master authorized networks feature. Enabled
+// master authorized networks will disallow all external traffic to access
+// Kubernetes master through HTTPS except traffic from the given CIDR blocks,
+// Google Compute Engine Public IPs and Google Prod IPs.
+type MasterAuthorizedNetworksConfig struct {
+ // Whether or not master authorized networks is enabled.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // cidr_blocks define up to 10 external networks that could access
+ // Kubernetes master through HTTPS.
+ CidrBlocks []*MasterAuthorizedNetworksConfig_CidrBlock `protobuf:"bytes,2,rep,name=cidr_blocks,json=cidrBlocks,proto3" json:"cidr_blocks,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MasterAuthorizedNetworksConfig) Reset() { *m = MasterAuthorizedNetworksConfig{} }
+func (m *MasterAuthorizedNetworksConfig) String() string { return proto.CompactTextString(m) }
+func (*MasterAuthorizedNetworksConfig) ProtoMessage() {}
+func (*MasterAuthorizedNetworksConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{9}
+}
+
+func (m *MasterAuthorizedNetworksConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig.Unmarshal(m, b)
+}
+func (m *MasterAuthorizedNetworksConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig.Marshal(b, m, deterministic)
+}
+func (m *MasterAuthorizedNetworksConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MasterAuthorizedNetworksConfig.Merge(m, src)
+}
+func (m *MasterAuthorizedNetworksConfig) XXX_Size() int {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig.Size(m)
+}
+func (m *MasterAuthorizedNetworksConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_MasterAuthorizedNetworksConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MasterAuthorizedNetworksConfig proto.InternalMessageInfo
+
+func (m *MasterAuthorizedNetworksConfig) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *MasterAuthorizedNetworksConfig) GetCidrBlocks() []*MasterAuthorizedNetworksConfig_CidrBlock {
+ if m != nil {
+ return m.CidrBlocks
+ }
+ return nil
+}
+
+// CidrBlock contains an optional name and one CIDR block.
+type MasterAuthorizedNetworksConfig_CidrBlock struct {
+ // display_name is an optional field for users to identify CIDR blocks.
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // cidr_block must be specified in CIDR notation.
+ CidrBlock string `protobuf:"bytes,2,opt,name=cidr_block,json=cidrBlock,proto3" json:"cidr_block,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) Reset() {
+ *m = MasterAuthorizedNetworksConfig_CidrBlock{}
+}
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) String() string { return proto.CompactTextString(m) }
+func (*MasterAuthorizedNetworksConfig_CidrBlock) ProtoMessage() {}
+func (*MasterAuthorizedNetworksConfig_CidrBlock) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{9, 0}
+}
+
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Unmarshal(m, b)
+}
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Marshal(b, m, deterministic)
+}
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Merge(m, src)
+}
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_Size() int {
+ return xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.Size(m)
+}
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) XXX_DiscardUnknown() {
+ xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MasterAuthorizedNetworksConfig_CidrBlock proto.InternalMessageInfo
+
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetDisplayName() string {
+ if m != nil {
+ return m.DisplayName
+ }
+ return ""
+}
+
+func (m *MasterAuthorizedNetworksConfig_CidrBlock) GetCidrBlock() string {
+ if m != nil {
+ return m.CidrBlock
+ }
+ return ""
+}
+
+// Configuration for the legacy Attribute Based Access Control authorization
+// mode.
+type LegacyAbac struct {
+ // Whether the ABAC authorizer is enabled for this cluster. When enabled,
+ // identities in the system, including service accounts, nodes, and
+ // controllers, will have statically granted permissions beyond those
+ // provided by the RBAC configuration or IAM.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LegacyAbac) Reset() { *m = LegacyAbac{} }
+func (m *LegacyAbac) String() string { return proto.CompactTextString(m) }
+func (*LegacyAbac) ProtoMessage() {}
+func (*LegacyAbac) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{10}
+}
+
+func (m *LegacyAbac) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LegacyAbac.Unmarshal(m, b)
+}
+func (m *LegacyAbac) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LegacyAbac.Marshal(b, m, deterministic)
+}
+func (m *LegacyAbac) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LegacyAbac.Merge(m, src)
+}
+func (m *LegacyAbac) XXX_Size() int {
+ return xxx_messageInfo_LegacyAbac.Size(m)
+}
+func (m *LegacyAbac) XXX_DiscardUnknown() {
+ xxx_messageInfo_LegacyAbac.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LegacyAbac proto.InternalMessageInfo
+
+func (m *LegacyAbac) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+// Configuration options for the NetworkPolicy feature.
+// https://kubernetes.io/docs/concepts/services-networking/networkpolicies/
+type NetworkPolicy struct {
+ // The selected network policy provider.
+ Provider NetworkPolicy_Provider `protobuf:"varint,1,opt,name=provider,proto3,enum=google.container.v1.NetworkPolicy_Provider" json:"provider,omitempty"`
+ // Whether network policy is enabled on the cluster.
+ Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
+func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) }
+func (*NetworkPolicy) ProtoMessage() {}
+func (*NetworkPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{11}
+}
+
+func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NetworkPolicy.Unmarshal(m, b)
+}
+func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NetworkPolicy.Marshal(b, m, deterministic)
+}
+func (m *NetworkPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicy.Merge(m, src)
+}
+func (m *NetworkPolicy) XXX_Size() int {
+ return xxx_messageInfo_NetworkPolicy.Size(m)
+}
+func (m *NetworkPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
+
+func (m *NetworkPolicy) GetProvider() NetworkPolicy_Provider {
+ if m != nil {
+ return m.Provider
+ }
+ return NetworkPolicy_PROVIDER_UNSPECIFIED
+}
+
+func (m *NetworkPolicy) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+// Configuration for controlling how IPs are allocated in the cluster.
+type IPAllocationPolicy struct {
+ // Whether alias IPs will be used for pod IPs in the cluster.
+ UseIpAliases bool `protobuf:"varint,1,opt,name=use_ip_aliases,json=useIpAliases,proto3" json:"use_ip_aliases,omitempty"`
+ // Whether a new subnetwork will be created automatically for the cluster.
+ //
+ // This field is only applicable when `use_ip_aliases` is true.
+ CreateSubnetwork bool `protobuf:"varint,2,opt,name=create_subnetwork,json=createSubnetwork,proto3" json:"create_subnetwork,omitempty"`
+ // A custom subnetwork name to be used if `create_subnetwork` is true. If
+ // this field is empty, then an automatic name will be chosen for the new
+ // subnetwork.
+ SubnetworkName string `protobuf:"bytes,3,opt,name=subnetwork_name,json=subnetworkName,proto3" json:"subnetwork_name,omitempty"`
+ // This field is deprecated, use cluster_ipv4_cidr_block.
+ ClusterIpv4Cidr string `protobuf:"bytes,4,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"` // Deprecated: Do not use.
+ // This field is deprecated, use node_ipv4_cidr_block.
+ NodeIpv4Cidr string `protobuf:"bytes,5,opt,name=node_ipv4_cidr,json=nodeIpv4Cidr,proto3" json:"node_ipv4_cidr,omitempty"` // Deprecated: Do not use.
+ // This field is deprecated, use services_ipv4_cidr_block.
+ ServicesIpv4Cidr string `protobuf:"bytes,6,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"` // Deprecated: Do not use.
+ // The name of the secondary range to be used for the cluster CIDR
+ // block. The secondary range will be used for pod IP
+ // addresses. This must be an existing secondary range associated
+ // with the cluster subnetwork.
+ //
+ // This field is only applicable with use_ip_aliases is true and
+ // create_subnetwork is false.
+ ClusterSecondaryRangeName string `protobuf:"bytes,7,opt,name=cluster_secondary_range_name,json=clusterSecondaryRangeName,proto3" json:"cluster_secondary_range_name,omitempty"`
+ // The name of the secondary range to be used for the services
+ // CIDR block. The secondary range will be used for service
+ // ClusterIPs. This must be an existing secondary range associated
+ // with the cluster subnetwork.
+ //
+ // This field is only applicable with use_ip_aliases is true and
+ // create_subnetwork is false.
+ ServicesSecondaryRangeName string `protobuf:"bytes,8,opt,name=services_secondary_range_name,json=servicesSecondaryRangeName,proto3" json:"services_secondary_range_name,omitempty"`
+ // The IP address range for the cluster pod IPs. If this field is set, then
+ // `cluster.cluster_ipv4_cidr` must be left blank.
+ //
+ // This field is only applicable when `use_ip_aliases` is true.
+ //
+ // Set to blank to have a range chosen with the default size.
+ //
+ // Set to /netmask (e.g. `/14`) to have a range chosen with a specific
+ // netmask.
+ //
+ // Set to a
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
+ // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range
+ // to use.
+ ClusterIpv4CidrBlock string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr_block,json=clusterIpv4CidrBlock,proto3" json:"cluster_ipv4_cidr_block,omitempty"`
+ // The IP address range of the instance IPs in this cluster.
+ //
+ // This is applicable only if `create_subnetwork` is true.
+ //
+ // Set to blank to have a range chosen with the default size.
+ //
+ // Set to /netmask (e.g. `/14`) to have a range chosen with a specific
+ // netmask.
+ //
+ // Set to a
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
+ // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range
+ // to use.
+ NodeIpv4CidrBlock string `protobuf:"bytes,10,opt,name=node_ipv4_cidr_block,json=nodeIpv4CidrBlock,proto3" json:"node_ipv4_cidr_block,omitempty"`
+ // The IP address range of the services IPs in this cluster. If blank, a range
+ // will be automatically chosen with the default size.
+ //
+ // This field is only applicable when `use_ip_aliases` is true.
+ //
+ // Set to blank to have a range chosen with the default size.
+ //
+ // Set to /netmask (e.g. `/14`) to have a range chosen with a specific
+ // netmask.
+ //
+ // Set to a
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `10.96.0.0/14`) from the RFC-1918 private networks (e.g.
+ // `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range
+ // to use.
+ ServicesIpv4CidrBlock string `protobuf:"bytes,11,opt,name=services_ipv4_cidr_block,json=servicesIpv4CidrBlock,proto3" json:"services_ipv4_cidr_block,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *IPAllocationPolicy) Reset() { *m = IPAllocationPolicy{} }
+func (m *IPAllocationPolicy) String() string { return proto.CompactTextString(m) }
+func (*IPAllocationPolicy) ProtoMessage() {}
+func (*IPAllocationPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{12}
+}
+
+func (m *IPAllocationPolicy) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_IPAllocationPolicy.Unmarshal(m, b)
+}
+func (m *IPAllocationPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_IPAllocationPolicy.Marshal(b, m, deterministic)
+}
+func (m *IPAllocationPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IPAllocationPolicy.Merge(m, src)
+}
+func (m *IPAllocationPolicy) XXX_Size() int {
+ return xxx_messageInfo_IPAllocationPolicy.Size(m)
+}
+func (m *IPAllocationPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_IPAllocationPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IPAllocationPolicy proto.InternalMessageInfo
+
+func (m *IPAllocationPolicy) GetUseIpAliases() bool {
+ if m != nil {
+ return m.UseIpAliases
+ }
+ return false
+}
+
+func (m *IPAllocationPolicy) GetCreateSubnetwork() bool {
+ if m != nil {
+ return m.CreateSubnetwork
+ }
+ return false
+}
+
+func (m *IPAllocationPolicy) GetSubnetworkName() string {
+ if m != nil {
+ return m.SubnetworkName
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *IPAllocationPolicy) GetClusterIpv4Cidr() string {
+ if m != nil {
+ return m.ClusterIpv4Cidr
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *IPAllocationPolicy) GetNodeIpv4Cidr() string {
+ if m != nil {
+ return m.NodeIpv4Cidr
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *IPAllocationPolicy) GetServicesIpv4Cidr() string {
+ if m != nil {
+ return m.ServicesIpv4Cidr
+ }
+ return ""
+}
+
+func (m *IPAllocationPolicy) GetClusterSecondaryRangeName() string {
+ if m != nil {
+ return m.ClusterSecondaryRangeName
+ }
+ return ""
+}
+
+func (m *IPAllocationPolicy) GetServicesSecondaryRangeName() string {
+ if m != nil {
+ return m.ServicesSecondaryRangeName
+ }
+ return ""
+}
+
+func (m *IPAllocationPolicy) GetClusterIpv4CidrBlock() string {
+ if m != nil {
+ return m.ClusterIpv4CidrBlock
+ }
+ return ""
+}
+
+func (m *IPAllocationPolicy) GetNodeIpv4CidrBlock() string {
+ if m != nil {
+ return m.NodeIpv4CidrBlock
+ }
+ return ""
+}
+
+func (m *IPAllocationPolicy) GetServicesIpv4CidrBlock() string {
+ if m != nil {
+ return m.ServicesIpv4CidrBlock
+ }
+ return ""
+}
+
+// A Google Kubernetes Engine cluster.
+type Cluster struct {
+ // The name of this cluster. The name must be unique within this project
+ // and zone, and can be up to 40 characters with the following restrictions:
+ //
+ // * Lowercase letters, numbers, and hyphens only.
+ // * Must start with a letter.
+ // * Must end with a number or a letter.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional description of this cluster.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The number of nodes to create in this cluster. You must ensure that your
+ // Compute Engine resource quota
+ // is sufficient for this number of instances. You must also have available
+ // firewall and routes quota.
+ // For requests, this field should only be used in lieu of a
+ // "node_pool" object, since this configuration (along with the
+ // "node_config") will be used to create a "NodePool" object with an
+ // auto-generated name. Do not use this and a node_pool at the same time.
+ InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"`
+ // Parameters used in creating the cluster's nodes.
+ // See `nodeConfig` for the description of its properties.
+ // For requests, this field should only be used in lieu of a
+ // "node_pool" object, since this configuration (along with the
+ // "initial_node_count") will be used to create a "NodePool" object with an
+ // auto-generated name. Do not use this and a node_pool at the same time.
+ // For responses, this field will be populated with the node configuration of
+ // the first node pool.
+ //
+ // If unspecified, the defaults are used.
+ NodeConfig *NodeConfig `protobuf:"bytes,4,opt,name=node_config,json=nodeConfig,proto3" json:"node_config,omitempty"`
+ // The authentication information for accessing the master endpoint.
+ MasterAuth *MasterAuth `protobuf:"bytes,5,opt,name=master_auth,json=masterAuth,proto3" json:"master_auth,omitempty"`
+ // The logging service the cluster should use to write logs.
+ // Currently available options:
+ //
+ // * `logging.googleapis.com` - the Google Cloud Logging service.
+ // * `none` - no logs will be exported from the cluster.
+ // * if left as an empty string, `logging.googleapis.com` will be used.
+ LoggingService string `protobuf:"bytes,6,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"`
+ // The monitoring service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * `monitoring.googleapis.com` - the Google Cloud Monitoring service.
+ // * `none` - no metrics will be exported from the cluster.
+ // * if left as an empty string, `monitoring.googleapis.com` will be used.
+ MonitoringService string `protobuf:"bytes,7,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"`
+ // The name of the Google Compute Engine
+ // [network](/compute/docs/networks-and-firewalls#networks) to which the
+ // cluster is connected. If left unspecified, the `default` network
+ // will be used.
+ Network string `protobuf:"bytes,8,opt,name=network,proto3" json:"network,omitempty"`
+ // The IP address range of the container pods in this cluster, in
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `10.96.0.0/14`). Leave blank to have
+ // one automatically chosen or specify a `/14` block in `10.0.0.0/8`.
+ ClusterIpv4Cidr string `protobuf:"bytes,9,opt,name=cluster_ipv4_cidr,json=clusterIpv4Cidr,proto3" json:"cluster_ipv4_cidr,omitempty"`
+ // Configurations for the various addons available to run in the cluster.
+ AddonsConfig *AddonsConfig `protobuf:"bytes,10,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"`
+ // The name of the Google Compute Engine
+ // [subnetwork](/compute/docs/subnetworks) to which the
+ // cluster is connected.
+ Subnetwork string `protobuf:"bytes,11,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"`
+ // The node pools associated with this cluster.
+ // This field should not be set if "node_config" or "initial_node_count" are
+ // specified.
+ NodePools []*NodePool `protobuf:"bytes,12,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"`
+ // The list of Google Compute Engine
+ // [locations](/compute/docs/zones#available) in which the cluster's nodes
+ // should be located.
+ Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"`
+ // Kubernetes alpha features are enabled on this cluster. This includes alpha
+ // API groups (e.g. v1alpha1) and features that may not be production ready in
+ // the kubernetes version of the master and nodes.
+ // The cluster has no SLA for uptime and master/node upgrades are disabled.
+ // Alpha enabled clusters are automatically deleted thirty days after
+ // creation.
+ EnableKubernetesAlpha bool `protobuf:"varint,14,opt,name=enable_kubernetes_alpha,json=enableKubernetesAlpha,proto3" json:"enable_kubernetes_alpha,omitempty"`
+ // The resource labels for the cluster to use to annotate any related
+ // Google Compute Engine resources.
+ ResourceLabels map[string]string `protobuf:"bytes,15,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The fingerprint of the set of labels for this cluster.
+ LabelFingerprint string `protobuf:"bytes,16,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"`
+ // Configuration for the legacy ABAC authorization mode.
+ LegacyAbac *LegacyAbac `protobuf:"bytes,18,opt,name=legacy_abac,json=legacyAbac,proto3" json:"legacy_abac,omitempty"`
+ // Configuration options for the NetworkPolicy feature.
+ NetworkPolicy *NetworkPolicy `protobuf:"bytes,19,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"`
+ // Configuration for cluster IP allocation.
+ IpAllocationPolicy *IPAllocationPolicy `protobuf:"bytes,20,opt,name=ip_allocation_policy,json=ipAllocationPolicy,proto3" json:"ip_allocation_policy,omitempty"`
+ // The configuration options for master authorized networks feature.
+ MasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,22,opt,name=master_authorized_networks_config,json=masterAuthorizedNetworksConfig,proto3" json:"master_authorized_networks_config,omitempty"`
+ // Configure the maintenance policy for this cluster.
+ MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,23,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"`
+ // Configuration for cluster networking.
+ NetworkConfig *NetworkConfig `protobuf:"bytes,27,opt,name=network_config,json=networkConfig,proto3" json:"network_config,omitempty"`
+ // Configuration for private cluster.
+ PrivateClusterConfig *PrivateClusterConfig `protobuf:"bytes,37,opt,name=private_cluster_config,json=privateClusterConfig,proto3" json:"private_cluster_config,omitempty"`
+ // [Output only] Server-defined URL for the resource.
+ SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"`
+ // [Output only] The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field is deprecated, use location instead.
+ Zone string `protobuf:"bytes,101,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // [Output only] The IP address of this cluster's master endpoint.
+ // The endpoint can be accessed from the internet at
+ // `https://username:password@endpoint/`.
+ //
+ // See the `masterAuth` property of this resource for username and
+ // password information.
+ Endpoint string `protobuf:"bytes,102,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+ // The initial Kubernetes version for this cluster. Valid versions are those
+ // found in validMasterVersions returned by getServerConfig. The version can
+ // be upgraded over time; such upgrades are reflected in
+ // currentMasterVersion and currentNodeVersion.
+ //
+ // Users may specify either explicit versions offered by
+ // Kubernetes Engine or version aliases, which have the following behavior:
+ //
+ // - "latest": picks the highest valid Kubernetes version
+ // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+ // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+ // - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ // - "","-": picks the default Kubernetes version
+ InitialClusterVersion string `protobuf:"bytes,103,opt,name=initial_cluster_version,json=initialClusterVersion,proto3" json:"initial_cluster_version,omitempty"`
+ // [Output only] The current software version of the master endpoint.
+ CurrentMasterVersion string `protobuf:"bytes,104,opt,name=current_master_version,json=currentMasterVersion,proto3" json:"current_master_version,omitempty"`
+ // [Output only] Deprecated, use
+ // [NodePool.version](/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePool)
+ // instead. The current version of the node software components. If they are
+ // currently at multiple versions because they're in the process of being
+ // upgraded, this reflects the minimum version of all nodes.
+ CurrentNodeVersion string `protobuf:"bytes,105,opt,name=current_node_version,json=currentNodeVersion,proto3" json:"current_node_version,omitempty"` // Deprecated: Do not use.
+ // [Output only] The time the cluster was created, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ CreateTime string `protobuf:"bytes,106,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // [Output only] The current status of this cluster.
+ Status Cluster_Status `protobuf:"varint,107,opt,name=status,proto3,enum=google.container.v1.Cluster_Status" json:"status,omitempty"`
+ // [Output only] Additional information about the current status of this
+ // cluster, if available.
+ StatusMessage string `protobuf:"bytes,108,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+ // [Output only] The size of the address space on each node for hosting
+ // containers. This is provisioned from within the `container_ipv4_cidr`
+ // range.
+ NodeIpv4CidrSize int32 `protobuf:"varint,109,opt,name=node_ipv4_cidr_size,json=nodeIpv4CidrSize,proto3" json:"node_ipv4_cidr_size,omitempty"`
+ // [Output only] The IP address range of the Kubernetes services in
+ // this cluster, in
+ // [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing)
+ // notation (e.g. `1.2.3.4/29`). Service addresses are
+ // typically put in the last `/16` from the container CIDR.
+ ServicesIpv4Cidr string `protobuf:"bytes,110,opt,name=services_ipv4_cidr,json=servicesIpv4Cidr,proto3" json:"services_ipv4_cidr,omitempty"`
+ // Deprecated. Use node_pools.instance_group_urls.
+ InstanceGroupUrls []string `protobuf:"bytes,111,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"` // Deprecated: Do not use.
+ // [Output only] The number of nodes currently in the cluster.
+ CurrentNodeCount int32 `protobuf:"varint,112,opt,name=current_node_count,json=currentNodeCount,proto3" json:"current_node_count,omitempty"`
+ // [Output only] The time the cluster will be automatically
+ // deleted in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ ExpireTime string `protobuf:"bytes,113,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+ // [Output only] The name of the Google Compute Engine
+ // [zone](/compute/docs/regions-zones/regions-zones#available) or
+ // [region](/compute/docs/regions-zones/regions-zones#available) in which
+ // the cluster resides.
+ Location string `protobuf:"bytes,114,opt,name=location,proto3" json:"location,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Cluster) Reset() { *m = Cluster{} }
+func (m *Cluster) String() string { return proto.CompactTextString(m) }
+func (*Cluster) ProtoMessage() {}
+func (*Cluster) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{13}
+}
+
+func (m *Cluster) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Cluster.Unmarshal(m, b)
+}
+func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Cluster.Marshal(b, m, deterministic)
+}
+func (m *Cluster) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Cluster.Merge(m, src)
+}
+func (m *Cluster) XXX_Size() int {
+ return xxx_messageInfo_Cluster.Size(m)
+}
+func (m *Cluster) XXX_DiscardUnknown() {
+ xxx_messageInfo_Cluster.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Cluster proto.InternalMessageInfo
+
+func (m *Cluster) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Cluster) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *Cluster) GetInitialNodeCount() int32 {
+ if m != nil {
+ return m.InitialNodeCount
+ }
+ return 0
+}
+
+func (m *Cluster) GetNodeConfig() *NodeConfig {
+ if m != nil {
+ return m.NodeConfig
+ }
+ return nil
+}
+
+func (m *Cluster) GetMasterAuth() *MasterAuth {
+ if m != nil {
+ return m.MasterAuth
+ }
+ return nil
+}
+
+func (m *Cluster) GetLoggingService() string {
+ if m != nil {
+ return m.LoggingService
+ }
+ return ""
+}
+
+func (m *Cluster) GetMonitoringService() string {
+ if m != nil {
+ return m.MonitoringService
+ }
+ return ""
+}
+
+func (m *Cluster) GetNetwork() string {
+ if m != nil {
+ return m.Network
+ }
+ return ""
+}
+
+func (m *Cluster) GetClusterIpv4Cidr() string {
+ if m != nil {
+ return m.ClusterIpv4Cidr
+ }
+ return ""
+}
+
+func (m *Cluster) GetAddonsConfig() *AddonsConfig {
+ if m != nil {
+ return m.AddonsConfig
+ }
+ return nil
+}
+
+func (m *Cluster) GetSubnetwork() string {
+ if m != nil {
+ return m.Subnetwork
+ }
+ return ""
+}
+
+func (m *Cluster) GetNodePools() []*NodePool {
+ if m != nil {
+ return m.NodePools
+ }
+ return nil
+}
+
+func (m *Cluster) GetLocations() []string {
+ if m != nil {
+ return m.Locations
+ }
+ return nil
+}
+
+func (m *Cluster) GetEnableKubernetesAlpha() bool {
+ if m != nil {
+ return m.EnableKubernetesAlpha
+ }
+ return false
+}
+
+func (m *Cluster) GetResourceLabels() map[string]string {
+ if m != nil {
+ return m.ResourceLabels
+ }
+ return nil
+}
+
+func (m *Cluster) GetLabelFingerprint() string {
+ if m != nil {
+ return m.LabelFingerprint
+ }
+ return ""
+}
+
+func (m *Cluster) GetLegacyAbac() *LegacyAbac {
+ if m != nil {
+ return m.LegacyAbac
+ }
+ return nil
+}
+
+func (m *Cluster) GetNetworkPolicy() *NetworkPolicy {
+ if m != nil {
+ return m.NetworkPolicy
+ }
+ return nil
+}
+
+func (m *Cluster) GetIpAllocationPolicy() *IPAllocationPolicy {
+ if m != nil {
+ return m.IpAllocationPolicy
+ }
+ return nil
+}
+
+func (m *Cluster) GetMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig {
+ if m != nil {
+ return m.MasterAuthorizedNetworksConfig
+ }
+ return nil
+}
+
+func (m *Cluster) GetMaintenancePolicy() *MaintenancePolicy {
+ if m != nil {
+ return m.MaintenancePolicy
+ }
+ return nil
+}
+
+func (m *Cluster) GetNetworkConfig() *NetworkConfig {
+ if m != nil {
+ return m.NetworkConfig
+ }
+ return nil
+}
+
+func (m *Cluster) GetPrivateClusterConfig() *PrivateClusterConfig {
+ if m != nil {
+ return m.PrivateClusterConfig
+ }
+ return nil
+}
+
+func (m *Cluster) GetSelfLink() string {
+ if m != nil {
+ return m.SelfLink
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *Cluster) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *Cluster) GetEndpoint() string {
+ if m != nil {
+ return m.Endpoint
+ }
+ return ""
+}
+
+func (m *Cluster) GetInitialClusterVersion() string {
+ if m != nil {
+ return m.InitialClusterVersion
+ }
+ return ""
+}
+
+func (m *Cluster) GetCurrentMasterVersion() string {
+ if m != nil {
+ return m.CurrentMasterVersion
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *Cluster) GetCurrentNodeVersion() string {
+ if m != nil {
+ return m.CurrentNodeVersion
+ }
+ return ""
+}
+
+func (m *Cluster) GetCreateTime() string {
+ if m != nil {
+ return m.CreateTime
+ }
+ return ""
+}
+
+func (m *Cluster) GetStatus() Cluster_Status {
+ if m != nil {
+ return m.Status
+ }
+ return Cluster_STATUS_UNSPECIFIED
+}
+
+func (m *Cluster) GetStatusMessage() string {
+ if m != nil {
+ return m.StatusMessage
+ }
+ return ""
+}
+
+func (m *Cluster) GetNodeIpv4CidrSize() int32 {
+ if m != nil {
+ return m.NodeIpv4CidrSize
+ }
+ return 0
+}
+
+func (m *Cluster) GetServicesIpv4Cidr() string {
+ if m != nil {
+ return m.ServicesIpv4Cidr
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *Cluster) GetInstanceGroupUrls() []string {
+ if m != nil {
+ return m.InstanceGroupUrls
+ }
+ return nil
+}
+
+func (m *Cluster) GetCurrentNodeCount() int32 {
+ if m != nil {
+ return m.CurrentNodeCount
+ }
+ return 0
+}
+
+func (m *Cluster) GetExpireTime() string {
+ if m != nil {
+ return m.ExpireTime
+ }
+ return ""
+}
+
+func (m *Cluster) GetLocation() string {
+ if m != nil {
+ return m.Location
+ }
+ return ""
+}
+
+// ClusterUpdate describes an update to the cluster. Exactly one update can
+// be applied to a cluster with each request, so at most one field can be
+// provided.
+type ClusterUpdate struct {
+ // The Kubernetes version to change the nodes to (typically an
+ // upgrade).
+ //
+ // Users may specify either explicit versions offered by
+ // Kubernetes Engine or version aliases, which have the following behavior:
+ //
+ // - "latest": picks the highest valid Kubernetes version
+ // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+ // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+ // - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ // - "-": picks the Kubernetes master version
+ DesiredNodeVersion string `protobuf:"bytes,4,opt,name=desired_node_version,json=desiredNodeVersion,proto3" json:"desired_node_version,omitempty"`
+ // The monitoring service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * "monitoring.googleapis.com" - the Google Cloud Monitoring service
+ // * "none" - no metrics will be exported from the cluster
+ DesiredMonitoringService string `protobuf:"bytes,5,opt,name=desired_monitoring_service,json=desiredMonitoringService,proto3" json:"desired_monitoring_service,omitempty"`
+ // Configurations for the various addons available to run in the cluster.
+ DesiredAddonsConfig *AddonsConfig `protobuf:"bytes,6,opt,name=desired_addons_config,json=desiredAddonsConfig,proto3" json:"desired_addons_config,omitempty"`
+ // The node pool to be upgraded. This field is mandatory if
+ // "desired_node_version", "desired_image_family" or
+ // "desired_node_pool_autoscaling" is specified and there is more than one
+ // node pool on the cluster.
+ DesiredNodePoolId string `protobuf:"bytes,7,opt,name=desired_node_pool_id,json=desiredNodePoolId,proto3" json:"desired_node_pool_id,omitempty"`
+ // The desired image type for the node pool.
+ // NOTE: Set the "desired_node_pool" field as well.
+ DesiredImageType string `protobuf:"bytes,8,opt,name=desired_image_type,json=desiredImageType,proto3" json:"desired_image_type,omitempty"`
+ // Autoscaler configuration for the node pool specified in
+ // desired_node_pool_id. If there is only one pool in the
+ // cluster and desired_node_pool_id is not provided then
+ // the change applies to that single node pool.
+ DesiredNodePoolAutoscaling *NodePoolAutoscaling `protobuf:"bytes,9,opt,name=desired_node_pool_autoscaling,json=desiredNodePoolAutoscaling,proto3" json:"desired_node_pool_autoscaling,omitempty"`
+ // The desired list of Google Compute Engine
+ // [locations](/compute/docs/zones#available) in which the cluster's nodes
+ // should be located. Changing the locations a cluster is in will result
+ // in nodes being either created or removed from the cluster, depending on
+ // whether locations are being added or removed.
+ //
+ // This list must always include the cluster's primary zone.
+ DesiredLocations []string `protobuf:"bytes,10,rep,name=desired_locations,json=desiredLocations,proto3" json:"desired_locations,omitempty"`
+ // The desired configuration options for master authorized networks feature.
+ DesiredMasterAuthorizedNetworksConfig *MasterAuthorizedNetworksConfig `protobuf:"bytes,12,opt,name=desired_master_authorized_networks_config,json=desiredMasterAuthorizedNetworksConfig,proto3" json:"desired_master_authorized_networks_config,omitempty"`
+ // The Kubernetes version to change the master to.
+ //
+ // Users may specify either explicit versions offered by
+ // Kubernetes Engine or version aliases, which have the following behavior:
+ //
+ // - "latest": picks the highest valid Kubernetes version
+ // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+ // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+ // - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ // - "-": picks the default Kubernetes version
+ DesiredMasterVersion string `protobuf:"bytes,100,opt,name=desired_master_version,json=desiredMasterVersion,proto3" json:"desired_master_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClusterUpdate) Reset() { *m = ClusterUpdate{} }
+func (m *ClusterUpdate) String() string { return proto.CompactTextString(m) }
+func (*ClusterUpdate) ProtoMessage() {}
+func (*ClusterUpdate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{14}
+}
+
+func (m *ClusterUpdate) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ClusterUpdate.Unmarshal(m, b)
+}
+func (m *ClusterUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ClusterUpdate.Marshal(b, m, deterministic)
+}
+func (m *ClusterUpdate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterUpdate.Merge(m, src)
+}
+func (m *ClusterUpdate) XXX_Size() int {
+ return xxx_messageInfo_ClusterUpdate.Size(m)
+}
+func (m *ClusterUpdate) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterUpdate proto.InternalMessageInfo
+
+func (m *ClusterUpdate) GetDesiredNodeVersion() string {
+ if m != nil {
+ return m.DesiredNodeVersion
+ }
+ return ""
+}
+
+func (m *ClusterUpdate) GetDesiredMonitoringService() string {
+ if m != nil {
+ return m.DesiredMonitoringService
+ }
+ return ""
+}
+
+func (m *ClusterUpdate) GetDesiredAddonsConfig() *AddonsConfig {
+ if m != nil {
+ return m.DesiredAddonsConfig
+ }
+ return nil
+}
+
+func (m *ClusterUpdate) GetDesiredNodePoolId() string {
+ if m != nil {
+ return m.DesiredNodePoolId
+ }
+ return ""
+}
+
+func (m *ClusterUpdate) GetDesiredImageType() string {
+ if m != nil {
+ return m.DesiredImageType
+ }
+ return ""
+}
+
+func (m *ClusterUpdate) GetDesiredNodePoolAutoscaling() *NodePoolAutoscaling {
+ if m != nil {
+ return m.DesiredNodePoolAutoscaling
+ }
+ return nil
+}
+
+func (m *ClusterUpdate) GetDesiredLocations() []string {
+ if m != nil {
+ return m.DesiredLocations
+ }
+ return nil
+}
+
+func (m *ClusterUpdate) GetDesiredMasterAuthorizedNetworksConfig() *MasterAuthorizedNetworksConfig {
+ if m != nil {
+ return m.DesiredMasterAuthorizedNetworksConfig
+ }
+ return nil
+}
+
+func (m *ClusterUpdate) GetDesiredMasterVersion() string {
+ if m != nil {
+ return m.DesiredMasterVersion
+ }
+ return ""
+}
+
+// This operation resource represents operations that may have happened or are
+// happening on the cluster. All fields are output only.
+type Operation struct {
+ // The server-assigned ID for the operation.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the operation
+ // is taking place.
+ // This field is deprecated, use location instead.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // The operation type.
+ OperationType Operation_Type `protobuf:"varint,3,opt,name=operation_type,json=operationType,proto3,enum=google.container.v1.Operation_Type" json:"operation_type,omitempty"`
+ // The current status of the operation.
+ Status Operation_Status `protobuf:"varint,4,opt,name=status,proto3,enum=google.container.v1.Operation_Status" json:"status,omitempty"`
+ // Detailed operation progress, if available.
+ Detail string `protobuf:"bytes,8,opt,name=detail,proto3" json:"detail,omitempty"`
+ // If an error has occurred, a textual description of the error.
+ StatusMessage string `protobuf:"bytes,5,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+ // Server-defined URL for the resource.
+ SelfLink string `protobuf:"bytes,6,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"`
+ // Server-defined URL for the target of the operation.
+ TargetLink string `protobuf:"bytes,7,opt,name=target_link,json=targetLink,proto3" json:"target_link,omitempty"`
+ // [Output only] The name of the Google Compute Engine
+ // [zone](/compute/docs/regions-zones/regions-zones#available) or
+ // [region](/compute/docs/regions-zones/regions-zones#available) in which
+ // the cluster resides.
+ Location string `protobuf:"bytes,9,opt,name=location,proto3" json:"location,omitempty"`
+ // [Output only] The time the operation started, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ StartTime string `protobuf:"bytes,10,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // [Output only] The time the operation completed, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ EndTime string `protobuf:"bytes,11,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Operation) Reset() { *m = Operation{} }
+func (m *Operation) String() string { return proto.CompactTextString(m) }
+func (*Operation) ProtoMessage() {}
+func (*Operation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{15}
+}
+
+func (m *Operation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Operation.Unmarshal(m, b)
+}
+func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Operation.Marshal(b, m, deterministic)
+}
+func (m *Operation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Operation.Merge(m, src)
+}
+func (m *Operation) XXX_Size() int {
+ return xxx_messageInfo_Operation.Size(m)
+}
+func (m *Operation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Operation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Operation proto.InternalMessageInfo
+
+func (m *Operation) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *Operation) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *Operation) GetOperationType() Operation_Type {
+ if m != nil {
+ return m.OperationType
+ }
+ return Operation_TYPE_UNSPECIFIED
+}
+
+func (m *Operation) GetStatus() Operation_Status {
+ if m != nil {
+ return m.Status
+ }
+ return Operation_STATUS_UNSPECIFIED
+}
+
+func (m *Operation) GetDetail() string {
+ if m != nil {
+ return m.Detail
+ }
+ return ""
+}
+
+func (m *Operation) GetStatusMessage() string {
+ if m != nil {
+ return m.StatusMessage
+ }
+ return ""
+}
+
+func (m *Operation) GetSelfLink() string {
+ if m != nil {
+ return m.SelfLink
+ }
+ return ""
+}
+
+func (m *Operation) GetTargetLink() string {
+ if m != nil {
+ return m.TargetLink
+ }
+ return ""
+}
+
+func (m *Operation) GetLocation() string {
+ if m != nil {
+ return m.Location
+ }
+ return ""
+}
+
+func (m *Operation) GetStartTime() string {
+ if m != nil {
+ return m.StartTime
+ }
+ return ""
+}
+
+func (m *Operation) GetEndTime() string {
+ if m != nil {
+ return m.EndTime
+ }
+ return ""
+}
+
+// CreateClusterRequest creates a cluster.
+type CreateClusterRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the parent field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the parent field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // A [cluster
+ // resource](/container-engine/reference/rest/v1/projects.zones.clusters)
+ Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // The parent (project and location) where the cluster will be created.
+ // Specified in the format 'projects/*/locations/*'.
+ Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} }
+func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateClusterRequest) ProtoMessage() {}
+func (*CreateClusterRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{16}
+}
+
+func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b)
+}
+func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateClusterRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateClusterRequest.Merge(m, src)
+}
+func (m *CreateClusterRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateClusterRequest.Size(m)
+}
+func (m *CreateClusterRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *CreateClusterRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CreateClusterRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *CreateClusterRequest) GetCluster() *Cluster {
+ if m != nil {
+ return m.Cluster
+ }
+ return nil
+}
+
+func (m *CreateClusterRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+// GetClusterRequest gets the settings of a cluster.
+type GetClusterRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to retrieve.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster) of the cluster to retrieve.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
+func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*GetClusterRequest) ProtoMessage() {}
+func (*GetClusterRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{17}
+}
+
+func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b)
+}
+func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic)
+}
+func (m *GetClusterRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetClusterRequest.Merge(m, src)
+}
+func (m *GetClusterRequest) XXX_Size() int {
+ return xxx_messageInfo_GetClusterRequest.Size(m)
+}
+func (m *GetClusterRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetClusterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *GetClusterRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetClusterRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetClusterRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *GetClusterRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// UpdateClusterRequest updates the settings of a cluster.
+type UpdateClusterRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // A description of the update.
+ Update *ClusterUpdate `protobuf:"bytes,4,opt,name=update,proto3" json:"update,omitempty"`
+ // The name (project, location, cluster) of the cluster to update.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} }
+func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateClusterRequest) ProtoMessage() {}
+func (*UpdateClusterRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{18}
+}
+
+func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b)
+}
+func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateClusterRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateClusterRequest.Merge(m, src)
+}
+func (m *UpdateClusterRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateClusterRequest.Size(m)
+}
+func (m *UpdateClusterRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *UpdateClusterRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateClusterRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateClusterRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *UpdateClusterRequest) GetUpdate() *ClusterUpdate {
+ if m != nil {
+ return m.Update
+ }
+ return nil
+}
+
+func (m *UpdateClusterRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// UpdateNodePoolRequests update a node pool's image and/or version.
+type UpdateNodePoolRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // The Kubernetes version to change the nodes to (typically an
+ // upgrade).
+ //
+ // Users may specify either explicit versions offered by Kubernetes Engine or
+ // version aliases, which have the following behavior:
+ //
+ // - "latest": picks the highest valid Kubernetes version
+ // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+ // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+ // - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ // - "-": picks the Kubernetes master version
+ NodeVersion string `protobuf:"bytes,5,opt,name=node_version,json=nodeVersion,proto3" json:"node_version,omitempty"`
+ // The desired image type for the node pool.
+ ImageType string `protobuf:"bytes,6,opt,name=image_type,json=imageType,proto3" json:"image_type,omitempty"`
+ // The name (project, location, cluster, node pool) of the node pool to
+ // update. Specified in the format
+ // 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,8,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateNodePoolRequest) Reset() { *m = UpdateNodePoolRequest{} }
+func (m *UpdateNodePoolRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateNodePoolRequest) ProtoMessage() {}
+func (*UpdateNodePoolRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{19}
+}
+
+func (m *UpdateNodePoolRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateNodePoolRequest.Unmarshal(m, b)
+}
+func (m *UpdateNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateNodePoolRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateNodePoolRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateNodePoolRequest.Merge(m, src)
+}
+func (m *UpdateNodePoolRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateNodePoolRequest.Size(m)
+}
+func (m *UpdateNodePoolRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateNodePoolRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateNodePoolRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *UpdateNodePoolRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateNodePoolRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateNodePoolRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateNodePoolRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *UpdateNodePoolRequest) GetNodeVersion() string {
+ if m != nil {
+ return m.NodeVersion
+ }
+ return ""
+}
+
+func (m *UpdateNodePoolRequest) GetImageType() string {
+ if m != nil {
+ return m.ImageType
+ }
+ return ""
+}
+
+func (m *UpdateNodePoolRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool.
+type SetNodePoolAutoscalingRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // Autoscaling configuration for the node pool.
+ Autoscaling *NodePoolAutoscaling `protobuf:"bytes,5,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"`
+ // The name (project, location, cluster, node pool) of the node pool to set
+ // autoscaler settings. Specified in the format
+ // 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetNodePoolAutoscalingRequest) Reset() { *m = SetNodePoolAutoscalingRequest{} }
+func (m *SetNodePoolAutoscalingRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNodePoolAutoscalingRequest) ProtoMessage() {}
+func (*SetNodePoolAutoscalingRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{20}
+}
+
+func (m *SetNodePoolAutoscalingRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNodePoolAutoscalingRequest.Unmarshal(m, b)
+}
+func (m *SetNodePoolAutoscalingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNodePoolAutoscalingRequest.Marshal(b, m, deterministic)
+}
+func (m *SetNodePoolAutoscalingRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNodePoolAutoscalingRequest.Merge(m, src)
+}
+func (m *SetNodePoolAutoscalingRequest) XXX_Size() int {
+ return xxx_messageInfo_SetNodePoolAutoscalingRequest.Size(m)
+}
+func (m *SetNodePoolAutoscalingRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNodePoolAutoscalingRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNodePoolAutoscalingRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetNodePoolAutoscalingRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolAutoscalingRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolAutoscalingRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolAutoscalingRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *SetNodePoolAutoscalingRequest) GetAutoscaling() *NodePoolAutoscaling {
+ if m != nil {
+ return m.Autoscaling
+ }
+ return nil
+}
+
+func (m *SetNodePoolAutoscalingRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetLoggingServiceRequest sets the logging service of a cluster.
+type SetLoggingServiceRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"`
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The logging service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * "logging.googleapis.com" - the Google Cloud Logging service
+ // * "none" - no metrics will be exported from the cluster
+ LoggingService string `protobuf:"bytes,4,opt,name=logging_service,json=loggingService,proto3" json:"logging_service,omitempty"`
+ // The name (project, location, cluster) of the cluster to set logging.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetLoggingServiceRequest) Reset() { *m = SetLoggingServiceRequest{} }
+func (m *SetLoggingServiceRequest) String() string { return proto.CompactTextString(m) }
+func (*SetLoggingServiceRequest) ProtoMessage() {}
+func (*SetLoggingServiceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{21}
+}
+
+func (m *SetLoggingServiceRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetLoggingServiceRequest.Unmarshal(m, b)
+}
+func (m *SetLoggingServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetLoggingServiceRequest.Marshal(b, m, deterministic)
+}
+func (m *SetLoggingServiceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetLoggingServiceRequest.Merge(m, src)
+}
+func (m *SetLoggingServiceRequest) XXX_Size() int {
+ return xxx_messageInfo_SetLoggingServiceRequest.Size(m)
+}
+func (m *SetLoggingServiceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetLoggingServiceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetLoggingServiceRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetLoggingServiceRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+func (m *SetLoggingServiceRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLoggingServiceRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetLoggingServiceRequest) GetLoggingService() string {
+ if m != nil {
+ return m.LoggingService
+ }
+ return ""
+}
+
+func (m *SetLoggingServiceRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetMonitoringServiceRequest sets the monitoring service of a cluster.
+type SetMonitoringServiceRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The monitoring service the cluster should use to write metrics.
+ // Currently available options:
+ //
+ // * "monitoring.googleapis.com" - the Google Cloud Monitoring service
+ // * "none" - no metrics will be exported from the cluster
+ MonitoringService string `protobuf:"bytes,4,opt,name=monitoring_service,json=monitoringService,proto3" json:"monitoring_service,omitempty"`
+ // The name (project, location, cluster) of the cluster to set monitoring.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetMonitoringServiceRequest) Reset() { *m = SetMonitoringServiceRequest{} }
+func (m *SetMonitoringServiceRequest) String() string { return proto.CompactTextString(m) }
+func (*SetMonitoringServiceRequest) ProtoMessage() {}
+func (*SetMonitoringServiceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{22}
+}
+
+func (m *SetMonitoringServiceRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetMonitoringServiceRequest.Unmarshal(m, b)
+}
+func (m *SetMonitoringServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetMonitoringServiceRequest.Marshal(b, m, deterministic)
+}
+func (m *SetMonitoringServiceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetMonitoringServiceRequest.Merge(m, src)
+}
+func (m *SetMonitoringServiceRequest) XXX_Size() int {
+ return xxx_messageInfo_SetMonitoringServiceRequest.Size(m)
+}
+func (m *SetMonitoringServiceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetMonitoringServiceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetMonitoringServiceRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetMonitoringServiceRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetMonitoringServiceRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetMonitoringServiceRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetMonitoringServiceRequest) GetMonitoringService() string {
+ if m != nil {
+ return m.MonitoringService
+ }
+ return ""
+}
+
+func (m *SetMonitoringServiceRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetAddonsConfigRequest sets the addons associated with the cluster.
+type SetAddonsConfigRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The desired configurations for the various addons available to run in the
+ // cluster.
+ AddonsConfig *AddonsConfig `protobuf:"bytes,4,opt,name=addons_config,json=addonsConfig,proto3" json:"addons_config,omitempty"`
+ // The name (project, location, cluster) of the cluster to set addons.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetAddonsConfigRequest) Reset() { *m = SetAddonsConfigRequest{} }
+func (m *SetAddonsConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*SetAddonsConfigRequest) ProtoMessage() {}
+func (*SetAddonsConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{23}
+}
+
+func (m *SetAddonsConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetAddonsConfigRequest.Unmarshal(m, b)
+}
+func (m *SetAddonsConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetAddonsConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *SetAddonsConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetAddonsConfigRequest.Merge(m, src)
+}
+func (m *SetAddonsConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_SetAddonsConfigRequest.Size(m)
+}
+func (m *SetAddonsConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetAddonsConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetAddonsConfigRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetAddonsConfigRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetAddonsConfigRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetAddonsConfigRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetAddonsConfigRequest) GetAddonsConfig() *AddonsConfig {
+ if m != nil {
+ return m.AddonsConfig
+ }
+ return nil
+}
+
+func (m *SetAddonsConfigRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetLocationsRequest sets the locations of the cluster.
+type SetLocationsRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The desired list of Google Compute Engine
+ // [locations](/compute/docs/zones#available) in which the cluster's nodes
+ // should be located. Changing the locations a cluster is in will result
+ // in nodes being either created or removed from the cluster, depending on
+ // whether locations are being added or removed.
+ //
+ // This list must always include the cluster's primary zone.
+ Locations []string `protobuf:"bytes,4,rep,name=locations,proto3" json:"locations,omitempty"`
+ // The name (project, location, cluster) of the cluster to set locations.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetLocationsRequest) Reset() { *m = SetLocationsRequest{} }
+func (m *SetLocationsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetLocationsRequest) ProtoMessage() {}
+func (*SetLocationsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{24}
+}
+
+func (m *SetLocationsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetLocationsRequest.Unmarshal(m, b)
+}
+func (m *SetLocationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetLocationsRequest.Marshal(b, m, deterministic)
+}
+func (m *SetLocationsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetLocationsRequest.Merge(m, src)
+}
+func (m *SetLocationsRequest) XXX_Size() int {
+ return xxx_messageInfo_SetLocationsRequest.Size(m)
+}
+func (m *SetLocationsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetLocationsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetLocationsRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetLocationsRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLocationsRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLocationsRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetLocationsRequest) GetLocations() []string {
+ if m != nil {
+ return m.Locations
+ }
+ return nil
+}
+
+func (m *SetLocationsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// UpdateMasterRequest updates the master of the cluster.
+type UpdateMasterRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The Kubernetes version to change the master to.
+ //
+ // Users may specify either explicit versions offered by Kubernetes Engine or
+ // version aliases, which have the following behavior:
+ //
+ // - "latest": picks the highest valid Kubernetes version
+ // - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+ // - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+ // - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ // - "-": picks the default Kubernetes version
+ MasterVersion string `protobuf:"bytes,4,opt,name=master_version,json=masterVersion,proto3" json:"master_version,omitempty"`
+ // The name (project, location, cluster) of the cluster to update.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateMasterRequest) Reset() { *m = UpdateMasterRequest{} }
+func (m *UpdateMasterRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateMasterRequest) ProtoMessage() {}
+func (*UpdateMasterRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{25}
+}
+
+func (m *UpdateMasterRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateMasterRequest.Unmarshal(m, b)
+}
+func (m *UpdateMasterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateMasterRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateMasterRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateMasterRequest.Merge(m, src)
+}
+func (m *UpdateMasterRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateMasterRequest.Size(m)
+}
+func (m *UpdateMasterRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateMasterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateMasterRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *UpdateMasterRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateMasterRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *UpdateMasterRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *UpdateMasterRequest) GetMasterVersion() string {
+ if m != nil {
+ return m.MasterVersion
+ }
+ return ""
+}
+
+func (m *UpdateMasterRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetMasterAuthRequest updates the admin password of a cluster.
+type SetMasterAuthRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to upgrade.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The exact form of action to be taken on the master auth.
+ Action SetMasterAuthRequest_Action `protobuf:"varint,4,opt,name=action,proto3,enum=google.container.v1.SetMasterAuthRequest_Action" json:"action,omitempty"`
+ // A description of the update.
+ Update *MasterAuth `protobuf:"bytes,5,opt,name=update,proto3" json:"update,omitempty"`
+ // The name (project, location, cluster) of the cluster to set auth.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetMasterAuthRequest) Reset() { *m = SetMasterAuthRequest{} }
+func (m *SetMasterAuthRequest) String() string { return proto.CompactTextString(m) }
+func (*SetMasterAuthRequest) ProtoMessage() {}
+func (*SetMasterAuthRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{26}
+}
+
+func (m *SetMasterAuthRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetMasterAuthRequest.Unmarshal(m, b)
+}
+func (m *SetMasterAuthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetMasterAuthRequest.Marshal(b, m, deterministic)
+}
+func (m *SetMasterAuthRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetMasterAuthRequest.Merge(m, src)
+}
+func (m *SetMasterAuthRequest) XXX_Size() int {
+ return xxx_messageInfo_SetMasterAuthRequest.Size(m)
+}
+func (m *SetMasterAuthRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetMasterAuthRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetMasterAuthRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetMasterAuthRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetMasterAuthRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetMasterAuthRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetMasterAuthRequest) GetAction() SetMasterAuthRequest_Action {
+ if m != nil {
+ return m.Action
+ }
+ return SetMasterAuthRequest_UNKNOWN
+}
+
+func (m *SetMasterAuthRequest) GetUpdate() *MasterAuth {
+ if m != nil {
+ return m.Update
+ }
+ return nil
+}
+
+func (m *SetMasterAuthRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// DeleteClusterRequest deletes a cluster.
+type DeleteClusterRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to delete.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster) of the cluster to delete.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} }
+func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteClusterRequest) ProtoMessage() {}
+func (*DeleteClusterRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{27}
+}
+
+func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b)
+}
+func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteClusterRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteClusterRequest.Merge(m, src)
+}
+func (m *DeleteClusterRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteClusterRequest.Size(m)
+}
+func (m *DeleteClusterRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *DeleteClusterRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *DeleteClusterRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *DeleteClusterRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *DeleteClusterRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// ListClustersRequest lists clusters.
+type ListClustersRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the parent field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides, or "-" for all zones.
+ // This field has been deprecated and replaced by the parent field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // The parent (project and location) where the clusters will be listed.
+ // Specified in the format 'projects/*/locations/*'.
+ // Location "-" matches all zones and all regions.
+ Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
+func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
+func (*ListClustersRequest) ProtoMessage() {}
+func (*ListClustersRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{28}
+}
+
+func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b)
+}
+func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic)
+}
+func (m *ListClustersRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListClustersRequest.Merge(m, src)
+}
+func (m *ListClustersRequest) XXX_Size() int {
+ return xxx_messageInfo_ListClustersRequest.Size(m)
+}
+func (m *ListClustersRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListClustersRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *ListClustersRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *ListClustersRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *ListClustersRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+// ListClustersResponse is the result of ListClustersRequest.
+type ListClustersResponse struct {
+ // A list of clusters in the project in the specified zone, or
+ // across all ones.
+ Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
+ // If any zones are listed here, the list of clusters returned
+ // may be missing those zones.
+ MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
+func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
+func (*ListClustersResponse) ProtoMessage() {}
+func (*ListClustersResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{29}
+}
+
+func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b)
+}
+func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic)
+}
+func (m *ListClustersResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListClustersResponse.Merge(m, src)
+}
+func (m *ListClustersResponse) XXX_Size() int {
+ return xxx_messageInfo_ListClustersResponse.Size(m)
+}
+func (m *ListClustersResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListClustersResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo
+
+func (m *ListClustersResponse) GetClusters() []*Cluster {
+ if m != nil {
+ return m.Clusters
+ }
+ return nil
+}
+
+func (m *ListClustersResponse) GetMissingZones() []string {
+ if m != nil {
+ return m.MissingZones
+ }
+ return nil
+}
+
+// GetOperationRequest gets a single operation.
+type GetOperationRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The server-assigned `name` of the operation.
+ // This field has been deprecated and replaced by the name field.
+ OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, operation id) of the operation to get.
+ // Specified in the format 'projects/*/locations/*/operations/*'.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetOperationRequest) Reset() { *m = GetOperationRequest{} }
+func (m *GetOperationRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOperationRequest) ProtoMessage() {}
+func (*GetOperationRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{30}
+}
+
+func (m *GetOperationRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetOperationRequest.Unmarshal(m, b)
+}
+func (m *GetOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetOperationRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOperationRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetOperationRequest.Merge(m, src)
+}
+func (m *GetOperationRequest) XXX_Size() int {
+ return xxx_messageInfo_GetOperationRequest.Size(m)
+}
+func (m *GetOperationRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetOperationRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOperationRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *GetOperationRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetOperationRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetOperationRequest) GetOperationId() string {
+ if m != nil {
+ return m.OperationId
+ }
+ return ""
+}
+
+func (m *GetOperationRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// ListOperationsRequest lists operations.
+type ListOperationsRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the parent field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) to return operations for, or `-` for
+ // all zones. This field has been deprecated and replaced by the parent field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // The parent (project and location) where the operations will be listed.
+ // Specified in the format 'projects/*/locations/*'.
+ // Location "-" matches all zones and all regions.
+ Parent string `protobuf:"bytes,4,opt,name=parent,proto3" json:"parent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListOperationsRequest) Reset() { *m = ListOperationsRequest{} }
+func (m *ListOperationsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListOperationsRequest) ProtoMessage() {}
+func (*ListOperationsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{31}
+}
+
+func (m *ListOperationsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListOperationsRequest.Unmarshal(m, b)
+}
+func (m *ListOperationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListOperationsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListOperationsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListOperationsRequest.Merge(m, src)
+}
+func (m *ListOperationsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListOperationsRequest.Size(m)
+}
+func (m *ListOperationsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListOperationsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListOperationsRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *ListOperationsRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *ListOperationsRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *ListOperationsRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+// CancelOperationRequest cancels a single operation.
+type CancelOperationRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the operation resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The server-assigned `name` of the operation.
+ // This field has been deprecated and replaced by the name field.
+ OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, operation id) of the operation to cancel.
+ // Specified in the format 'projects/*/locations/*/operations/*'.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CancelOperationRequest) Reset() { *m = CancelOperationRequest{} }
+func (m *CancelOperationRequest) String() string { return proto.CompactTextString(m) }
+func (*CancelOperationRequest) ProtoMessage() {}
+func (*CancelOperationRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{32}
+}
+
+func (m *CancelOperationRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CancelOperationRequest.Unmarshal(m, b)
+}
+func (m *CancelOperationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CancelOperationRequest.Marshal(b, m, deterministic)
+}
+func (m *CancelOperationRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CancelOperationRequest.Merge(m, src)
+}
+func (m *CancelOperationRequest) XXX_Size() int {
+ return xxx_messageInfo_CancelOperationRequest.Size(m)
+}
+func (m *CancelOperationRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CancelOperationRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CancelOperationRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *CancelOperationRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CancelOperationRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CancelOperationRequest) GetOperationId() string {
+ if m != nil {
+ return m.OperationId
+ }
+ return ""
+}
+
+func (m *CancelOperationRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// ListOperationsResponse is the result of ListOperationsRequest.
+type ListOperationsResponse struct {
+ // A list of operations in the project in the specified zone.
+ Operations []*Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
+ // If any zones are listed here, the list of operations returned
+ // may be missing the operations from those zones.
+ MissingZones []string `protobuf:"bytes,2,rep,name=missing_zones,json=missingZones,proto3" json:"missing_zones,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListOperationsResponse) Reset() { *m = ListOperationsResponse{} }
+func (m *ListOperationsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListOperationsResponse) ProtoMessage() {}
+func (*ListOperationsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{33}
+}
+
+func (m *ListOperationsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListOperationsResponse.Unmarshal(m, b)
+}
+func (m *ListOperationsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListOperationsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListOperationsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListOperationsResponse.Merge(m, src)
+}
+func (m *ListOperationsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListOperationsResponse.Size(m)
+}
+func (m *ListOperationsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListOperationsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListOperationsResponse proto.InternalMessageInfo
+
+func (m *ListOperationsResponse) GetOperations() []*Operation {
+ if m != nil {
+ return m.Operations
+ }
+ return nil
+}
+
+func (m *ListOperationsResponse) GetMissingZones() []string {
+ if m != nil {
+ return m.MissingZones
+ }
+ return nil
+}
+
+// Gets the current Kubernetes Engine service configuration.
+type GetServerConfigRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) to return operations for.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // The name (project and location) of the server config to get
+ // Specified in the format 'projects/*/locations/*'.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetServerConfigRequest) Reset() { *m = GetServerConfigRequest{} }
+func (m *GetServerConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServerConfigRequest) ProtoMessage() {}
+func (*GetServerConfigRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{34}
+}
+
+func (m *GetServerConfigRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetServerConfigRequest.Unmarshal(m, b)
+}
+func (m *GetServerConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetServerConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *GetServerConfigRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetServerConfigRequest.Merge(m, src)
+}
+func (m *GetServerConfigRequest) XXX_Size() int {
+ return xxx_messageInfo_GetServerConfigRequest.Size(m)
+}
+func (m *GetServerConfigRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetServerConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetServerConfigRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *GetServerConfigRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetServerConfigRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *GetServerConfigRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// Kubernetes Engine service configuration.
+type ServerConfig struct {
+ // Version of Kubernetes the service deploys by default.
+ DefaultClusterVersion string `protobuf:"bytes,1,opt,name=default_cluster_version,json=defaultClusterVersion,proto3" json:"default_cluster_version,omitempty"`
+ // List of valid node upgrade target versions.
+ ValidNodeVersions []string `protobuf:"bytes,3,rep,name=valid_node_versions,json=validNodeVersions,proto3" json:"valid_node_versions,omitempty"`
+ // Default image type.
+ DefaultImageType string `protobuf:"bytes,4,opt,name=default_image_type,json=defaultImageType,proto3" json:"default_image_type,omitempty"`
+ // List of valid image types.
+ ValidImageTypes []string `protobuf:"bytes,5,rep,name=valid_image_types,json=validImageTypes,proto3" json:"valid_image_types,omitempty"`
+ // List of valid master versions.
+ ValidMasterVersions []string `protobuf:"bytes,6,rep,name=valid_master_versions,json=validMasterVersions,proto3" json:"valid_master_versions,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ServerConfig) Reset() { *m = ServerConfig{} }
+func (m *ServerConfig) String() string { return proto.CompactTextString(m) }
+func (*ServerConfig) ProtoMessage() {}
+func (*ServerConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{35}
+}
+
+func (m *ServerConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ServerConfig.Unmarshal(m, b)
+}
+func (m *ServerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ServerConfig.Marshal(b, m, deterministic)
+}
+func (m *ServerConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServerConfig.Merge(m, src)
+}
+func (m *ServerConfig) XXX_Size() int {
+ return xxx_messageInfo_ServerConfig.Size(m)
+}
+func (m *ServerConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerConfig proto.InternalMessageInfo
+
+func (m *ServerConfig) GetDefaultClusterVersion() string {
+ if m != nil {
+ return m.DefaultClusterVersion
+ }
+ return ""
+}
+
+func (m *ServerConfig) GetValidNodeVersions() []string {
+ if m != nil {
+ return m.ValidNodeVersions
+ }
+ return nil
+}
+
+func (m *ServerConfig) GetDefaultImageType() string {
+ if m != nil {
+ return m.DefaultImageType
+ }
+ return ""
+}
+
+func (m *ServerConfig) GetValidImageTypes() []string {
+ if m != nil {
+ return m.ValidImageTypes
+ }
+ return nil
+}
+
+func (m *ServerConfig) GetValidMasterVersions() []string {
+ if m != nil {
+ return m.ValidMasterVersions
+ }
+ return nil
+}
+
+// CreateNodePoolRequest creates a node pool for a cluster.
+type CreateNodePoolRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the parent field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the parent field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the parent field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The node pool to create.
+ NodePool *NodePool `protobuf:"bytes,4,opt,name=node_pool,json=nodePool,proto3" json:"node_pool,omitempty"`
+ // The parent (project, location, cluster id) where the node pool will be
+ // created. Specified in the format
+ // 'projects/*/locations/*/clusters/*'.
+ Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateNodePoolRequest) Reset() { *m = CreateNodePoolRequest{} }
+func (m *CreateNodePoolRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateNodePoolRequest) ProtoMessage() {}
+func (*CreateNodePoolRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{36}
+}
+
+func (m *CreateNodePoolRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateNodePoolRequest.Unmarshal(m, b)
+}
+func (m *CreateNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateNodePoolRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateNodePoolRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateNodePoolRequest.Merge(m, src)
+}
+func (m *CreateNodePoolRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateNodePoolRequest.Size(m)
+}
+func (m *CreateNodePoolRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateNodePoolRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateNodePoolRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *CreateNodePoolRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CreateNodePoolRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CreateNodePoolRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *CreateNodePoolRequest) GetNodePool() *NodePool {
+ if m != nil {
+ return m.NodePool
+ }
+ return nil
+}
+
+func (m *CreateNodePoolRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+// DeleteNodePoolRequest deletes a node pool for a cluster.
+type DeleteNodePoolRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to delete.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster, node pool id) of the node pool to
+ // delete. Specified in the format
+ // 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteNodePoolRequest) Reset() { *m = DeleteNodePoolRequest{} }
+func (m *DeleteNodePoolRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteNodePoolRequest) ProtoMessage() {}
+func (*DeleteNodePoolRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{37}
+}
+
+func (m *DeleteNodePoolRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteNodePoolRequest.Unmarshal(m, b)
+}
+func (m *DeleteNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteNodePoolRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteNodePoolRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteNodePoolRequest.Merge(m, src)
+}
+func (m *DeleteNodePoolRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteNodePoolRequest.Size(m)
+}
+func (m *DeleteNodePoolRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteNodePoolRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteNodePoolRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *DeleteNodePoolRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *DeleteNodePoolRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *DeleteNodePoolRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *DeleteNodePoolRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *DeleteNodePoolRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// ListNodePoolsRequest lists the node pool(s) for a cluster.
+type ListNodePoolsRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the parent field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the parent field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the parent field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The parent (project, location, cluster id) where the node pools will be
+ // listed. Specified in the format 'projects/*/locations/*/clusters/*'.
+ Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNodePoolsRequest) Reset() { *m = ListNodePoolsRequest{} }
+func (m *ListNodePoolsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListNodePoolsRequest) ProtoMessage() {}
+func (*ListNodePoolsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{38}
+}
+
+func (m *ListNodePoolsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNodePoolsRequest.Unmarshal(m, b)
+}
+func (m *ListNodePoolsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNodePoolsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListNodePoolsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNodePoolsRequest.Merge(m, src)
+}
+func (m *ListNodePoolsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListNodePoolsRequest.Size(m)
+}
+func (m *ListNodePoolsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNodePoolsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNodePoolsRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *ListNodePoolsRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *ListNodePoolsRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *ListNodePoolsRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *ListNodePoolsRequest) GetParent() string {
+ if m != nil {
+ return m.Parent
+ }
+ return ""
+}
+
+// GetNodePoolRequest retrieves a node pool for a cluster.
+type GetNodePoolRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster, node pool id) of the node pool to
+ // get. Specified in the format
+ // 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNodePoolRequest) Reset() { *m = GetNodePoolRequest{} }
+func (m *GetNodePoolRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNodePoolRequest) ProtoMessage() {}
+func (*GetNodePoolRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{39}
+}
+
+func (m *GetNodePoolRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNodePoolRequest.Unmarshal(m, b)
+}
+func (m *GetNodePoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNodePoolRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNodePoolRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNodePoolRequest.Merge(m, src)
+}
+func (m *GetNodePoolRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNodePoolRequest.Size(m)
+}
+func (m *GetNodePoolRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNodePoolRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNodePoolRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *GetNodePoolRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetNodePoolRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetNodePoolRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *GetNodePoolRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *GetNodePoolRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// NodePool contains the name and configuration for a cluster's node pool.
+// Node pools are a set of nodes (i.e. VM's), with a common configuration and
+// specification, under the control of the cluster master. They may have a set
+// of Kubernetes labels applied to them, which may be used to reference them
+// during pod scheduling. They may also be resized up or down, to accommodate
+// the workload.
+type NodePool struct {
+ // The name of the node pool.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The node configuration of the pool.
+ Config *NodeConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+ // The initial node count for the pool. You must ensure that your
+ // Compute Engine resource quota
+ // is sufficient for this number of instances. You must also have available
+ // firewall and routes quota.
+ InitialNodeCount int32 `protobuf:"varint,3,opt,name=initial_node_count,json=initialNodeCount,proto3" json:"initial_node_count,omitempty"`
+ // [Output only] Server-defined URL for the resource.
+ SelfLink string `protobuf:"bytes,100,opt,name=self_link,json=selfLink,proto3" json:"self_link,omitempty"`
+ // The version of the Kubernetes of this node.
+ Version string `protobuf:"bytes,101,opt,name=version,proto3" json:"version,omitempty"`
+ // [Output only] The resource URLs of the [managed instance
+ // groups](/compute/docs/instance-groups/creating-groups-of-managed-instances)
+ // associated with this node pool.
+ InstanceGroupUrls []string `protobuf:"bytes,102,rep,name=instance_group_urls,json=instanceGroupUrls,proto3" json:"instance_group_urls,omitempty"`
+ // [Output only] The status of the nodes in this pool instance.
+ Status NodePool_Status `protobuf:"varint,103,opt,name=status,proto3,enum=google.container.v1.NodePool_Status" json:"status,omitempty"`
+ // [Output only] Additional information about the current status of this
+ // node pool instance, if available.
+ StatusMessage string `protobuf:"bytes,104,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+ // Autoscaler configuration for this NodePool. Autoscaler is enabled
+ // only if a valid configuration is present.
+ Autoscaling *NodePoolAutoscaling `protobuf:"bytes,4,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"`
+ // NodeManagement configuration for this NodePool.
+ Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NodePool) Reset() { *m = NodePool{} }
+func (m *NodePool) String() string { return proto.CompactTextString(m) }
+func (*NodePool) ProtoMessage() {}
+func (*NodePool) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{40}
+}
+
+func (m *NodePool) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NodePool.Unmarshal(m, b)
+}
+func (m *NodePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NodePool.Marshal(b, m, deterministic)
+}
+func (m *NodePool) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NodePool.Merge(m, src)
+}
+func (m *NodePool) XXX_Size() int {
+ return xxx_messageInfo_NodePool.Size(m)
+}
+func (m *NodePool) XXX_DiscardUnknown() {
+ xxx_messageInfo_NodePool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePool proto.InternalMessageInfo
+
+func (m *NodePool) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *NodePool) GetConfig() *NodeConfig {
+ if m != nil {
+ return m.Config
+ }
+ return nil
+}
+
+func (m *NodePool) GetInitialNodeCount() int32 {
+ if m != nil {
+ return m.InitialNodeCount
+ }
+ return 0
+}
+
+func (m *NodePool) GetSelfLink() string {
+ if m != nil {
+ return m.SelfLink
+ }
+ return ""
+}
+
+func (m *NodePool) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *NodePool) GetInstanceGroupUrls() []string {
+ if m != nil {
+ return m.InstanceGroupUrls
+ }
+ return nil
+}
+
+func (m *NodePool) GetStatus() NodePool_Status {
+ if m != nil {
+ return m.Status
+ }
+ return NodePool_STATUS_UNSPECIFIED
+}
+
+func (m *NodePool) GetStatusMessage() string {
+ if m != nil {
+ return m.StatusMessage
+ }
+ return ""
+}
+
+func (m *NodePool) GetAutoscaling() *NodePoolAutoscaling {
+ if m != nil {
+ return m.Autoscaling
+ }
+ return nil
+}
+
+func (m *NodePool) GetManagement() *NodeManagement {
+ if m != nil {
+ return m.Management
+ }
+ return nil
+}
+
+// NodeManagement defines the set of node management services turned on for the
+// node pool.
+//
+// NOTE(review): this file appears to be generated by protoc-gen-go (see the
+// xxx_messageInfo_* / fileDescriptor_* plumbing below) — prefer editing the
+// source .proto and regenerating rather than hand-editing.
+type NodeManagement struct {
+ // A flag that specifies whether node auto-upgrade is enabled for the node
+ // pool. If enabled, node auto-upgrade helps keep the nodes in your node pool
+ // up to date with the latest release version of Kubernetes.
+ AutoUpgrade bool `protobuf:"varint,1,opt,name=auto_upgrade,json=autoUpgrade,proto3" json:"auto_upgrade,omitempty"`
+ // A flag that specifies whether the node auto-repair is enabled for the node
+ // pool. If enabled, the nodes in this node pool will be monitored and, if
+ // they fail health checks too many times, an automatic repair action will be
+ // triggered.
+ AutoRepair bool `protobuf:"varint,2,opt,name=auto_repair,json=autoRepair,proto3" json:"auto_repair,omitempty"`
+ // Specifies the Auto Upgrade knobs for the node pool.
+ UpgradeOptions *AutoUpgradeOptions `protobuf:"bytes,10,opt,name=upgrade_options,json=upgradeOptions,proto3" json:"upgrade_options,omitempty"`
+ // XXX_* fields are internal bookkeeping used by the proto runtime; do not
+ // set them directly.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NodeManagement) Reset() { *m = NodeManagement{} }
+func (m *NodeManagement) String() string { return proto.CompactTextString(m) }
+func (*NodeManagement) ProtoMessage() {}
+func (*NodeManagement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{41}
+}
+
+// XXX_* methods delegate (de)serialization and size accounting to the shared
+// proto runtime via the message-info singleton below.
+func (m *NodeManagement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NodeManagement.Unmarshal(m, b)
+}
+func (m *NodeManagement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NodeManagement.Marshal(b, m, deterministic)
+}
+func (m *NodeManagement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NodeManagement.Merge(m, src)
+}
+func (m *NodeManagement) XXX_Size() int {
+ return xxx_messageInfo_NodeManagement.Size(m)
+}
+func (m *NodeManagement) XXX_DiscardUnknown() {
+ xxx_messageInfo_NodeManagement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeManagement proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+func (m *NodeManagement) GetAutoUpgrade() bool {
+ if m != nil {
+ return m.AutoUpgrade
+ }
+ return false
+}
+
+func (m *NodeManagement) GetAutoRepair() bool {
+ if m != nil {
+ return m.AutoRepair
+ }
+ return false
+}
+
+func (m *NodeManagement) GetUpgradeOptions() *AutoUpgradeOptions {
+ if m != nil {
+ return m.UpgradeOptions
+ }
+ return nil
+}
+
+// AutoUpgradeOptions defines the set of options for the user to control how
+// the Auto Upgrades will proceed.
+type AutoUpgradeOptions struct {
+ // [Output only] This field is set when upgrades are about to commence
+ // with the approximate start time for the upgrades, in
+ // [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
+ AutoUpgradeStartTime string `protobuf:"bytes,1,opt,name=auto_upgrade_start_time,json=autoUpgradeStartTime,proto3" json:"auto_upgrade_start_time,omitempty"`
+ // [Output only] This field is set when upgrades are about to commence
+ // with the description of the upgrade.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AutoUpgradeOptions) Reset() { *m = AutoUpgradeOptions{} }
+func (m *AutoUpgradeOptions) String() string { return proto.CompactTextString(m) }
+func (*AutoUpgradeOptions) ProtoMessage() {}
+func (*AutoUpgradeOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{42}
+}
+
+func (m *AutoUpgradeOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AutoUpgradeOptions.Unmarshal(m, b)
+}
+func (m *AutoUpgradeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AutoUpgradeOptions.Marshal(b, m, deterministic)
+}
+func (m *AutoUpgradeOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AutoUpgradeOptions.Merge(m, src)
+}
+func (m *AutoUpgradeOptions) XXX_Size() int {
+ return xxx_messageInfo_AutoUpgradeOptions.Size(m)
+}
+func (m *AutoUpgradeOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_AutoUpgradeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AutoUpgradeOptions proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+func (m *AutoUpgradeOptions) GetAutoUpgradeStartTime() string {
+ if m != nil {
+ return m.AutoUpgradeStartTime
+ }
+ return ""
+}
+
+func (m *AutoUpgradeOptions) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+// MaintenancePolicy defines the maintenance policy to be used for the cluster.
+type MaintenancePolicy struct {
+ // Specifies the maintenance window in which maintenance may be performed.
+ Window *MaintenanceWindow `protobuf:"bytes,1,opt,name=window,proto3" json:"window,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MaintenancePolicy) Reset() { *m = MaintenancePolicy{} }
+func (m *MaintenancePolicy) String() string { return proto.CompactTextString(m) }
+func (*MaintenancePolicy) ProtoMessage() {}
+func (*MaintenancePolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{43}
+}
+
+func (m *MaintenancePolicy) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MaintenancePolicy.Unmarshal(m, b)
+}
+func (m *MaintenancePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MaintenancePolicy.Marshal(b, m, deterministic)
+}
+func (m *MaintenancePolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenancePolicy.Merge(m, src)
+}
+func (m *MaintenancePolicy) XXX_Size() int {
+ return xxx_messageInfo_MaintenancePolicy.Size(m)
+}
+func (m *MaintenancePolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenancePolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenancePolicy proto.InternalMessageInfo
+
+// GetWindow is nil-receiver-safe: it returns nil when the receiver is nil.
+func (m *MaintenancePolicy) GetWindow() *MaintenanceWindow {
+ if m != nil {
+ return m.Window
+ }
+ return nil
+}
+
+// MaintenanceWindow defines the maintenance window to be used for the cluster.
+type MaintenanceWindow struct {
+ // Types that are valid to be assigned to Policy:
+ // *MaintenanceWindow_DailyMaintenanceWindow
+ Policy isMaintenanceWindow_Policy `protobuf_oneof:"policy"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MaintenanceWindow) Reset() { *m = MaintenanceWindow{} }
+func (m *MaintenanceWindow) String() string { return proto.CompactTextString(m) }
+func (*MaintenanceWindow) ProtoMessage() {}
+func (*MaintenanceWindow) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{44}
+}
+
+func (m *MaintenanceWindow) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MaintenanceWindow.Unmarshal(m, b)
+}
+func (m *MaintenanceWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MaintenanceWindow.Marshal(b, m, deterministic)
+}
+func (m *MaintenanceWindow) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MaintenanceWindow.Merge(m, src)
+}
+func (m *MaintenanceWindow) XXX_Size() int {
+ return xxx_messageInfo_MaintenanceWindow.Size(m)
+}
+func (m *MaintenanceWindow) XXX_DiscardUnknown() {
+ xxx_messageInfo_MaintenanceWindow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MaintenanceWindow proto.InternalMessageInfo
+
+// isMaintenanceWindow_Policy is the marker interface implemented by all valid
+// wrapper types for the Policy oneof.
+type isMaintenanceWindow_Policy interface {
+ isMaintenanceWindow_Policy()
+}
+
+// MaintenanceWindow_DailyMaintenanceWindow wraps a DailyMaintenanceWindow as
+// the Policy oneof variant (field 2).
+type MaintenanceWindow_DailyMaintenanceWindow struct {
+ DailyMaintenanceWindow *DailyMaintenanceWindow `protobuf:"bytes,2,opt,name=daily_maintenance_window,json=dailyMaintenanceWindow,proto3,oneof"`
+}
+
+func (*MaintenanceWindow_DailyMaintenanceWindow) isMaintenanceWindow_Policy() {}
+
+// GetPolicy returns the populated oneof wrapper, or nil if none is set.
+func (m *MaintenanceWindow) GetPolicy() isMaintenanceWindow_Policy {
+ if m != nil {
+ return m.Policy
+ }
+ return nil
+}
+
+// GetDailyMaintenanceWindow returns the daily-window variant of the Policy
+// oneof, or nil if a different (or no) variant is set.
+func (m *MaintenanceWindow) GetDailyMaintenanceWindow() *DailyMaintenanceWindow {
+ if x, ok := m.GetPolicy().(*MaintenanceWindow_DailyMaintenanceWindow); ok {
+ return x.DailyMaintenanceWindow
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*MaintenanceWindow) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _MaintenanceWindow_OneofMarshaler, _MaintenanceWindow_OneofUnmarshaler, _MaintenanceWindow_OneofSizer, []interface{}{
+ (*MaintenanceWindow_DailyMaintenanceWindow)(nil),
+ }
+}
+
+// _MaintenanceWindow_OneofMarshaler encodes whichever Policy oneof variant is
+// set; an unset oneof (nil) encodes nothing.
+func _MaintenanceWindow_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*MaintenanceWindow)
+ // policy
+ switch x := m.Policy.(type) {
+ case *MaintenanceWindow_DailyMaintenanceWindow:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.DailyMaintenanceWindow); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("MaintenanceWindow.Policy has unexpected type %T", x)
+ }
+ return nil
+}
+
+// _MaintenanceWindow_OneofUnmarshaler decodes field 2 into the Policy oneof;
+// the bool result reports whether the tag was handled here.
+func _MaintenanceWindow_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*MaintenanceWindow)
+ switch tag {
+ case 2: // policy.daily_maintenance_window
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DailyMaintenanceWindow)
+ err := b.DecodeMessage(msg)
+ m.Policy = &MaintenanceWindow_DailyMaintenanceWindow{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+// _MaintenanceWindow_OneofSizer reports the encoded size of the populated
+// Policy oneof variant (tag + length varint + payload).
+func _MaintenanceWindow_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*MaintenanceWindow)
+ // policy
+ switch x := m.Policy.(type) {
+ case *MaintenanceWindow_DailyMaintenanceWindow:
+ s := proto.Size(x.DailyMaintenanceWindow)
+ n += 1 // tag and wire
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Time window specified for daily maintenance operations.
+type DailyMaintenanceWindow struct {
+ // Time within the maintenance window to start the maintenance operations.
+ // Time format should be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
+ // format "HH:MM", where HH : [00-23] and MM : [00-59] GMT.
+ StartTime string `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // [Output only] Duration of the time window, automatically chosen to be
+ // smallest possible in the given scenario.
+ // Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
+ // format "PTnHnMnS".
+ Duration string `protobuf:"bytes,3,opt,name=duration,proto3" json:"duration,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DailyMaintenanceWindow) Reset() { *m = DailyMaintenanceWindow{} }
+func (m *DailyMaintenanceWindow) String() string { return proto.CompactTextString(m) }
+func (*DailyMaintenanceWindow) ProtoMessage() {}
+func (*DailyMaintenanceWindow) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{45}
+}
+
+func (m *DailyMaintenanceWindow) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DailyMaintenanceWindow.Unmarshal(m, b)
+}
+func (m *DailyMaintenanceWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DailyMaintenanceWindow.Marshal(b, m, deterministic)
+}
+func (m *DailyMaintenanceWindow) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DailyMaintenanceWindow.Merge(m, src)
+}
+func (m *DailyMaintenanceWindow) XXX_Size() int {
+ return xxx_messageInfo_DailyMaintenanceWindow.Size(m)
+}
+func (m *DailyMaintenanceWindow) XXX_DiscardUnknown() {
+ xxx_messageInfo_DailyMaintenanceWindow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DailyMaintenanceWindow proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+func (m *DailyMaintenanceWindow) GetStartTime() string {
+ if m != nil {
+ return m.StartTime
+ }
+ return ""
+}
+
+func (m *DailyMaintenanceWindow) GetDuration() string {
+ if m != nil {
+ return m.Duration
+ }
+ return ""
+}
+
+// SetNodePoolManagementRequest sets the node management properties of a node
+// pool.
+type SetNodePoolManagementRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to update.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to update.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // NodeManagement configuration for the node pool.
+ Management *NodeManagement `protobuf:"bytes,5,opt,name=management,proto3" json:"management,omitempty"`
+ // The name (project, location, cluster, node pool id) of the node pool to set
+ // management properties. Specified in the format
+ // 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetNodePoolManagementRequest) Reset() { *m = SetNodePoolManagementRequest{} }
+func (m *SetNodePoolManagementRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNodePoolManagementRequest) ProtoMessage() {}
+func (*SetNodePoolManagementRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{46}
+}
+
+func (m *SetNodePoolManagementRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNodePoolManagementRequest.Unmarshal(m, b)
+}
+func (m *SetNodePoolManagementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNodePoolManagementRequest.Marshal(b, m, deterministic)
+}
+func (m *SetNodePoolManagementRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNodePoolManagementRequest.Merge(m, src)
+}
+func (m *SetNodePoolManagementRequest) XXX_Size() int {
+ return xxx_messageInfo_SetNodePoolManagementRequest.Size(m)
+}
+func (m *SetNodePoolManagementRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNodePoolManagementRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNodePoolManagementRequest proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+// Deprecated: Do not use.
+func (m *SetNodePoolManagementRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolManagementRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolManagementRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolManagementRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *SetNodePoolManagementRequest) GetManagement() *NodeManagement {
+ if m != nil {
+ return m.Management
+ }
+ return nil
+}
+
+func (m *SetNodePoolManagementRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetNodePoolSizeRequest sets the size of a node
+// pool.
+type SetNodePoolSizeRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to update.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to update.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // The desired node count for the pool.
+ NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3" json:"node_count,omitempty"`
+ // The name (project, location, cluster, node pool id) of the node pool to set
+ // size.
+ // Specified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetNodePoolSizeRequest) Reset() { *m = SetNodePoolSizeRequest{} }
+func (m *SetNodePoolSizeRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNodePoolSizeRequest) ProtoMessage() {}
+func (*SetNodePoolSizeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{47}
+}
+
+func (m *SetNodePoolSizeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNodePoolSizeRequest.Unmarshal(m, b)
+}
+func (m *SetNodePoolSizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNodePoolSizeRequest.Marshal(b, m, deterministic)
+}
+func (m *SetNodePoolSizeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNodePoolSizeRequest.Merge(m, src)
+}
+func (m *SetNodePoolSizeRequest) XXX_Size() int {
+ return xxx_messageInfo_SetNodePoolSizeRequest.Size(m)
+}
+func (m *SetNodePoolSizeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNodePoolSizeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNodePoolSizeRequest proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+// Deprecated: Do not use.
+func (m *SetNodePoolSizeRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolSizeRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolSizeRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNodePoolSizeRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *SetNodePoolSizeRequest) GetNodeCount() int32 {
+ if m != nil {
+ return m.NodeCount
+ }
+ return 0
+}
+
+func (m *SetNodePoolSizeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// RollbackNodePoolUpgradeRequest rollbacks the previously Aborted or Failed
+// NodePool upgrade. This will be a no-op if the last upgrade successfully
+// completed.
+type RollbackNodePoolUpgradeRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to rollback.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the node pool to rollback.
+ // This field has been deprecated and replaced by the name field.
+ NodePoolId string `protobuf:"bytes,4,opt,name=node_pool_id,json=nodePoolId,proto3" json:"node_pool_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster, node pool id) of the node pool to
+ // rollback upgrade.
+ // Specified in the format 'projects/*/locations/*/clusters/*/nodePools/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RollbackNodePoolUpgradeRequest) Reset() { *m = RollbackNodePoolUpgradeRequest{} }
+func (m *RollbackNodePoolUpgradeRequest) String() string { return proto.CompactTextString(m) }
+func (*RollbackNodePoolUpgradeRequest) ProtoMessage() {}
+func (*RollbackNodePoolUpgradeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{48}
+}
+
+func (m *RollbackNodePoolUpgradeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Unmarshal(m, b)
+}
+func (m *RollbackNodePoolUpgradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Marshal(b, m, deterministic)
+}
+func (m *RollbackNodePoolUpgradeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RollbackNodePoolUpgradeRequest.Merge(m, src)
+}
+func (m *RollbackNodePoolUpgradeRequest) XXX_Size() int {
+ return xxx_messageInfo_RollbackNodePoolUpgradeRequest.Size(m)
+}
+func (m *RollbackNodePoolUpgradeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_RollbackNodePoolUpgradeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RollbackNodePoolUpgradeRequest proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+// Deprecated: Do not use.
+func (m *RollbackNodePoolUpgradeRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *RollbackNodePoolUpgradeRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *RollbackNodePoolUpgradeRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *RollbackNodePoolUpgradeRequest) GetNodePoolId() string {
+ if m != nil {
+ return m.NodePoolId
+ }
+ return ""
+}
+
+func (m *RollbackNodePoolUpgradeRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// ListNodePoolsResponse is the result of ListNodePoolsRequest.
+type ListNodePoolsResponse struct {
+ // A list of node pools for a cluster.
+ NodePools []*NodePool `protobuf:"bytes,1,rep,name=node_pools,json=nodePools,proto3" json:"node_pools,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListNodePoolsResponse) Reset() { *m = ListNodePoolsResponse{} }
+func (m *ListNodePoolsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListNodePoolsResponse) ProtoMessage() {}
+func (*ListNodePoolsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{49}
+}
+
+func (m *ListNodePoolsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListNodePoolsResponse.Unmarshal(m, b)
+}
+func (m *ListNodePoolsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListNodePoolsResponse.Marshal(b, m, deterministic)
+}
+func (m *ListNodePoolsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListNodePoolsResponse.Merge(m, src)
+}
+func (m *ListNodePoolsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListNodePoolsResponse.Size(m)
+}
+func (m *ListNodePoolsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListNodePoolsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListNodePoolsResponse proto.InternalMessageInfo
+
+// GetNodePools is nil-receiver-safe: it returns nil when the receiver is nil.
+func (m *ListNodePoolsResponse) GetNodePools() []*NodePool {
+ if m != nil {
+ return m.NodePools
+ }
+ return nil
+}
+
+// NodePoolAutoscaling contains information required by cluster autoscaler to
+// adjust the size of the node pool to the current cluster usage.
+type NodePoolAutoscaling struct {
+ // Is autoscaling enabled for this node pool.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Minimum number of nodes in the NodePool. Must be >= 1 and <=
+ // max_node_count.
+ MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount,proto3" json:"min_node_count,omitempty"`
+ // Maximum number of nodes in the NodePool. Must be >= min_node_count. There
+ // has to be enough quota to scale up the cluster.
+ MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount,proto3" json:"max_node_count,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NodePoolAutoscaling) Reset() { *m = NodePoolAutoscaling{} }
+func (m *NodePoolAutoscaling) String() string { return proto.CompactTextString(m) }
+func (*NodePoolAutoscaling) ProtoMessage() {}
+func (*NodePoolAutoscaling) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{50}
+}
+
+func (m *NodePoolAutoscaling) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NodePoolAutoscaling.Unmarshal(m, b)
+}
+func (m *NodePoolAutoscaling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NodePoolAutoscaling.Marshal(b, m, deterministic)
+}
+func (m *NodePoolAutoscaling) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NodePoolAutoscaling.Merge(m, src)
+}
+func (m *NodePoolAutoscaling) XXX_Size() int {
+ return xxx_messageInfo_NodePoolAutoscaling.Size(m)
+}
+func (m *NodePoolAutoscaling) XXX_DiscardUnknown() {
+ xxx_messageInfo_NodePoolAutoscaling.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodePoolAutoscaling proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+func (m *NodePoolAutoscaling) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *NodePoolAutoscaling) GetMinNodeCount() int32 {
+ if m != nil {
+ return m.MinNodeCount
+ }
+ return 0
+}
+
+func (m *NodePoolAutoscaling) GetMaxNodeCount() int32 {
+ if m != nil {
+ return m.MaxNodeCount
+ }
+ return 0
+}
+
+// SetLabelsRequest sets the Google Cloud Platform labels on a Google Container
+// Engine cluster, which will in turn set them for Google Compute Engine
+// resources used by that cluster
+type SetLabelsRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The labels to set for that cluster.
+ ResourceLabels map[string]string `protobuf:"bytes,4,rep,name=resource_labels,json=resourceLabels,proto3" json:"resource_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The fingerprint of the previous set of labels for this resource,
+ // used to detect conflicts. The fingerprint is initially generated by
+ // Kubernetes Engine and changes after every request to modify or update
+ // labels. You must always provide an up-to-date fingerprint hash when
+ // updating or changing labels. Make a get() request to the
+ // resource to get the latest fingerprint.
+ LabelFingerprint string `protobuf:"bytes,5,opt,name=label_fingerprint,json=labelFingerprint,proto3" json:"label_fingerprint,omitempty"`
+ // The name (project, location, cluster id) of the cluster to set labels.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetLabelsRequest) Reset() { *m = SetLabelsRequest{} }
+func (m *SetLabelsRequest) String() string { return proto.CompactTextString(m) }
+func (*SetLabelsRequest) ProtoMessage() {}
+func (*SetLabelsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{51}
+}
+
+func (m *SetLabelsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetLabelsRequest.Unmarshal(m, b)
+}
+func (m *SetLabelsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetLabelsRequest.Marshal(b, m, deterministic)
+}
+func (m *SetLabelsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetLabelsRequest.Merge(m, src)
+}
+func (m *SetLabelsRequest) XXX_Size() int {
+ return xxx_messageInfo_SetLabelsRequest.Size(m)
+}
+func (m *SetLabelsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetLabelsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetLabelsRequest proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+// Deprecated: Do not use.
+func (m *SetLabelsRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLabelsRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLabelsRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetLabelsRequest) GetResourceLabels() map[string]string {
+ if m != nil {
+ return m.ResourceLabels
+ }
+ return nil
+}
+
+func (m *SetLabelsRequest) GetLabelFingerprint() string {
+ if m != nil {
+ return m.LabelFingerprint
+ }
+ return ""
+}
+
+func (m *SetLabelsRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetLegacyAbacRequest enables or disables the ABAC authorization mechanism for
+// a cluster.
+type SetLegacyAbacRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster to update.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Whether ABAC authorization will be enabled in the cluster.
+ Enabled bool `protobuf:"varint,4,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // The name (project, location, cluster id) of the cluster to set legacy abac.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // XXX_* fields are internal proto runtime bookkeeping; do not set.
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetLegacyAbacRequest) Reset() { *m = SetLegacyAbacRequest{} }
+func (m *SetLegacyAbacRequest) String() string { return proto.CompactTextString(m) }
+func (*SetLegacyAbacRequest) ProtoMessage() {}
+func (*SetLegacyAbacRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{52}
+}
+
+func (m *SetLegacyAbacRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetLegacyAbacRequest.Unmarshal(m, b)
+}
+func (m *SetLegacyAbacRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetLegacyAbacRequest.Marshal(b, m, deterministic)
+}
+func (m *SetLegacyAbacRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetLegacyAbacRequest.Merge(m, src)
+}
+func (m *SetLegacyAbacRequest) XXX_Size() int {
+ return xxx_messageInfo_SetLegacyAbacRequest.Size(m)
+}
+func (m *SetLegacyAbacRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetLegacyAbacRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetLegacyAbacRequest proto.InternalMessageInfo
+
+// The getters below are nil-receiver-safe: they return the field's zero value
+// when the receiver is nil.
+
+// Deprecated: Do not use.
+func (m *SetLegacyAbacRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLegacyAbacRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetLegacyAbacRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetLegacyAbacRequest) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *SetLegacyAbacRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// StartIPRotationRequest creates a new IP for the cluster and then performs
+// a node upgrade on each node pool to point to the new IP.
+type StartIPRotationRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster id) of the cluster to start IP
+ // rotation. Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // Whether to rotate credentials during IP rotation.
+ RotateCredentials bool `protobuf:"varint,7,opt,name=rotate_credentials,json=rotateCredentials,proto3" json:"rotate_credentials,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StartIPRotationRequest) Reset() { *m = StartIPRotationRequest{} }
+func (m *StartIPRotationRequest) String() string { return proto.CompactTextString(m) }
+func (*StartIPRotationRequest) ProtoMessage() {}
+func (*StartIPRotationRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{53}
+}
+
+func (m *StartIPRotationRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StartIPRotationRequest.Unmarshal(m, b)
+}
+func (m *StartIPRotationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StartIPRotationRequest.Marshal(b, m, deterministic)
+}
+func (m *StartIPRotationRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StartIPRotationRequest.Merge(m, src)
+}
+func (m *StartIPRotationRequest) XXX_Size() int {
+ return xxx_messageInfo_StartIPRotationRequest.Size(m)
+}
+func (m *StartIPRotationRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StartIPRotationRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartIPRotationRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *StartIPRotationRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *StartIPRotationRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *StartIPRotationRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *StartIPRotationRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *StartIPRotationRequest) GetRotateCredentials() bool {
+ if m != nil {
+ return m.RotateCredentials
+ }
+ return false
+}
+
+// CompleteIPRotationRequest moves the cluster master back into single-IP mode.
+type CompleteIPRotationRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // The name (project, location, cluster id) of the cluster to complete IP
+ // rotation. Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompleteIPRotationRequest) Reset() { *m = CompleteIPRotationRequest{} }
+func (m *CompleteIPRotationRequest) String() string { return proto.CompactTextString(m) }
+func (*CompleteIPRotationRequest) ProtoMessage() {}
+func (*CompleteIPRotationRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{54}
+}
+
+func (m *CompleteIPRotationRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CompleteIPRotationRequest.Unmarshal(m, b)
+}
+func (m *CompleteIPRotationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CompleteIPRotationRequest.Marshal(b, m, deterministic)
+}
+func (m *CompleteIPRotationRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompleteIPRotationRequest.Merge(m, src)
+}
+func (m *CompleteIPRotationRequest) XXX_Size() int {
+ return xxx_messageInfo_CompleteIPRotationRequest.Size(m)
+}
+func (m *CompleteIPRotationRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompleteIPRotationRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompleteIPRotationRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *CompleteIPRotationRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CompleteIPRotationRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *CompleteIPRotationRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *CompleteIPRotationRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// AcceleratorConfig represents a Hardware Accelerator request.
+type AcceleratorConfig struct {
+ // The number of the accelerator cards exposed to an instance.
+ AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
+ // The accelerator type resource name. List of supported accelerators
+ // [here](/compute/docs/gpus/#Introduction)
+ AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AcceleratorConfig) Reset() { *m = AcceleratorConfig{} }
+func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) }
+func (*AcceleratorConfig) ProtoMessage() {}
+func (*AcceleratorConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{55}
+}
+
+func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AcceleratorConfig.Unmarshal(m, b)
+}
+func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AcceleratorConfig.Marshal(b, m, deterministic)
+}
+func (m *AcceleratorConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AcceleratorConfig.Merge(m, src)
+}
+func (m *AcceleratorConfig) XXX_Size() int {
+ return xxx_messageInfo_AcceleratorConfig.Size(m)
+}
+func (m *AcceleratorConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_AcceleratorConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AcceleratorConfig proto.InternalMessageInfo
+
+func (m *AcceleratorConfig) GetAcceleratorCount() int64 {
+ if m != nil {
+ return m.AcceleratorCount
+ }
+ return 0
+}
+
+func (m *AcceleratorConfig) GetAcceleratorType() string {
+ if m != nil {
+ return m.AcceleratorType
+ }
+ return ""
+}
+
+// SetNetworkPolicyRequest enables/disables network policy for a cluster.
+type SetNetworkPolicyRequest struct {
+ // Deprecated. The Google Developers Console [project ID or project
+ // number](https://developers.google.com/console/help/new/#projectnumber).
+ // This field has been deprecated and replaced by the name field.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ // This field has been deprecated and replaced by the name field.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` // Deprecated: Do not use.
+ // Deprecated. The name of the cluster.
+ // This field has been deprecated and replaced by the name field.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` // Deprecated: Do not use.
+ // Configuration options for the NetworkPolicy feature.
+ NetworkPolicy *NetworkPolicy `protobuf:"bytes,4,opt,name=network_policy,json=networkPolicy,proto3" json:"network_policy,omitempty"`
+ // The name (project, location, cluster id) of the cluster to set networking
+ // policy. Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetNetworkPolicyRequest) Reset() { *m = SetNetworkPolicyRequest{} }
+func (m *SetNetworkPolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNetworkPolicyRequest) ProtoMessage() {}
+func (*SetNetworkPolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{56}
+}
+
+func (m *SetNetworkPolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetNetworkPolicyRequest.Unmarshal(m, b)
+}
+func (m *SetNetworkPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetNetworkPolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *SetNetworkPolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetNetworkPolicyRequest.Merge(m, src)
+}
+func (m *SetNetworkPolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_SetNetworkPolicyRequest.Size(m)
+}
+func (m *SetNetworkPolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetNetworkPolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetNetworkPolicyRequest proto.InternalMessageInfo
+
+// Deprecated: Do not use.
+func (m *SetNetworkPolicyRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNetworkPolicyRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (m *SetNetworkPolicyRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetNetworkPolicyRequest) GetNetworkPolicy() *NetworkPolicy {
+ if m != nil {
+ return m.NetworkPolicy
+ }
+ return nil
+}
+
+func (m *SetNetworkPolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// SetMaintenancePolicyRequest sets the maintenance policy for a cluster.
+type SetMaintenancePolicyRequest struct {
+ // The Google Developers Console [project ID or project
+ // number](https://support.google.com/cloud/answer/6158840).
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The name of the Google Compute Engine
+ // [zone](/compute/docs/zones#available) in which the cluster
+ // resides.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"`
+ // The name of the cluster to update.
+ ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // The maintenance policy to be set for the cluster. An empty field
+ // clears the existing maintenance policy.
+ MaintenancePolicy *MaintenancePolicy `protobuf:"bytes,4,opt,name=maintenance_policy,json=maintenancePolicy,proto3" json:"maintenance_policy,omitempty"`
+ // The name (project, location, cluster id) of the cluster to set maintenance
+ // policy.
+ // Specified in the format 'projects/*/locations/*/clusters/*'.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetMaintenancePolicyRequest) Reset() { *m = SetMaintenancePolicyRequest{} }
+func (m *SetMaintenancePolicyRequest) String() string { return proto.CompactTextString(m) }
+func (*SetMaintenancePolicyRequest) ProtoMessage() {}
+func (*SetMaintenancePolicyRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{57}
+}
+
+func (m *SetMaintenancePolicyRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetMaintenancePolicyRequest.Unmarshal(m, b)
+}
+func (m *SetMaintenancePolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetMaintenancePolicyRequest.Marshal(b, m, deterministic)
+}
+func (m *SetMaintenancePolicyRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetMaintenancePolicyRequest.Merge(m, src)
+}
+func (m *SetMaintenancePolicyRequest) XXX_Size() int {
+ return xxx_messageInfo_SetMaintenancePolicyRequest.Size(m)
+}
+func (m *SetMaintenancePolicyRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetMaintenancePolicyRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetMaintenancePolicyRequest proto.InternalMessageInfo
+
+func (m *SetMaintenancePolicyRequest) GetProjectId() string {
+ if m != nil {
+ return m.ProjectId
+ }
+ return ""
+}
+
+func (m *SetMaintenancePolicyRequest) GetZone() string {
+ if m != nil {
+ return m.Zone
+ }
+ return ""
+}
+
+func (m *SetMaintenancePolicyRequest) GetClusterId() string {
+ if m != nil {
+ return m.ClusterId
+ }
+ return ""
+}
+
+func (m *SetMaintenancePolicyRequest) GetMaintenancePolicy() *MaintenancePolicy {
+ if m != nil {
+ return m.MaintenancePolicy
+ }
+ return nil
+}
+
+func (m *SetMaintenancePolicyRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// NetworkConfig reports the relative names of network & subnetwork.
+type NetworkConfig struct {
+ // Output only. The relative name of the Google Compute Engine
+ // [network][google.container.v1.NetworkConfig.network](/compute/docs/networks-and-firewalls#networks) to which
+ // the cluster is connected.
+ // Example: projects/my-project/global/networks/my-network
+ Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
+ // Output only. The relative name of the Google Compute Engine
+ // [subnetwork](/compute/docs/vpc) to which the cluster is connected.
+ // Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
+ Subnetwork string `protobuf:"bytes,2,opt,name=subnetwork,proto3" json:"subnetwork,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkConfig) Reset() { *m = NetworkConfig{} }
+func (m *NetworkConfig) String() string { return proto.CompactTextString(m) }
+func (*NetworkConfig) ProtoMessage() {}
+func (*NetworkConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1c7f18b1699f357a, []int{58}
+}
+
+func (m *NetworkConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_NetworkConfig.Unmarshal(m, b)
+}
+func (m *NetworkConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_NetworkConfig.Marshal(b, m, deterministic)
+}
+func (m *NetworkConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkConfig.Merge(m, src)
+}
+func (m *NetworkConfig) XXX_Size() int {
+ return xxx_messageInfo_NetworkConfig.Size(m)
+}
+func (m *NetworkConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkConfig proto.InternalMessageInfo
+
+func (m *NetworkConfig) GetNetwork() string {
+ if m != nil {
+ return m.Network
+ }
+ return ""
+}
+
+func (m *NetworkConfig) GetSubnetwork() string {
+ if m != nil {
+ return m.Subnetwork
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("google.container.v1.NetworkPolicy_Provider", NetworkPolicy_Provider_name, NetworkPolicy_Provider_value)
+ proto.RegisterEnum("google.container.v1.Cluster_Status", Cluster_Status_name, Cluster_Status_value)
+ proto.RegisterEnum("google.container.v1.Operation_Status", Operation_Status_name, Operation_Status_value)
+ proto.RegisterEnum("google.container.v1.Operation_Type", Operation_Type_name, Operation_Type_value)
+ proto.RegisterEnum("google.container.v1.SetMasterAuthRequest_Action", SetMasterAuthRequest_Action_name, SetMasterAuthRequest_Action_value)
+ proto.RegisterEnum("google.container.v1.NodePool_Status", NodePool_Status_name, NodePool_Status_value)
+ proto.RegisterType((*NodeConfig)(nil), "google.container.v1.NodeConfig")
+ proto.RegisterMapType((map[string]string)(nil), "google.container.v1.NodeConfig.LabelsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "google.container.v1.NodeConfig.MetadataEntry")
+ proto.RegisterType((*MasterAuth)(nil), "google.container.v1.MasterAuth")
+ proto.RegisterType((*ClientCertificateConfig)(nil), "google.container.v1.ClientCertificateConfig")
+ proto.RegisterType((*AddonsConfig)(nil), "google.container.v1.AddonsConfig")
+ proto.RegisterType((*HttpLoadBalancing)(nil), "google.container.v1.HttpLoadBalancing")
+ proto.RegisterType((*HorizontalPodAutoscaling)(nil), "google.container.v1.HorizontalPodAutoscaling")
+ proto.RegisterType((*KubernetesDashboard)(nil), "google.container.v1.KubernetesDashboard")
+ proto.RegisterType((*NetworkPolicyConfig)(nil), "google.container.v1.NetworkPolicyConfig")
+ proto.RegisterType((*PrivateClusterConfig)(nil), "google.container.v1.PrivateClusterConfig")
+ proto.RegisterType((*MasterAuthorizedNetworksConfig)(nil), "google.container.v1.MasterAuthorizedNetworksConfig")
+ proto.RegisterType((*MasterAuthorizedNetworksConfig_CidrBlock)(nil), "google.container.v1.MasterAuthorizedNetworksConfig.CidrBlock")
+ proto.RegisterType((*LegacyAbac)(nil), "google.container.v1.LegacyAbac")
+ proto.RegisterType((*NetworkPolicy)(nil), "google.container.v1.NetworkPolicy")
+ proto.RegisterType((*IPAllocationPolicy)(nil), "google.container.v1.IPAllocationPolicy")
+ proto.RegisterType((*Cluster)(nil), "google.container.v1.Cluster")
+ proto.RegisterMapType((map[string]string)(nil), "google.container.v1.Cluster.ResourceLabelsEntry")
+ proto.RegisterType((*ClusterUpdate)(nil), "google.container.v1.ClusterUpdate")
+ proto.RegisterType((*Operation)(nil), "google.container.v1.Operation")
+ proto.RegisterType((*CreateClusterRequest)(nil), "google.container.v1.CreateClusterRequest")
+ proto.RegisterType((*GetClusterRequest)(nil), "google.container.v1.GetClusterRequest")
+ proto.RegisterType((*UpdateClusterRequest)(nil), "google.container.v1.UpdateClusterRequest")
+ proto.RegisterType((*UpdateNodePoolRequest)(nil), "google.container.v1.UpdateNodePoolRequest")
+ proto.RegisterType((*SetNodePoolAutoscalingRequest)(nil), "google.container.v1.SetNodePoolAutoscalingRequest")
+ proto.RegisterType((*SetLoggingServiceRequest)(nil), "google.container.v1.SetLoggingServiceRequest")
+ proto.RegisterType((*SetMonitoringServiceRequest)(nil), "google.container.v1.SetMonitoringServiceRequest")
+ proto.RegisterType((*SetAddonsConfigRequest)(nil), "google.container.v1.SetAddonsConfigRequest")
+ proto.RegisterType((*SetLocationsRequest)(nil), "google.container.v1.SetLocationsRequest")
+ proto.RegisterType((*UpdateMasterRequest)(nil), "google.container.v1.UpdateMasterRequest")
+ proto.RegisterType((*SetMasterAuthRequest)(nil), "google.container.v1.SetMasterAuthRequest")
+ proto.RegisterType((*DeleteClusterRequest)(nil), "google.container.v1.DeleteClusterRequest")
+ proto.RegisterType((*ListClustersRequest)(nil), "google.container.v1.ListClustersRequest")
+ proto.RegisterType((*ListClustersResponse)(nil), "google.container.v1.ListClustersResponse")
+ proto.RegisterType((*GetOperationRequest)(nil), "google.container.v1.GetOperationRequest")
+ proto.RegisterType((*ListOperationsRequest)(nil), "google.container.v1.ListOperationsRequest")
+ proto.RegisterType((*CancelOperationRequest)(nil), "google.container.v1.CancelOperationRequest")
+ proto.RegisterType((*ListOperationsResponse)(nil), "google.container.v1.ListOperationsResponse")
+ proto.RegisterType((*GetServerConfigRequest)(nil), "google.container.v1.GetServerConfigRequest")
+ proto.RegisterType((*ServerConfig)(nil), "google.container.v1.ServerConfig")
+ proto.RegisterType((*CreateNodePoolRequest)(nil), "google.container.v1.CreateNodePoolRequest")
+ proto.RegisterType((*DeleteNodePoolRequest)(nil), "google.container.v1.DeleteNodePoolRequest")
+ proto.RegisterType((*ListNodePoolsRequest)(nil), "google.container.v1.ListNodePoolsRequest")
+ proto.RegisterType((*GetNodePoolRequest)(nil), "google.container.v1.GetNodePoolRequest")
+ proto.RegisterType((*NodePool)(nil), "google.container.v1.NodePool")
+ proto.RegisterType((*NodeManagement)(nil), "google.container.v1.NodeManagement")
+ proto.RegisterType((*AutoUpgradeOptions)(nil), "google.container.v1.AutoUpgradeOptions")
+ proto.RegisterType((*MaintenancePolicy)(nil), "google.container.v1.MaintenancePolicy")
+ proto.RegisterType((*MaintenanceWindow)(nil), "google.container.v1.MaintenanceWindow")
+ proto.RegisterType((*DailyMaintenanceWindow)(nil), "google.container.v1.DailyMaintenanceWindow")
+ proto.RegisterType((*SetNodePoolManagementRequest)(nil), "google.container.v1.SetNodePoolManagementRequest")
+ proto.RegisterType((*SetNodePoolSizeRequest)(nil), "google.container.v1.SetNodePoolSizeRequest")
+ proto.RegisterType((*RollbackNodePoolUpgradeRequest)(nil), "google.container.v1.RollbackNodePoolUpgradeRequest")
+ proto.RegisterType((*ListNodePoolsResponse)(nil), "google.container.v1.ListNodePoolsResponse")
+ proto.RegisterType((*NodePoolAutoscaling)(nil), "google.container.v1.NodePoolAutoscaling")
+ proto.RegisterType((*SetLabelsRequest)(nil), "google.container.v1.SetLabelsRequest")
+ proto.RegisterMapType((map[string]string)(nil), "google.container.v1.SetLabelsRequest.ResourceLabelsEntry")
+ proto.RegisterType((*SetLegacyAbacRequest)(nil), "google.container.v1.SetLegacyAbacRequest")
+ proto.RegisterType((*StartIPRotationRequest)(nil), "google.container.v1.StartIPRotationRequest")
+ proto.RegisterType((*CompleteIPRotationRequest)(nil), "google.container.v1.CompleteIPRotationRequest")
+ proto.RegisterType((*AcceleratorConfig)(nil), "google.container.v1.AcceleratorConfig")
+ proto.RegisterType((*SetNetworkPolicyRequest)(nil), "google.container.v1.SetNetworkPolicyRequest")
+ proto.RegisterType((*SetMaintenancePolicyRequest)(nil), "google.container.v1.SetMaintenancePolicyRequest")
+ proto.RegisterType((*NetworkConfig)(nil), "google.container.v1.NetworkConfig")
+}
+
+func init() {
+ proto.RegisterFile("google/container/v1/cluster_service.proto", fileDescriptor_1c7f18b1699f357a)
+}
+
+var fileDescriptor_1c7f18b1699f357a = []byte{
+ // 5271 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3c, 0x6d, 0x8c, 0x24, 0xd7,
+ 0x51, 0xe9, 0xd9, 0xd9, 0xd9, 0x9d, 0x9a, 0xd9, 0xd9, 0xd9, 0xb7, 0x1f, 0x37, 0x1e, 0xdf, 0xd9,
+ 0x77, 0x1d, 0x9f, 0x7d, 0xb7, 0xb6, 0x77, 0x7c, 0xe7, 0xef, 0xf3, 0xd9, 0xf1, 0xdc, 0xec, 0x78,
+ 0x6f, 0x7c, 0xfb, 0x31, 0xe9, 0xd9, 0xbd, 0x93, 0x0f, 0x93, 0x56, 0xef, 0x4c, 0xdf, 0x6c, 0x7b,
+ 0x7b, 0xba, 0x3b, 0xdd, 0x3d, 0x67, 0xef, 0x9d, 0x0e, 0xf1, 0x91, 0x04, 0x83, 0x63, 0x93, 0x90,
+ 0x04, 0xa4, 0xf0, 0x21, 0x02, 0x24, 0x28, 0x01, 0x41, 0x88, 0x88, 0x04, 0x02, 0x21, 0x21, 0xf1,
+ 0x83, 0x00, 0x12, 0x08, 0x90, 0x85, 0x90, 0xf8, 0x87, 0x10, 0x41, 0x42, 0x22, 0xc0, 0x1f, 0x04,
+ 0x88, 0xe8, 0x7d, 0x74, 0xf7, 0xeb, 0x99, 0xee, 0x99, 0x9d, 0xdd, 0xbb, 0xcd, 0xfd, 0xba, 0xed,
+ 0x7a, 0xaf, 0xde, 0xab, 0xaa, 0x57, 0xaf, 0xaa, 0x5e, 0x55, 0xcd, 0xc1, 0xd9, 0xb6, 0x69, 0xb6,
+ 0x75, 0xb5, 0xd4, 0x34, 0x0d, 0x57, 0xd1, 0x0c, 0xd5, 0x2e, 0xdd, 0x3c, 0x57, 0x6a, 0xea, 0x5d,
+ 0xc7, 0x55, 0x6d, 0xd9, 0x51, 0xed, 0x9b, 0x5a, 0x53, 0x5d, 0xb2, 0x6c, 0xd3, 0x35, 0xd1, 0x2c,
+ 0x9d, 0xba, 0xe4, 0x4f, 0x5d, 0xba, 0x79, 0xae, 0x78, 0x9c, 0xe1, 0x2b, 0x96, 0x56, 0x52, 0x0c,
+ 0xc3, 0x74, 0x15, 0x57, 0x33, 0x0d, 0x87, 0xa2, 0x14, 0x1f, 0x64, 0xa3, 0xe4, 0x6b, 0xbb, 0x7b,
+ 0xa3, 0xa4, 0x76, 0x2c, 0x77, 0x8f, 0x0e, 0x8a, 0xdf, 0x1e, 0x07, 0x58, 0x37, 0x5b, 0x6a, 0xc5,
+ 0x34, 0x6e, 0x68, 0x6d, 0x74, 0x0a, 0xb2, 0x1d, 0xa5, 0xb9, 0xa3, 0x19, 0xaa, 0xec, 0xee, 0x59,
+ 0x6a, 0x41, 0x38, 0x29, 0x9c, 0x49, 0x4b, 0x19, 0x06, 0xdb, 0xdc, 0xb3, 0x54, 0x74, 0x12, 0xb2,
+ 0x2d, 0xcd, 0xd9, 0x95, 0x1d, 0xed, 0x96, 0x2a, 0xb7, 0xb7, 0x0b, 0x89, 0x93, 0xc2, 0x99, 0x71,
+ 0x09, 0x30, 0xac, 0xa1, 0xdd, 0x52, 0x57, 0xb6, 0xf1, 0x22, 0xa6, 0xd2, 0x75, 0x77, 0x64, 0xa7,
+ 0x69, 0x5a, 0xaa, 0x53, 0x18, 0x3b, 0x39, 0x86, 0x17, 0x21, 0xb0, 0x06, 0x01, 0xa1, 0xc7, 0x60,
+ 0x9a, 0xf1, 0x25, 0x2b, 0xcd, 0xa6, 0xd9, 0x35, 0xdc, 0x42, 0x9a, 0x6c, 0x95, 0x63, 0xe0, 0x32,
+ 0x85, 0xa2, 0x1a, 0x4c, 0x76, 0x54, 0x57, 0x69, 0x29, 0xae, 0x52, 0x48, 0x9e, 0x1c, 0x3b, 0x93,
+ 0x39, 0xff, 0xe4, 0x52, 0x84, 0x08, 0x96, 0x02, 0x1e, 0x96, 0xd6, 0xd8, 0xfc, 0xaa, 0xe1, 0xda,
+ 0x7b, 0x92, 0x8f, 0x8e, 0x4e, 0x00, 0x68, 0x1d, 0xa5, 0xcd, 0x38, 0x1b, 0x27, 0xdb, 0xa5, 0x09,
+ 0x84, 0xf0, 0x55, 0x81, 0x94, 0xae, 0x6c, 0xab, 0xba, 0x53, 0x48, 0x91, 0x7d, 0x1e, 0x1f, 0xb6,
+ 0xcf, 0x2a, 0x99, 0x4d, 0x77, 0x61, 0xa8, 0xe8, 0x51, 0x98, 0xd6, 0xcd, 0xa6, 0xa2, 0xcb, 0x8e,
+ 0xd3, 0x92, 0x29, 0x5f, 0x13, 0x44, 0x3e, 0x53, 0x04, 0xdc, 0x70, 0x5a, 0x15, 0xc2, 0x16, 0x82,
+ 0xa4, 0xab, 0xb4, 0x9d, 0xc2, 0x24, 0x11, 0x0d, 0xf9, 0x1b, 0x9d, 0x84, 0x8c, 0x65, 0xab, 0xf8,
+ 0x70, 0xb4, 0x6d, 0x5d, 0x2d, 0xc0, 0x49, 0xe1, 0xcc, 0xa4, 0xc4, 0x83, 0xd0, 0xeb, 0x90, 0x55,
+ 0x9a, 0x4d, 0x55, 0x57, 0x6d, 0xc5, 0x35, 0x6d, 0xa7, 0x90, 0x21, 0x84, 0x3e, 0x1a, 0x49, 0x68,
+ 0x39, 0x98, 0x48, 0xe9, 0x95, 0x42, 0xb8, 0xe8, 0x41, 0x48, 0x93, 0x63, 0x24, 0xc2, 0xc8, 0x12,
+ 0x61, 0x4c, 0x62, 0x00, 0x91, 0xc5, 0x19, 0xc8, 0x77, 0x34, 0x43, 0x6e, 0x5a, 0x5d, 0xd9, 0xd2,
+ 0x15, 0xf7, 0x86, 0x69, 0x77, 0x0a, 0x53, 0xf4, 0x7c, 0x3a, 0x9a, 0x51, 0xb1, 0xba, 0x75, 0x06,
+ 0x2d, 0xbe, 0x04, 0x53, 0x21, 0x79, 0xa3, 0x3c, 0x8c, 0xed, 0xaa, 0x7b, 0x4c, 0x71, 0xf0, 0x9f,
+ 0x68, 0x0e, 0xc6, 0x6f, 0x2a, 0x7a, 0x57, 0x25, 0x9a, 0x92, 0x96, 0xe8, 0xc7, 0x85, 0xc4, 0x0b,
+ 0x42, 0xf1, 0x45, 0xc8, 0x70, 0x42, 0x1c, 0x05, 0x55, 0xfc, 0x56, 0x02, 0x60, 0x4d, 0xc1, 0x17,
+ 0xa4, 0xdc, 0x75, 0x77, 0x50, 0x11, 0x26, 0xbb, 0x8e, 0x6a, 0x1b, 0x4a, 0xc7, 0xd3, 0x59, 0xff,
+ 0x1b, 0x8f, 0x59, 0x8a, 0xe3, 0xbc, 0x6d, 0xda, 0x2d, 0xb6, 0x8e, 0xff, 0x8d, 0x76, 0xe0, 0x81,
+ 0xa6, 0xae, 0xa9, 0x86, 0x2b, 0x37, 0x55, 0xdb, 0xd5, 0x6e, 0x68, 0x4d, 0xc5, 0x55, 0xe5, 0x26,
+ 0x11, 0x58, 0x61, 0xec, 0xa4, 0x70, 0x26, 0x73, 0xfe, 0x89, 0x48, 0xf1, 0x56, 0x08, 0x56, 0x25,
+ 0x40, 0x62, 0x42, 0x3e, 0xd6, 0x8c, 0x1e, 0x40, 0xcf, 0xc0, 0x82, 0x77, 0xa3, 0x9b, 0x0a, 0xbf,
+ 0x5b, 0xa1, 0x45, 0x68, 0x9a, 0x63, 0xa3, 0x15, 0x85, 0xc3, 0x45, 0x4f, 0x02, 0xea, 0xa7, 0xaf,
+ 0xa0, 0x12, 0x8c, 0x99, 0xbe, 0xad, 0xb0, 0x8a, 0xb3, 0xe9, 0x58, 0x90, 0x37, 0xa8, 0x8a, 0x53,
+ 0xc8, 0x15, 0x75, 0x4f, 0x6c, 0xc0, 0xb1, 0x18, 0xba, 0xd1, 0x0b, 0x50, 0xd0, 0x1c, 0xa7, 0xab,
+ 0xca, 0x11, 0xdb, 0x09, 0x44, 0x13, 0x17, 0xc8, 0x78, 0x1f, 0xbe, 0xf8, 0xc1, 0x18, 0x64, 0xcb,
+ 0xad, 0x96, 0x69, 0x38, 0x6c, 0xa9, 0xab, 0x30, 0xbb, 0xe3, 0xba, 0x96, 0xac, 0x9b, 0x4a, 0x4b,
+ 0xde, 0x56, 0x74, 0xc5, 0x68, 0x6a, 0x46, 0x9b, 0xac, 0x12, 0xa7, 0xac, 0x97, 0x5d, 0xd7, 0x5a,
+ 0x35, 0x95, 0xd6, 0x25, 0x6f, 0xb6, 0x34, 0xb3, 0xd3, 0x0b, 0x42, 0xbb, 0x50, 0xdc, 0x31, 0x6d,
+ 0xed, 0x16, 0x46, 0xd4, 0x65, 0xcb, 0x6c, 0xc9, 0x4a, 0xd7, 0x35, 0x9d, 0xa6, 0xa2, 0xe3, 0xe5,
+ 0x13, 0x64, 0xf9, 0x68, 0xe3, 0x70, 0xd9, 0x47, 0xab, 0x9b, 0xad, 0x72, 0x80, 0x24, 0x15, 0x76,
+ 0x62, 0x46, 0xd0, 0x0f, 0xc1, 0xdc, 0x6e, 0x77, 0x5b, 0xb5, 0x0d, 0xd5, 0x55, 0x1d, 0xb9, 0xa5,
+ 0x38, 0x3b, 0xdb, 0xa6, 0x62, 0xb7, 0x98, 0x4e, 0x9c, 0x89, 0xdc, 0xe6, 0x8a, 0x8f, 0xb0, 0xec,
+ 0xcd, 0x97, 0x66, 0x77, 0xfb, 0x81, 0xe8, 0x4d, 0x98, 0x37, 0x54, 0xf7, 0x6d, 0xd3, 0xde, 0x95,
+ 0x2d, 0x53, 0xd7, 0x9a, 0x7b, 0x9e, 0xc6, 0x25, 0x07, 0xac, 0xbe, 0x4e, 0x31, 0xea, 0x04, 0x81,
+ 0x69, 0xdb, 0xac, 0xd1, 0x0f, 0x14, 0x4b, 0x30, 0xd3, 0x27, 0x4f, 0x7c, 0x09, 0x5a, 0x9a, 0xa3,
+ 0x6c, 0xeb, 0x6a, 0x8b, 0x9d, 0xa7, 0xff, 0x2d, 0x3e, 0x07, 0x85, 0x38, 0x09, 0x0d, 0xc4, 0x3b,
+ 0x07, 0xb3, 0x11, 0x2c, 0x0f, 0x43, 0x89, 0xe0, 0x63, 0x20, 0xca, 0x4f, 0x25, 0x60, 0xae, 0x6e,
+ 0x6b, 0x37, 0xb1, 0xae, 0xb2, 0x2b, 0x42, 0x91, 0x9e, 0x82, 0x39, 0xd5, 0xc0, 0x73, 0x64, 0x8b,
+ 0x0e, 0xcb, 0x86, 0xd9, 0x52, 0x1d, 0xb6, 0x00, 0xa2, 0x63, 0x0c, 0x13, 0x9b, 0x6d, 0x07, 0x3d,
+ 0x07, 0xc7, 0x7a, 0x30, 0x54, 0xa3, 0x65, 0x99, 0x9a, 0xe1, 0x12, 0xf5, 0x99, 0x94, 0xe6, 0x43,
+ 0x48, 0x55, 0x36, 0x88, 0x9e, 0x86, 0x85, 0x0e, 0xb1, 0x35, 0xb2, 0x66, 0xdd, 0x7c, 0x46, 0x6e,
+ 0x6a, 0x2d, 0x5b, 0xde, 0xd6, 0xcd, 0xe6, 0x2e, 0x51, 0x87, 0xb4, 0x34, 0x4b, 0x47, 0x6b, 0xd6,
+ 0xcd, 0x67, 0x2a, 0x5a, 0xcb, 0xbe, 0x84, 0x87, 0xd0, 0x59, 0xc8, 0xf7, 0xed, 0x92, 0x24, 0xd3,
+ 0xa7, 0xad, 0x9e, 0xf5, 0x1f, 0x83, 0x69, 0xab, 0xbb, 0xad, 0x6b, 0xcd, 0x60, 0x26, 0x75, 0x4f,
+ 0x39, 0x0a, 0xf6, 0x26, 0x8a, 0xdf, 0x15, 0xe0, 0xa1, 0xc0, 0xea, 0xe1, 0x33, 0x53, 0x5b, 0x4c,
+ 0x9e, 0xde, 0xed, 0x2b, 0xc0, 0x04, 0x65, 0xc2, 0x93, 0xa4, 0xf7, 0x89, 0x3e, 0x01, 0x99, 0x80,
+ 0x72, 0xa7, 0x90, 0x20, 0xce, 0xe3, 0xe5, 0x48, 0x5d, 0x1b, 0xbc, 0xc7, 0x92, 0xcf, 0xa4, 0x04,
+ 0x4d, 0xef, 0x4f, 0xa7, 0xb8, 0x06, 0xe9, 0x80, 0xfb, 0x53, 0x24, 0x4a, 0xb0, 0x74, 0x65, 0x4f,
+ 0xe6, 0x8c, 0x72, 0x86, 0xc1, 0xd6, 0xb1, 0x5d, 0xc6, 0xc6, 0x2a, 0x90, 0x64, 0x82, 0x19, 0x2b,
+ 0x6f, 0x05, 0xf1, 0x51, 0x80, 0x55, 0xb5, 0xad, 0x34, 0xf7, 0xca, 0xdb, 0x4a, 0x33, 0x9e, 0x2d,
+ 0xf1, 0xd7, 0x05, 0x98, 0x0a, 0xe9, 0x14, 0x5a, 0x81, 0x49, 0xcb, 0x36, 0x6f, 0x6a, 0x2d, 0xd5,
+ 0x26, 0x93, 0x73, 0x71, 0xbe, 0x9c, 0xc7, 0x5a, 0xaa, 0x33, 0x14, 0xc9, 0x47, 0xe6, 0x37, 0x4d,
+ 0x84, 0x37, 0x7d, 0x0a, 0x26, 0xeb, 0xc1, 0xac, 0xb9, 0xba, 0xb4, 0x71, 0xb5, 0xb6, 0x5c, 0x95,
+ 0xe4, 0xad, 0xf5, 0x46, 0xbd, 0x5a, 0xa9, 0xbd, 0x56, 0xab, 0x2e, 0xe7, 0x3f, 0x82, 0x00, 0x52,
+ 0x95, 0xf2, 0x6a, 0xad, 0xb2, 0x91, 0x17, 0xc4, 0x3f, 0x49, 0x02, 0xaa, 0xd5, 0xcb, 0x3a, 0x8e,
+ 0x03, 0x70, 0x74, 0xc6, 0x68, 0x7d, 0x04, 0x72, 0x5d, 0x47, 0x95, 0x35, 0x4b, 0x56, 0x74, 0x4d,
+ 0x71, 0x7c, 0xf5, 0xcd, 0x76, 0x1d, 0xb5, 0x66, 0x95, 0x29, 0x0c, 0x3d, 0x0e, 0x33, 0x4d, 0x5b,
+ 0xc5, 0xaa, 0xe4, 0x74, 0xb7, 0xd9, 0x9d, 0x67, 0x24, 0xe5, 0xe9, 0x40, 0xc3, 0x87, 0x93, 0xd8,
+ 0xca, 0xff, 0xa2, 0xd2, 0x1f, 0x63, 0xb1, 0x95, 0x0f, 0x26, 0x07, 0xb0, 0x04, 0x33, 0x9e, 0x4b,
+ 0xf2, 0xf5, 0x9a, 0xaa, 0xe8, 0xa5, 0x44, 0x41, 0x90, 0xa6, 0xd9, 0xa0, 0xa7, 0xd6, 0xe8, 0x0c,
+ 0xe4, 0xf0, 0x0d, 0xe3, 0x26, 0x8f, 0xfb, 0x93, 0xb3, 0x78, 0xc4, 0x9f, 0xf9, 0x14, 0x20, 0x16,
+ 0xc7, 0x39, 0xdc, 0xec, 0x94, 0x3f, 0x3b, 0xef, 0x8d, 0xfa, 0x18, 0x1f, 0x83, 0xe3, 0x41, 0xc0,
+ 0xdb, 0x34, 0x8d, 0x96, 0x62, 0xef, 0xc9, 0xb6, 0x62, 0xb4, 0x55, 0xca, 0xc1, 0x04, 0xe1, 0xe0,
+ 0x01, 0x36, 0xa7, 0xe1, 0x4d, 0x91, 0xf0, 0x0c, 0xc2, 0x4c, 0x19, 0x4e, 0xf8, 0x5b, 0x46, 0xae,
+ 0x30, 0x49, 0x56, 0x28, 0x7a, 0x93, 0x22, 0x96, 0x78, 0x16, 0x8e, 0xf5, 0xc9, 0x83, 0x69, 0x67,
+ 0x3a, 0xe4, 0xa3, 0xc3, 0x17, 0xbd, 0x04, 0x73, 0x61, 0xb1, 0x30, 0x1c, 0xa0, 0x5e, 0x9a, 0x17,
+ 0x0c, 0x45, 0x78, 0x1e, 0x0a, 0xfd, 0xd2, 0x61, 0x48, 0x19, 0x82, 0x34, 0xdf, 0x2b, 0x1f, 0x7a,
+ 0x25, 0xbe, 0x39, 0x03, 0x13, 0xcc, 0x06, 0xe2, 0x08, 0x92, 0xbb, 0x58, 0xe4, 0x6f, 0x1c, 0x41,
+ 0xb6, 0x54, 0xa7, 0x69, 0x6b, 0x16, 0xd6, 0x30, 0x76, 0xa5, 0x78, 0x10, 0x7a, 0x02, 0x90, 0x66,
+ 0x68, 0xae, 0xa6, 0xe8, 0xc4, 0x58, 0xb2, 0x10, 0x75, 0x8c, 0x84, 0xa8, 0x79, 0x36, 0x42, 0x43,
+ 0x5c, 0x1c, 0xa5, 0xbe, 0x0a, 0x19, 0x36, 0x8b, 0xf3, 0x4e, 0x0f, 0x0f, 0x89, 0x8b, 0x25, 0x30,
+ 0x82, 0xf7, 0xc4, 0xab, 0x90, 0x61, 0x96, 0x13, 0x07, 0xff, 0x44, 0x5f, 0xe2, 0x56, 0x08, 0x6c,
+ 0x8e, 0x04, 0x9d, 0x20, 0xb2, 0x7b, 0x0c, 0x47, 0xd4, 0xed, 0xb6, 0x66, 0xb4, 0xbd, 0x97, 0x10,
+ 0xd5, 0x23, 0x29, 0xc7, 0xc0, 0x0d, 0x0a, 0xc5, 0xa1, 0x52, 0xc7, 0x34, 0x34, 0xd7, 0xb4, 0xf9,
+ 0xb9, 0x54, 0x6f, 0x66, 0x82, 0x11, 0x6f, 0x7a, 0x01, 0x26, 0xbc, 0x8b, 0x44, 0x35, 0xc3, 0xfb,
+ 0x44, 0x8b, 0x51, 0xd7, 0x82, 0x2a, 0x40, 0xdf, 0x95, 0x78, 0x0d, 0xa6, 0x14, 0x12, 0xfb, 0x78,
+ 0x32, 0x02, 0xc2, 0xe1, 0xa9, 0xe8, 0x90, 0x9c, 0x8b, 0x92, 0xa4, 0xac, 0xc2, 0xc7, 0x4c, 0x0f,
+ 0x01, 0x70, 0x37, 0x9b, 0x2a, 0x01, 0x07, 0x41, 0x17, 0x81, 0x48, 0x55, 0xb6, 0x4c, 0x53, 0x77,
+ 0x0a, 0x59, 0x62, 0xba, 0x4f, 0xc4, 0x1e, 0x44, 0xdd, 0x34, 0x75, 0x29, 0x6d, 0xb0, 0xbf, 0x1c,
+ 0x74, 0x1c, 0xd2, 0x9e, 0xd9, 0x71, 0x0a, 0x53, 0xe4, 0xc9, 0x11, 0x00, 0x38, 0xaf, 0xc8, 0x45,
+ 0x3c, 0x8a, 0x6e, 0xed, 0x28, 0x85, 0x1c, 0xef, 0x15, 0x03, 0x5f, 0x5f, 0xc6, 0x83, 0xe8, 0x0d,
+ 0x98, 0xb6, 0x55, 0xc7, 0xec, 0xda, 0x4d, 0x55, 0x66, 0x2f, 0xa7, 0x69, 0x42, 0xd8, 0x53, 0x31,
+ 0x11, 0x33, 0x11, 0xdd, 0x92, 0xc4, 0x70, 0xf8, 0xe7, 0x53, 0xce, 0x0e, 0x01, 0xb1, 0xbd, 0x23,
+ 0x2b, 0xca, 0x37, 0x34, 0xa3, 0xad, 0xda, 0x96, 0x8d, 0x5d, 0x62, 0x9e, 0x48, 0x25, 0x4f, 0x06,
+ 0x5e, 0x0b, 0xe0, 0x58, 0xc7, 0x74, 0xe2, 0x28, 0x64, 0x65, 0x5b, 0x69, 0x16, 0xd0, 0x00, 0x1d,
+ 0x0b, 0x1c, 0x8a, 0x04, 0x7a, 0xe0, 0x5c, 0x6a, 0x90, 0x0b, 0xc7, 0x63, 0x85, 0x59, 0xb2, 0x88,
+ 0x38, 0xdc, 0x6d, 0x48, 0x53, 0xa1, 0x10, 0x0c, 0xbd, 0x01, 0x73, 0xc4, 0x96, 0x7b, 0xe2, 0xf5,
+ 0x16, 0x9c, 0x23, 0x0b, 0x3e, 0x16, 0xb9, 0x60, 0xbf, 0x5b, 0x90, 0x90, 0x66, 0xf5, 0xb9, 0x8a,
+ 0x1f, 0x81, 0x53, 0xdc, 0x5d, 0xa2, 0x8e, 0x59, 0x66, 0xbb, 0xfb, 0xfa, 0xb7, 0x40, 0xf6, 0x79,
+ 0xfa, 0x00, 0x5e, 0x5d, 0x7a, 0xa8, 0x33, 0x38, 0xb2, 0xd8, 0x02, 0xd4, 0x51, 0x34, 0xc3, 0x55,
+ 0x0d, 0xc5, 0x68, 0xaa, 0x1e, 0x63, 0xc7, 0x06, 0x84, 0xf5, 0x6b, 0xc1, 0x74, 0xc6, 0xd7, 0x4c,
+ 0xa7, 0x17, 0xc4, 0x0b, 0x9f, 0xf1, 0xf0, 0xe0, 0x70, 0xe1, 0x33, 0x92, 0x3d, 0xe1, 0x33, 0x0a,
+ 0x65, 0x58, 0xf0, 0x42, 0x2e, 0xff, 0xad, 0x45, 0x97, 0x3c, 0x4d, 0x96, 0x3c, 0x1b, 0xb9, 0x64,
+ 0x54, 0x70, 0x29, 0xcd, 0x59, 0x51, 0x21, 0xe7, 0x83, 0x90, 0x76, 0x54, 0xfd, 0x86, 0xac, 0x6b,
+ 0xc6, 0x2e, 0x7b, 0xb7, 0x4d, 0x62, 0xc0, 0xaa, 0x66, 0xec, 0xa2, 0x05, 0x48, 0xde, 0x32, 0x0d,
+ 0xf6, 0x3a, 0x23, 0x6e, 0x8e, 0x7c, 0xe3, 0xe0, 0xd6, 0x0f, 0xeb, 0xe8, 0x93, 0xcc, 0xff, 0xc6,
+ 0x77, 0xcf, 0xb3, 0xc7, 0x1e, 0xc5, 0x37, 0x55, 0xdb, 0xc1, 0xd6, 0xbb, 0x4d, 0x3d, 0x01, 0x1b,
+ 0x66, 0x74, 0x5c, 0xa5, 0x83, 0xe4, 0x35, 0xd9, 0xb5, 0x6d, 0xfc, 0x52, 0x63, 0x3a, 0xe1, 0xa1,
+ 0xed, 0x30, 0x4f, 0x45, 0x47, 0xe9, 0x91, 0x07, 0x58, 0x1e, 0x9c, 0x5a, 0x7f, 0x0f, 0x47, 0xf3,
+ 0x29, 0x46, 0x6c, 0x1c, 0x5b, 0x11, 0x0f, 0xeb, 0x61, 0xc8, 0xb0, 0xe0, 0xc3, 0xd5, 0x3a, 0x6a,
+ 0xe1, 0x2d, 0x6a, 0x9c, 0x28, 0x68, 0x53, 0xeb, 0xa8, 0xe8, 0x25, 0x48, 0x39, 0xae, 0xe2, 0x76,
+ 0x9d, 0xc2, 0x2e, 0x89, 0xb6, 0x3e, 0x3a, 0xf0, 0xfe, 0x37, 0xc8, 0x54, 0x89, 0xa1, 0xa0, 0xd3,
+ 0x90, 0xa3, 0x7f, 0xc9, 0x1d, 0xd5, 0x71, 0x94, 0xb6, 0x5a, 0xd0, 0xc9, 0x06, 0x53, 0x14, 0xba,
+ 0x46, 0x81, 0xe8, 0x49, 0x98, 0xed, 0x71, 0xb2, 0x8e, 0x76, 0x4b, 0x2d, 0x74, 0xa8, 0xe7, 0xe2,
+ 0x7d, 0x6c, 0x43, 0xbb, 0xa5, 0x62, 0x3f, 0x17, 0x11, 0x80, 0x18, 0xd4, 0x82, 0xf4, 0x05, 0x1f,
+ 0xe7, 0x61, 0x56, 0x33, 0x1c, 0x97, 0xa8, 0x75, 0xdb, 0x36, 0xbb, 0x96, 0xdc, 0xb5, 0x75, 0xa7,
+ 0x60, 0x62, 0x4b, 0x49, 0xc4, 0x32, 0xe3, 0x0d, 0xaf, 0xe0, 0xd1, 0x2d, 0x5b, 0x77, 0xf0, 0x0e,
+ 0x21, 0x59, 0x52, 0x4f, 0x6a, 0x51, 0x7a, 0x38, 0x29, 0x52, 0x4f, 0xfa, 0x30, 0x64, 0xd4, 0x77,
+ 0x2c, 0xcd, 0x66, 0x32, 0xfc, 0x24, 0x95, 0x21, 0x05, 0x11, 0x19, 0x16, 0x61, 0xd2, 0xbb, 0xee,
+ 0x05, 0x9b, 0x2a, 0x89, 0xf7, 0x5d, 0x2c, 0xc3, 0x6c, 0x84, 0xd1, 0x1c, 0x29, 0x5d, 0xf2, 0x36,
+ 0xa4, 0xa8, 0xdc, 0xd1, 0x02, 0xa0, 0xc6, 0x66, 0x79, 0x73, 0xab, 0xd1, 0x13, 0xab, 0xe6, 0x21,
+ 0x4b, 0xa2, 0xd8, 0x46, 0x6d, 0x63, 0xbd, 0xb6, 0xbe, 0x92, 0x17, 0x50, 0x06, 0x26, 0xa4, 0xad,
+ 0x75, 0xf2, 0x91, 0x40, 0xd3, 0x90, 0x91, 0xaa, 0x95, 0x8d, 0xf5, 0x4a, 0x6d, 0x15, 0x03, 0xc6,
+ 0x50, 0x16, 0x26, 0x1b, 0x9b, 0x1b, 0xf5, 0x3a, 0xfe, 0x4a, 0xa2, 0x34, 0x8c, 0x57, 0x25, 0x69,
+ 0x43, 0xca, 0x8f, 0xe3, 0x81, 0xe5, 0xea, 0x8a, 0x54, 0x5e, 0xae, 0x2e, 0xe7, 0x53, 0xe2, 0x97,
+ 0xc6, 0x61, 0x8a, 0x9d, 0xfc, 0x96, 0xd5, 0x52, 0x5c, 0x15, 0x3f, 0xdb, 0x5a, 0xaa, 0xa3, 0xd9,
+ 0xd8, 0x78, 0xf1, 0x4a, 0x48, 0xdf, 0x46, 0x88, 0x8d, 0xf1, 0x0a, 0x78, 0x11, 0x8a, 0x1e, 0x46,
+ 0x84, 0x87, 0xa7, 0x2f, 0xa5, 0x02, 0x9b, 0xb1, 0xd6, 0xe7, 0xe8, 0xb7, 0x60, 0xde, 0xc3, 0x0e,
+ 0xbb, 0xea, 0xd4, 0x7e, 0x5d, 0xf5, 0x2c, 0xc3, 0x0f, 0x65, 0x39, 0x4a, 0x3d, 0x6c, 0x60, 0xcf,
+ 0x2c, 0x6b, 0x2d, 0x2f, 0xe0, 0xe0, 0xd8, 0xc0, 0x3e, 0xb8, 0xd6, 0xc2, 0x0a, 0xe3, 0x21, 0x70,
+ 0x69, 0x48, 0x1a, 0x7b, 0xe4, 0xd9, 0x48, 0xcd, 0xcf, 0x46, 0xee, 0xc2, 0x89, 0xfe, 0xe5, 0xf9,
+ 0x7c, 0x47, 0x7a, 0x50, 0xaa, 0x80, 0xed, 0xca, 0xa7, 0x3a, 0x8a, 0x3d, 0x14, 0xf1, 0x8f, 0xfc,
+ 0xc7, 0xc1, 0xa3, 0x57, 0x0e, 0xe2, 0x04, 0x20, 0x71, 0x82, 0x47, 0xd9, 0xaa, 0x1f, 0x2e, 0xbc,
+ 0x2f, 0xc0, 0x59, 0xff, 0x38, 0x86, 0xfa, 0xa3, 0xec, 0xc1, 0xfd, 0xd1, 0x69, 0xef, 0x48, 0x07,
+ 0xbb, 0xa5, 0x67, 0x60, 0xa1, 0x87, 0x1c, 0x4f, 0xa3, 0x58, 0x62, 0x2d, 0xb4, 0x0c, 0xd3, 0x29,
+ 0xf1, 0xbb, 0x29, 0x48, 0x6f, 0x58, 0xaa, 0x4d, 0x98, 0x8a, 0x0c, 0xa6, 0x3d, 0x73, 0x9e, 0xe8,
+ 0x31, 0xe7, 0xaf, 0x43, 0xce, 0xf4, 0x10, 0xe9, 0x19, 0x8e, 0x0d, 0xb0, 0x7a, 0xfe, 0x1e, 0x4b,
+ 0xf8, 0x58, 0xa5, 0x29, 0x1f, 0x95, 0x9c, 0xf2, 0xcb, 0xbe, 0xe5, 0x4c, 0x92, 0x35, 0x4e, 0x0f,
+ 0x59, 0xa3, 0xc7, 0x76, 0x2e, 0x40, 0xaa, 0xa5, 0xba, 0x8a, 0xa6, 0x33, 0x35, 0x62, 0x5f, 0x11,
+ 0x36, 0x75, 0x3c, 0xca, 0xa6, 0x86, 0xbc, 0x59, 0xaa, 0xc7, 0x9b, 0x3d, 0x0c, 0x19, 0x57, 0xb1,
+ 0xdb, 0xaa, 0x4b, 0x87, 0xa9, 0x5a, 0x03, 0x05, 0x91, 0x09, 0xbc, 0xc5, 0x4a, 0x87, 0x2d, 0x16,
+ 0x7e, 0xda, 0x3b, 0xae, 0x62, 0xbb, 0xd4, 0xda, 0xd1, 0x87, 0x50, 0x9a, 0x40, 0x88, 0xb1, 0x7b,
+ 0x80, 0x78, 0x44, 0x3a, 0x48, 0x63, 0xdd, 0x09, 0xd5, 0x68, 0xe1, 0x21, 0x51, 0x1a, 0x6a, 0xa8,
+ 0x32, 0x30, 0x51, 0xaf, 0xae, 0x2f, 0x47, 0xd8, 0xa8, 0x49, 0x48, 0x2e, 0x6f, 0xac, 0x57, 0xa9,
+ 0x71, 0x2a, 0x5f, 0xda, 0x90, 0x36, 0x89, 0x71, 0x12, 0xff, 0x2f, 0x01, 0x49, 0x22, 0xee, 0x39,
+ 0xc8, 0x6f, 0xbe, 0x51, 0xaf, 0xf6, 0x2c, 0x88, 0x20, 0x57, 0x91, 0xaa, 0xe5, 0xcd, 0xaa, 0x5c,
+ 0x59, 0xdd, 0x6a, 0x6c, 0x56, 0xa5, 0xbc, 0x80, 0x61, 0xcb, 0xd5, 0xd5, 0x2a, 0x07, 0x4b, 0x60,
+ 0xd8, 0x56, 0x9d, 0x18, 0x36, 0x79, 0xad, 0x4c, 0x60, 0x63, 0x68, 0x06, 0xa6, 0x3c, 0xd8, 0xfa,
+ 0xc6, 0x72, 0xb5, 0x91, 0x4f, 0xe2, 0x69, 0x52, 0xb5, 0x5e, 0xae, 0x49, 0x3e, 0xea, 0x38, 0x45,
+ 0x5d, 0xe6, 0xb7, 0x48, 0x61, 0x62, 0xd8, 0xb6, 0x18, 0x53, 0xae, 0x6f, 0x6c, 0xac, 0xe6, 0x27,
+ 0x30, 0x94, 0x6d, 0x1c, 0x40, 0x27, 0xd1, 0x71, 0x28, 0x34, 0xaa, 0x9b, 0x01, 0x48, 0x5e, 0x2b,
+ 0xaf, 0x97, 0x57, 0xaa, 0x6b, 0xd5, 0xf5, 0xcd, 0x7c, 0x1a, 0xcd, 0xc3, 0x4c, 0x79, 0x6b, 0x73,
+ 0x43, 0x66, 0xdb, 0x52, 0x42, 0x00, 0x0b, 0x90, 0x80, 0xc3, 0x04, 0x66, 0x50, 0x0e, 0x00, 0x2f,
+ 0xb6, 0x5a, 0xbe, 0x54, 0x5d, 0x6d, 0xe4, 0xb3, 0x68, 0x16, 0xa6, 0xf1, 0x37, 0xe5, 0x49, 0x2e,
+ 0x6f, 0x6d, 0x5e, 0xce, 0x4f, 0x11, 0xe9, 0x87, 0x76, 0x6c, 0xd4, 0xae, 0x57, 0xf3, 0x39, 0x1f,
+ 0x5e, 0xdd, 0xbc, 0xb6, 0x21, 0x5d, 0x91, 0xeb, 0x1b, 0xab, 0xb5, 0xca, 0x1b, 0xf9, 0x69, 0x54,
+ 0x84, 0x05, 0xba, 0x48, 0x6d, 0x7d, 0xb3, 0xba, 0x5e, 0x5e, 0xaf, 0x54, 0xbd, 0xb1, 0xbc, 0xf8,
+ 0x15, 0x01, 0xe6, 0x2a, 0x24, 0x5c, 0x60, 0x9e, 0x40, 0x52, 0x3f, 0xd9, 0x55, 0x1d, 0x17, 0x9d,
+ 0x02, 0xb0, 0x6c, 0xf3, 0x2d, 0xb5, 0xe9, 0x62, 0xcb, 0x29, 0xf8, 0x17, 0x2d, 0xcd, 0xa0, 0xb5,
+ 0x56, 0xec, 0x2d, 0x7c, 0x0e, 0x26, 0x58, 0xc0, 0xc4, 0x52, 0xb2, 0xc7, 0x07, 0x05, 0x1d, 0x92,
+ 0x37, 0x19, 0x5f, 0x19, 0x4b, 0xc1, 0xbe, 0x99, 0x5d, 0x09, 0xf6, 0x25, 0x7e, 0x5a, 0x80, 0x99,
+ 0x15, 0xd5, 0xbd, 0x7b, 0x04, 0x9e, 0x02, 0xf0, 0x5f, 0x91, 0x34, 0x6d, 0xcc, 0x50, 0xbd, 0x27,
+ 0x64, 0xcb, 0xb7, 0x3a, 0xe3, 0x81, 0xd5, 0x11, 0xff, 0x54, 0x80, 0x39, 0xea, 0x28, 0x8f, 0x94,
+ 0x94, 0x0b, 0x90, 0xea, 0x92, 0x5d, 0xd9, 0x23, 0x5f, 0x1c, 0x24, 0x4d, 0x4a, 0x9f, 0xc4, 0x30,
+ 0x22, 0xd9, 0xf8, 0x9e, 0x00, 0xf3, 0x74, 0x9a, 0xff, 0x1e, 0x3d, 0x12, 0x3e, 0x1e, 0x81, 0x6c,
+ 0xc8, 0x1b, 0x07, 0xd9, 0x2c, 0x30, 0x02, 0x57, 0x7c, 0x8a, 0xcd, 0xf2, 0x1c, 0x05, 0xa5, 0x9c,
+ 0xe4, 0x3a, 0xbc, 0x98, 0x23, 0x5c, 0x2c, 0x4c, 0xf5, 0x16, 0x0b, 0x3d, 0x9e, 0x27, 0x39, 0x9e,
+ 0x7f, 0x22, 0x01, 0x27, 0x1a, 0xaa, 0x1b, 0xe5, 0x7c, 0xef, 0x23, 0xde, 0x5f, 0x87, 0x0c, 0x1f,
+ 0x46, 0x8c, 0x8f, 0x18, 0x46, 0xf0, 0xc8, 0xbe, 0x14, 0x52, 0x9c, 0x14, 0x7e, 0x57, 0x80, 0x42,
+ 0x43, 0x75, 0x57, 0x43, 0xc9, 0x99, 0x11, 0x04, 0x80, 0x78, 0x01, 0xec, 0x9f, 0xf9, 0x88, 0x34,
+ 0x51, 0x32, 0x32, 0x4d, 0x14, 0xa5, 0xad, 0x7f, 0x2c, 0xc0, 0x83, 0x0d, 0xd5, 0xed, 0x8b, 0x1d,
+ 0x8f, 0xe6, 0xdc, 0xa2, 0x13, 0x57, 0xc9, 0xb8, 0xc4, 0x55, 0x94, 0xd0, 0xff, 0x46, 0x80, 0x85,
+ 0x86, 0xea, 0x86, 0xa2, 0xd6, 0x23, 0xa1, 0xbd, 0x2f, 0xff, 0x95, 0x3c, 0x58, 0xfe, 0x2b, 0x8a,
+ 0xa9, 0xaf, 0x0a, 0x30, 0x4b, 0x34, 0x89, 0x45, 0x9e, 0x47, 0xc3, 0x51, 0x28, 0x57, 0x96, 0xec,
+ 0xcd, 0x95, 0x45, 0xd1, 0xf9, 0xdb, 0x02, 0xcc, 0x52, 0x5b, 0x47, 0x43, 0xcc, 0xa3, 0xa1, 0xf3,
+ 0x34, 0xe4, 0x7a, 0xc2, 0x5d, 0xaa, 0x31, 0x53, 0x9d, 0xd0, 0x93, 0xdf, 0x23, 0x78, 0x82, 0x23,
+ 0xf8, 0x1f, 0x13, 0x30, 0x87, 0xd5, 0x3d, 0x48, 0xb8, 0x1e, 0x09, 0xc5, 0x97, 0x21, 0xa5, 0x34,
+ 0x5d, 0x8f, 0xd2, 0x5c, 0x4c, 0x9a, 0x30, 0x8a, 0xb0, 0xa5, 0x32, 0xc1, 0x93, 0x18, 0x3e, 0x7a,
+ 0xde, 0xf7, 0x56, 0xfb, 0x4c, 0x28, 0xf7, 0xba, 0x2a, 0x5e, 0x1a, 0x75, 0x48, 0xd1, 0xe5, 0x71,
+ 0x30, 0xb9, 0xb5, 0x7e, 0x65, 0x7d, 0xe3, 0xda, 0x3a, 0x7d, 0x0f, 0xe3, 0x80, 0xa6, 0x5e, 0x6e,
+ 0x34, 0xae, 0x6d, 0x48, 0xcb, 0x79, 0x01, 0x87, 0x59, 0x2b, 0xd5, 0xf5, 0xaa, 0x84, 0x43, 0x36,
+ 0x1f, 0x9c, 0xf0, 0x26, 0x6e, 0x35, 0xaa, 0xd2, 0x7a, 0x79, 0xad, 0x9a, 0x1f, 0x13, 0xdf, 0x15,
+ 0x60, 0x6e, 0x59, 0xd5, 0xd5, 0x23, 0xf6, 0xe1, 0x1e, 0x73, 0x49, 0x8e, 0xb9, 0x1d, 0x98, 0x5d,
+ 0xd5, 0x1c, 0x2f, 0xac, 0xb9, 0x1b, 0x57, 0x28, 0x08, 0xa0, 0x92, 0xa1, 0x00, 0xaa, 0x0b, 0x73,
+ 0xe1, 0x9d, 0x1c, 0xcb, 0x34, 0x1c, 0x15, 0xbd, 0x00, 0x93, 0x8c, 0x44, 0xa7, 0x20, 0x90, 0xf4,
+ 0xf0, 0xe0, 0x48, 0xcd, 0x9f, 0x8d, 0x3e, 0x0a, 0x53, 0x1d, 0xcd, 0x71, 0xb0, 0x51, 0xc4, 0x3b,
+ 0xd3, 0x8a, 0x65, 0x5a, 0xca, 0x32, 0xe0, 0x75, 0x0c, 0x13, 0xdf, 0x13, 0x60, 0x76, 0x45, 0x75,
+ 0xfd, 0x27, 0xd2, 0x5d, 0xe0, 0xf0, 0x34, 0x64, 0x83, 0x07, 0x5e, 0x48, 0xd8, 0x19, 0x1f, 0x1e,
+ 0x13, 0xbd, 0xbd, 0x05, 0xf3, 0x58, 0x08, 0x3e, 0x35, 0xf7, 0x52, 0xe0, 0x1f, 0x08, 0xb0, 0x50,
+ 0x51, 0x8c, 0xa6, 0xaa, 0xff, 0x00, 0x99, 0xe7, 0x75, 0xed, 0x0e, 0x2c, 0xf4, 0x32, 0xcf, 0x74,
+ 0xe0, 0x15, 0x00, 0x1f, 0xd9, 0xd3, 0x82, 0x87, 0x06, 0x3f, 0x75, 0x25, 0x0e, 0x63, 0x7f, 0x9a,
+ 0xd0, 0x86, 0x85, 0x15, 0xd5, 0xc5, 0x5e, 0xd2, 0x4f, 0xe2, 0x1e, 0x5e, 0x1c, 0x51, 0x7c, 0x7e,
+ 0x2a, 0x01, 0x59, 0x7e, 0x1b, 0xf4, 0x1c, 0x1c, 0x6b, 0xa9, 0x37, 0x94, 0xae, 0xee, 0xf6, 0x25,
+ 0x71, 0x69, 0x42, 0x61, 0x9e, 0x0d, 0xf7, 0x24, 0x71, 0x97, 0x60, 0xf6, 0xa6, 0xa2, 0x6b, 0xe1,
+ 0x3c, 0x98, 0xd7, 0x2e, 0x37, 0x43, 0x86, 0xb8, 0x34, 0x98, 0x43, 0x33, 0x48, 0x74, 0x1f, 0x2e,
+ 0x36, 0x4d, 0x7a, 0x19, 0x24, 0x32, 0x12, 0x64, 0x90, 0x16, 0x81, 0x2e, 0xc1, 0xcd, 0x75, 0x0a,
+ 0xe3, 0x64, 0xed, 0x69, 0x32, 0xe0, 0x4f, 0x75, 0xd0, 0x79, 0x98, 0xa7, 0x73, 0xc3, 0x2e, 0x85,
+ 0xb6, 0xc2, 0xa5, 0x25, 0x4a, 0x66, 0x28, 0x81, 0xe2, 0x88, 0xdf, 0x11, 0x60, 0x9e, 0xbe, 0xea,
+ 0x8e, 0x36, 0xc4, 0xbf, 0x00, 0x69, 0x3f, 0xcc, 0x65, 0xe1, 0xc6, 0x90, 0x4a, 0xd8, 0xa4, 0x17,
+ 0xfe, 0x72, 0x77, 0x29, 0x15, 0xba, 0x4b, 0xbf, 0x23, 0xc0, 0x3c, 0xb5, 0xd8, 0xf7, 0xe3, 0x73,
+ 0x25, 0x2a, 0xe8, 0xf8, 0xac, 0x40, 0xed, 0xad, 0x47, 0xef, 0x11, 0x45, 0x47, 0x71, 0xcf, 0xe7,
+ 0xdf, 0x12, 0x00, 0xad, 0x04, 0x6f, 0x9f, 0xfb, 0x5d, 0x7a, 0xff, 0x9c, 0x84, 0x49, 0x8f, 0xd6,
+ 0xc8, 0xe4, 0xdf, 0xf3, 0x90, 0x62, 0x01, 0x6d, 0x62, 0x7f, 0x45, 0x6f, 0x36, 0x7d, 0xc4, 0x02,
+ 0xfb, 0xc0, 0x7a, 0x52, 0x01, 0x26, 0x3c, 0x33, 0x42, 0x1b, 0xfe, 0xbc, 0x4f, 0x6c, 0x38, 0xa2,
+ 0xea, 0x15, 0x37, 0xa8, 0xe1, 0xe8, 0xaf, 0x55, 0x5c, 0xf4, 0xd3, 0x8c, 0x6d, 0x12, 0x79, 0x3d,
+ 0x32, 0xf0, 0xbe, 0x0c, 0xaf, 0xd0, 0xec, 0x44, 0x65, 0x13, 0x7b, 0x1e, 0x96, 0xc9, 0xc3, 0x3c,
+ 0x2c, 0x2b, 0x00, 0x1d, 0xc5, 0x50, 0xda, 0x6a, 0xc7, 0x53, 0xb5, 0x4c, 0x4c, 0x7e, 0x15, 0x2f,
+ 0xb5, 0xe6, 0x4f, 0x95, 0x38, 0x34, 0xf1, 0xc7, 0x84, 0xc3, 0x16, 0x3d, 0x16, 0x00, 0xb1, 0x0f,
+ 0xf9, 0x5a, 0x6d, 0xf3, 0xb2, 0x4c, 0x4b, 0x1c, 0x63, 0xbd, 0xc5, 0x90, 0x64, 0xa8, 0x18, 0x32,
+ 0x1e, 0x14, 0x43, 0x52, 0xe2, 0xd7, 0x04, 0xc8, 0x85, 0x49, 0x44, 0xa7, 0x20, 0x8b, 0x59, 0x95,
+ 0xbb, 0x56, 0xdb, 0x56, 0x5a, 0x5e, 0x77, 0x25, 0x61, 0x7f, 0x8b, 0x82, 0xd0, 0xc3, 0x54, 0x94,
+ 0xb2, 0xad, 0x5a, 0x8a, 0x66, 0xb3, 0x46, 0x1f, 0xc0, 0x20, 0x89, 0x40, 0x50, 0x1d, 0xa6, 0x19,
+ 0xba, 0x6c, 0x5a, 0x5e, 0xba, 0x3e, 0xbe, 0xc0, 0x5c, 0x0e, 0xd6, 0xde, 0xa0, 0xd3, 0xa5, 0x5c,
+ 0x37, 0xf4, 0x2d, 0x76, 0x00, 0xf5, 0xcf, 0x42, 0xcf, 0xc2, 0x31, 0x9e, 0x56, 0x99, 0x4b, 0xea,
+ 0xd2, 0xdb, 0x32, 0xc7, 0x91, 0xdd, 0xf0, 0xf3, 0xbb, 0x43, 0xfb, 0x50, 0xc4, 0x06, 0xcc, 0xf4,
+ 0x15, 0x87, 0xd1, 0x2b, 0x90, 0x7a, 0x5b, 0x33, 0x5a, 0xe6, 0xdb, 0x03, 0x7b, 0x45, 0x39, 0xbc,
+ 0x6b, 0x64, 0xb6, 0xc4, 0xb0, 0xc4, 0xcf, 0x08, 0xa1, 0x55, 0xe9, 0x28, 0x6a, 0x43, 0xa1, 0xa5,
+ 0x68, 0xfa, 0x9e, 0xcc, 0x17, 0xaf, 0xd9, 0x3e, 0xf4, 0x72, 0x47, 0x77, 0x87, 0x2d, 0x63, 0xa4,
+ 0xbe, 0xe5, 0x2e, 0x7f, 0x44, 0x5a, 0x68, 0x45, 0x8e, 0x5c, 0x9a, 0x84, 0x14, 0xad, 0x89, 0x8b,
+ 0x0d, 0x58, 0x88, 0xc6, 0xee, 0x49, 0x8c, 0x27, 0x7a, 0x13, 0xe3, 0x45, 0x98, 0x6c, 0x75, 0x69,
+ 0xd4, 0xc3, 0x7a, 0xb6, 0xfc, 0x6f, 0xf1, 0x7f, 0x04, 0x38, 0xce, 0xa5, 0x97, 0x38, 0xa5, 0xbf,
+ 0x8f, 0x8c, 0xed, 0xdd, 0xb8, 0xb8, 0x91, 0xaf, 0xb4, 0xbf, 0xa2, 0x19, 0x0e, 0x8f, 0xfb, 0x86,
+ 0x76, 0x4b, 0xbd, 0x9f, 0xf8, 0x3e, 0xc1, 0xfa, 0x73, 0xa8, 0xb9, 0x1f, 0x27, 0xe6, 0x3e, 0x6d,
+ 0xf8, 0x76, 0x3e, 0x8a, 0xa3, 0xdf, 0x17, 0xe0, 0x21, 0xc9, 0xd4, 0xf5, 0x6d, 0xa5, 0xb9, 0xeb,
+ 0xb1, 0xc5, 0x6e, 0xd2, 0xfd, 0xee, 0x3e, 0xb7, 0xe8, 0x33, 0x87, 0x8b, 0x3d, 0x58, 0xa0, 0x1f,
+ 0x6e, 0x53, 0x12, 0x46, 0x6b, 0x53, 0x12, 0x6f, 0xc3, 0x6c, 0x54, 0x75, 0x32, 0xbe, 0xa3, 0xf5,
+ 0x11, 0xc8, 0x75, 0x34, 0x83, 0x77, 0xb4, 0xf4, 0xc7, 0x28, 0xd9, 0x8e, 0x66, 0x04, 0x4e, 0x16,
+ 0xcf, 0x52, 0xde, 0xe9, 0x77, 0xc7, 0xd9, 0x8e, 0xf2, 0x8e, 0x3f, 0x4b, 0xfc, 0xfb, 0x04, 0xe4,
+ 0x1b, 0xaa, 0x4b, 0x0b, 0xec, 0x47, 0x73, 0x00, 0xdb, 0xfd, 0x0d, 0x54, 0xf4, 0x27, 0x2e, 0x2f,
+ 0xc6, 0x65, 0x46, 0x42, 0xd4, 0x1d, 0xbc, 0x93, 0x6a, 0x3c, 0xa6, 0x93, 0x2a, 0x42, 0x4d, 0xef,
+ 0x46, 0xf3, 0xc1, 0xaf, 0x0a, 0x24, 0xdf, 0xc4, 0x35, 0x5f, 0x1d, 0x89, 0x78, 0x39, 0xbd, 0x49,
+ 0x86, 0xf5, 0x26, 0x4a, 0xa7, 0xff, 0x00, 0x1b, 0x18, 0x6c, 0x88, 0x6b, 0x75, 0x89, 0xfd, 0x80,
+ 0xea, 0x68, 0xd3, 0x36, 0x1c, 0x31, 0xe8, 0x49, 0x40, 0x36, 0x26, 0x42, 0x95, 0x9b, 0xb6, 0xda,
+ 0x52, 0x0d, 0x1c, 0x35, 0x3a, 0xe4, 0x58, 0x26, 0xa5, 0x19, 0x3a, 0x52, 0x09, 0x06, 0xc4, 0xf7,
+ 0x05, 0x78, 0xa0, 0x62, 0x76, 0x2c, 0xfc, 0x80, 0xf9, 0x41, 0x91, 0xcf, 0x9b, 0xb6, 0x5d, 0x98,
+ 0xe9, 0xfb, 0xf9, 0x11, 0xd6, 0x44, 0xee, 0x07, 0x48, 0xec, 0x26, 0x62, 0x6a, 0xc6, 0xa4, 0xbc,
+ 0xc2, 0xcf, 0xc6, 0x77, 0xf6, 0x2c, 0xf0, 0x30, 0xfa, 0xd0, 0xa5, 0x7a, 0x35, 0xcd, 0xc1, 0xf1,
+ 0xe3, 0x55, 0xfc, 0x50, 0x80, 0x63, 0xd8, 0x33, 0x84, 0xba, 0xf2, 0x8e, 0x84, 0xf5, 0xfe, 0xb6,
+ 0xc1, 0xe4, 0x41, 0xdb, 0x06, 0xa3, 0x34, 0xf2, 0x1f, 0x58, 0x55, 0xa2, 0xaf, 0x89, 0x8e, 0x31,
+ 0x77, 0xa2, 0x9f, 0xb9, 0x61, 0x85, 0x94, 0x13, 0xfd, 0x4c, 0xf1, 0x0c, 0x45, 0x77, 0xf8, 0x25,
+ 0x0f, 0xdb, 0xe1, 0x17, 0x95, 0x29, 0xab, 0xf9, 0x4d, 0xfb, 0xc1, 0xef, 0x16, 0xbc, 0xf6, 0x57,
+ 0x21, 0xdc, 0x8f, 0x1b, 0xee, 0x8d, 0x4d, 0xf4, 0xf6, 0xc6, 0x9e, 0xff, 0x8b, 0x17, 0x21, 0xc7,
+ 0x32, 0x2b, 0x34, 0xa0, 0xb0, 0xd1, 0x5f, 0x0a, 0x90, 0xe5, 0xb3, 0x91, 0x28, 0xfa, 0x1d, 0x12,
+ 0x91, 0x1a, 0x2d, 0x9e, 0xdd, 0xc7, 0x4c, 0xea, 0xed, 0xc4, 0xf6, 0x8f, 0xff, 0xed, 0x3f, 0x7d,
+ 0x21, 0xa1, 0xa0, 0x27, 0x4a, 0x37, 0xcf, 0x95, 0x6e, 0xd3, 0x97, 0xf0, 0xcb, 0xec, 0x00, 0x9c,
+ 0xd2, 0x62, 0xc9, 0x2f, 0x1b, 0x94, 0x16, 0xef, 0x78, 0x3f, 0xe7, 0x74, 0xae, 0x9f, 0x43, 0x25,
+ 0x3c, 0xdf, 0x9f, 0x77, 0x3b, 0x38, 0xc4, 0x3b, 0x25, 0x92, 0xde, 0x2a, 0xdd, 0xc6, 0xff, 0x04,
+ 0x28, 0xe8, 0xcf, 0x04, 0x80, 0xa0, 0x38, 0x8d, 0xa2, 0x8f, 0xa2, 0xaf, 0x7a, 0x5d, 0x1c, 0x98,
+ 0x68, 0x15, 0x5d, 0x42, 0xbd, 0xc1, 0xa8, 0xc7, 0x87, 0x12, 0x43, 0xbb, 0x4f, 0x47, 0x69, 0xf1,
+ 0xce, 0xf5, 0x57, 0xd0, 0xc5, 0x11, 0xa9, 0x2f, 0xdd, 0x0e, 0x54, 0xee, 0x0e, 0xfa, 0x8e, 0x00,
+ 0x53, 0xa1, 0x5e, 0x00, 0x14, 0x2d, 0xf0, 0xa8, 0x7e, 0x81, 0xe2, 0x90, 0x9c, 0xa1, 0xd8, 0x21,
+ 0x2c, 0xb5, 0xc5, 0x91, 0x0e, 0xe4, 0x82, 0xb0, 0x78, 0xfd, 0x19, 0x71, 0xd4, 0x33, 0xb9, 0x20,
+ 0x2c, 0xa2, 0x0f, 0x05, 0x98, 0x0a, 0xd5, 0xea, 0x63, 0x78, 0x89, 0xaa, 0xe7, 0x0f, 0xe5, 0xe5,
+ 0x16, 0xe1, 0xc5, 0x2d, 0x8e, 0x74, 0x3c, 0x98, 0x97, 0x72, 0xf1, 0x50, 0x27, 0x84, 0x19, 0xfb,
+ 0x4c, 0x02, 0x72, 0xe1, 0xea, 0x3d, 0x5a, 0x1c, 0xc0, 0x59, 0x4f, 0xd6, 0x67, 0x28, 0x6b, 0xbf,
+ 0x21, 0x10, 0xde, 0x7e, 0x45, 0x28, 0xbe, 0x30, 0x02, 0x73, 0x25, 0x3f, 0x50, 0x64, 0x8c, 0x2a,
+ 0xe2, 0x9b, 0x87, 0x61, 0x94, 0x5b, 0xef, 0x36, 0x1f, 0x07, 0xdf, 0x29, 0xd1, 0x62, 0x11, 0x16,
+ 0xc4, 0x37, 0x12, 0xa1, 0x57, 0x07, 0x1f, 0x95, 0x9e, 0x8f, 0x8b, 0xd1, 0xe2, 0xeb, 0xff, 0x43,
+ 0x05, 0xf3, 0x87, 0x54, 0x30, 0xbf, 0x27, 0x88, 0x2b, 0x07, 0x16, 0x8c, 0xa3, 0xba, 0xdc, 0xce,
+ 0x58, 0x4e, 0x37, 0x44, 0xe5, 0xde, 0xc8, 0x49, 0x09, 0xed, 0x83, 0xfe, 0x53, 0x80, 0x99, 0xbe,
+ 0xca, 0x3f, 0x7a, 0x32, 0x36, 0x96, 0x8d, 0xea, 0x10, 0x18, 0x2a, 0xa2, 0x0f, 0xa8, 0x88, 0x7e,
+ 0x52, 0x10, 0x9f, 0x1f, 0xe9, 0x62, 0x38, 0xfe, 0x86, 0x58, 0x24, 0x35, 0x71, 0xf9, 0x50, 0x22,
+ 0xd1, 0xfd, 0xa5, 0xd0, 0xff, 0xd3, 0xe0, 0xb6, 0xbf, 0xef, 0x34, 0xbe, 0xbc, 0x19, 0xd3, 0x66,
+ 0x30, 0x94, 0xf7, 0x2f, 0x52, 0xde, 0xdf, 0x17, 0xc4, 0x0b, 0xa3, 0xf2, 0x1e, 0xec, 0x89, 0xd9,
+ 0x5f, 0xa5, 0xfa, 0x75, 0x60, 0xf6, 0x3b, 0xfc, 0x6a, 0xe8, 0xdf, 0x04, 0x98, 0xee, 0x69, 0x3e,
+ 0x40, 0x8f, 0xc7, 0x31, 0x1f, 0xd1, 0xa2, 0x30, 0x94, 0xef, 0xcf, 0x52, 0xbe, 0x3f, 0x2d, 0x88,
+ 0xcf, 0x8d, 0xca, 0x37, 0xdd, 0x0e, 0xf3, 0x7c, 0x59, 0xac, 0x1c, 0x8a, 0x67, 0xc5, 0x5b, 0x09,
+ 0xf3, 0x9b, 0xe5, 0xfb, 0x12, 0x62, 0x82, 0x8b, 0x88, 0xd6, 0x85, 0xa1, 0x9c, 0xfe, 0x2c, 0xe5,
+ 0xf4, 0x3d, 0x41, 0x7c, 0x71, 0x74, 0xed, 0x66, 0x43, 0x98, 0xd9, 0x2b, 0xe2, 0x6b, 0x87, 0xd4,
+ 0xef, 0x60, 0x31, 0xf4, 0xaf, 0x02, 0x64, 0xf9, 0xfe, 0x86, 0x18, 0x7e, 0x23, 0x5a, 0x20, 0x86,
+ 0xf2, 0xfb, 0x33, 0x94, 0xdf, 0x77, 0x47, 0xe5, 0xb7, 0xcb, 0xed, 0x75, 0x37, 0x0e, 0xb7, 0xe3,
+ 0xad, 0x84, 0xfe, 0x4b, 0x80, 0xa9, 0x50, 0x0b, 0x42, 0x8c, 0x4f, 0x8f, 0x6a, 0x53, 0x18, 0xca,
+ 0xee, 0xcf, 0x53, 0x76, 0x3f, 0x77, 0x90, 0x0b, 0xec, 0x6f, 0x86, 0xf9, 0xdd, 0x10, 0x5f, 0x3f,
+ 0x94, 0x8f, 0xef, 0x5d, 0x10, 0xfd, 0xb5, 0x00, 0x53, 0xa1, 0x96, 0x85, 0x18, 0xb6, 0xa3, 0xda,
+ 0x1a, 0x86, 0xb2, 0xcd, 0x22, 0xcd, 0xc5, 0x11, 0x23, 0xcd, 0xc5, 0xc3, 0x45, 0x9a, 0x1f, 0x0a,
+ 0x90, 0x0b, 0xd7, 0xa3, 0x63, 0x82, 0x98, 0xc8, 0x8a, 0x7d, 0xf1, 0xf1, 0x7d, 0xcd, 0x65, 0x2f,
+ 0x81, 0x5d, 0xc2, 0xa1, 0x8a, 0x96, 0xf6, 0x13, 0x78, 0x06, 0x85, 0xed, 0xeb, 0x4f, 0xa3, 0x73,
+ 0xfb, 0xe4, 0x91, 0xab, 0x86, 0xff, 0x9d, 0x00, 0x59, 0xbe, 0xe5, 0x21, 0xe6, 0x3e, 0x46, 0x74,
+ 0x45, 0xec, 0x37, 0xe8, 0x64, 0x7c, 0x0c, 0x3a, 0xa9, 0x80, 0x1e, 0x7c, 0x56, 0x97, 0xd0, 0xab,
+ 0x23, 0xf3, 0x51, 0xba, 0xcd, 0x37, 0x1a, 0xdc, 0x41, 0xff, 0x22, 0xc0, 0x74, 0x4f, 0x3f, 0x43,
+ 0x8c, 0x1b, 0x89, 0xee, 0x7a, 0x28, 0x2e, 0x78, 0x93, 0xbd, 0xff, 0xe8, 0x66, 0xa9, 0xda, 0xb1,
+ 0xdc, 0x3d, 0xce, 0xc8, 0x3c, 0x3b, 0x1a, 0x5b, 0x17, 0x9a, 0x64, 0xa3, 0xd1, 0x3c, 0x66, 0x2c,
+ 0x83, 0xc1, 0x6a, 0xf8, 0xb6, 0x4d, 0xf7, 0xf4, 0x2a, 0xc4, 0xb0, 0x1a, 0xdd, 0xd1, 0x50, 0x3c,
+ 0x15, 0x63, 0x93, 0x82, 0x99, 0xde, 0x5b, 0x68, 0xf8, 0x51, 0xde, 0x29, 0x39, 0x1c, 0xde, 0xf5,
+ 0x67, 0xd1, 0xd3, 0xfb, 0xe4, 0x94, 0xa2, 0xb1, 0xba, 0x27, 0xb6, 0x9b, 0xa1, 0x9c, 0x30, 0x8a,
+ 0x7f, 0x48, 0xf7, 0xd6, 0xac, 0x8b, 0x8b, 0xfb, 0x99, 0xca, 0xae, 0xda, 0x7b, 0xf4, 0x34, 0x3f,
+ 0x25, 0xa0, 0x17, 0x86, 0x5f, 0x36, 0xde, 0xa0, 0x04, 0x71, 0xec, 0xf5, 0xcb, 0xe8, 0xb5, 0xbb,
+ 0x13, 0x11, 0xa3, 0xff, 0x10, 0x20, 0xc3, 0xd5, 0xbd, 0xd1, 0x63, 0x71, 0xa7, 0xd8, 0xfb, 0x46,
+ 0x1a, 0x9c, 0x15, 0x17, 0xbf, 0x4c, 0xb9, 0xfc, 0x82, 0xc7, 0xe5, 0x01, 0x5e, 0x02, 0xd7, 0xaf,
+ 0xa1, 0xad, 0x7b, 0x12, 0xf7, 0xa3, 0x7f, 0x17, 0x20, 0x17, 0xee, 0xfc, 0x88, 0xb1, 0xac, 0x91,
+ 0xed, 0x21, 0x43, 0x8d, 0xd0, 0xe7, 0x29, 0xef, 0x3f, 0x2d, 0x88, 0x07, 0x3e, 0xe1, 0xbb, 0x11,
+ 0x03, 0xf1, 0x8b, 0xa1, 0xff, 0x16, 0x20, 0x17, 0x6e, 0x10, 0x89, 0x61, 0x39, 0xb2, 0x8b, 0x64,
+ 0x28, 0xcb, 0xfe, 0x71, 0x2f, 0x1e, 0xe2, 0xb8, 0x17, 0xef, 0xd1, 0x71, 0xff, 0x5a, 0x02, 0x8e,
+ 0xc5, 0x14, 0xaa, 0x50, 0xf4, 0x0f, 0xbb, 0x06, 0x97, 0xb5, 0x86, 0x4a, 0xe3, 0x5b, 0x54, 0x1a,
+ 0xbf, 0x29, 0x88, 0xe5, 0x03, 0x3f, 0x83, 0x6d, 0x46, 0x02, 0xd6, 0x84, 0xa6, 0xf8, 0x89, 0x7b,
+ 0x22, 0x19, 0x7e, 0x13, 0xf4, 0xf5, 0x04, 0xcc, 0x47, 0x96, 0x67, 0xd1, 0xb9, 0x61, 0x99, 0x82,
+ 0xbe, 0x52, 0xee, 0x50, 0x09, 0xfd, 0x11, 0x95, 0xd0, 0xb7, 0x05, 0xaa, 0xe3, 0x07, 0x4d, 0x14,
+ 0x04, 0x1b, 0x63, 0x31, 0xed, 0x88, 0xcd, 0x7b, 0x93, 0x27, 0xe8, 0xdd, 0x09, 0x7d, 0x4f, 0x80,
+ 0xb4, 0x5f, 0xcd, 0x42, 0xa7, 0xf7, 0x55, 0xed, 0x1a, 0x2a, 0x93, 0x5f, 0xa4, 0x32, 0xf9, 0x92,
+ 0x20, 0xbe, 0x32, 0x6a, 0x70, 0x1d, 0x2e, 0x70, 0x61, 0x59, 0xd4, 0xc5, 0x2b, 0x87, 0x92, 0x85,
+ 0xdd, 0xbb, 0x22, 0xe6, 0x79, 0x2a, 0x54, 0x04, 0x8b, 0x7f, 0x58, 0xf4, 0x15, 0xca, 0xee, 0x65,
+ 0x66, 0x20, 0xd8, 0xec, 0x6e, 0x64, 0x06, 0x74, 0x7e, 0x35, 0xf4, 0xbf, 0x02, 0x4c, 0xf7, 0xd4,
+ 0xd4, 0xe2, 0x32, 0x03, 0x91, 0x95, 0xb7, 0xa1, 0x7c, 0xff, 0x02, 0xe5, 0xfb, 0x8b, 0x82, 0x78,
+ 0x71, 0x34, 0xbe, 0xc9, 0x76, 0x96, 0xb7, 0x1d, 0xe6, 0xfc, 0xe3, 0xe2, 0xea, 0xe1, 0x9e, 0x54,
+ 0x7d, 0x4b, 0xa2, 0x77, 0x13, 0x80, 0xfa, 0xcb, 0x72, 0x68, 0x29, 0xda, 0x57, 0xc6, 0xd5, 0xef,
+ 0x86, 0x0a, 0xe1, 0x2b, 0x54, 0x08, 0x5f, 0x16, 0xc4, 0x8f, 0x8d, 0x24, 0x84, 0xa6, 0xb7, 0x63,
+ 0x48, 0x0e, 0x9b, 0xe2, 0xc6, 0xa1, 0xe4, 0x10, 0xb9, 0x2a, 0xfa, 0x7c, 0x82, 0xe4, 0x88, 0xf8,
+ 0xf6, 0x8d, 0xf8, 0x1c, 0x51, 0x44, 0x93, 0xc7, 0x50, 0x21, 0x7c, 0x93, 0x0a, 0xe1, 0xeb, 0x82,
+ 0xf8, 0xea, 0x61, 0x2c, 0x22, 0xde, 0x12, 0x4b, 0x61, 0x5b, 0xfc, 0xe1, 0x7b, 0x66, 0x0b, 0xd9,
+ 0x1e, 0xe8, 0x47, 0x69, 0xc7, 0x41, 0xf8, 0xff, 0xae, 0x7a, 0x22, 0x56, 0x28, 0x11, 0xf5, 0xcd,
+ 0xa1, 0x52, 0xf9, 0x25, 0x2a, 0x95, 0x9f, 0x13, 0xc4, 0x97, 0x47, 0xb5, 0x0b, 0xa1, 0xfd, 0xb0,
+ 0x48, 0x24, 0x71, 0xed, 0xb0, 0x39, 0x87, 0xde, 0x35, 0xd1, 0xe7, 0xbc, 0x5f, 0xa2, 0xf4, 0x96,
+ 0x0c, 0x07, 0xfc, 0x36, 0x24, 0xba, 0x1a, 0x3a, 0x54, 0x14, 0x5f, 0xa5, 0xa2, 0xf8, 0xe5, 0xd1,
+ 0x82, 0x0a, 0xe6, 0x26, 0x7b, 0xf6, 0xc4, 0xe2, 0xb8, 0x2a, 0x7e, 0xfc, 0xf0, 0x29, 0x98, 0xfe,
+ 0x75, 0x2f, 0x7d, 0x4d, 0x80, 0x63, 0x4d, 0xb3, 0x13, 0xc5, 0xcd, 0xa5, 0xd9, 0x8a, 0xf7, 0xdf,
+ 0x5f, 0x91, 0xbc, 0x71, 0x1d, 0xbf, 0x73, 0xeb, 0xc2, 0xf5, 0x8b, 0x6c, 0x6e, 0xdb, 0xd4, 0x15,
+ 0xa3, 0xbd, 0x64, 0xda, 0xed, 0x52, 0x5b, 0x35, 0xc8, 0x2b, 0xb8, 0x44, 0x87, 0x14, 0x4b, 0x73,
+ 0x42, 0xff, 0xbb, 0xec, 0x4b, 0xfe, 0xc7, 0x37, 0x12, 0x0f, 0xac, 0x50, 0xf4, 0x8a, 0x6e, 0x76,
+ 0x5b, 0x4b, 0x15, 0x7f, 0xc3, 0xab, 0xe7, 0xfe, 0xdc, 0x1b, 0x7b, 0x93, 0x8c, 0xbd, 0xe9, 0x8f,
+ 0xbd, 0x79, 0xf5, 0xdc, 0x76, 0x8a, 0x6c, 0xf0, 0xf4, 0xf7, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61,
+ 0xea, 0xa4, 0xfb, 0xbd, 0x56, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ClusterManagerClient is the client API for ClusterManager service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ClusterManagerClient interface {
+ // Lists all clusters owned by a project in either the specified zone or all
+ // zones.
+ ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
+ // Gets the details of a specific cluster.
+ GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
+ // Creates a cluster, consisting of the specified number and type of Google
+ // Compute Engine instances.
+ //
+ // By default, the cluster is created in the project's
+ // [default network](/compute/docs/networks-and-firewalls#networks).
+ //
+ // One firewall is added for the cluster. After cluster creation,
+ // the cluster creates routes for each node to allow the containers
+ // on that node to communicate with all other instances in the
+ // cluster.
+ //
+ // Finally, an entry is added to the project's global metadata indicating
+ // which CIDR range is being used by the cluster.
+ CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Updates the settings of a specific cluster.
+ UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Updates the version and/or image type for a specific node pool.
+ UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the autoscaling settings for a specific node pool.
+ SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the logging service for a specific cluster.
+ SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the monitoring service for a specific cluster.
+ SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the addons for a specific cluster.
+ SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the locations for a specific cluster.
+ SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Updates the master for a specific cluster.
+ UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Used to set master auth materials. Currently supports :-
+ // Changing the admin password for a specific cluster.
+ // This can be either via password generation or explicitly set the password.
+ SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Deletes the cluster, including the Kubernetes endpoint and all worker
+ // nodes.
+ //
+ // Firewalls and routes that were configured during cluster creation
+ // are also deleted.
+ //
+ // Other Google Compute Engine resources that might be in use by the cluster
+ // (e.g. load balancer resources) will not be deleted if they weren't present
+ // at the initial create time.
+ DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Lists all operations in a project in a specific zone or all zones.
+ ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error)
+ // Gets the specified operation.
+ GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Cancels the specified operation.
+ CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ // Returns configuration info about the Kubernetes Engine service.
+ GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error)
+ // Lists the node pools for a cluster.
+ ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error)
+ // Retrieves the node pool requested.
+ GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error)
+ // Creates a node pool for a cluster.
+ CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Deletes a node pool from a cluster.
+ DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Roll back the previously Aborted or Failed NodePool upgrade.
+ // This will be an no-op if the last upgrade successfully completed.
+ RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the NodeManagement options for a node pool.
+ SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets labels on a cluster.
+ SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Enables or disables the ABAC authorization mechanism on a cluster.
+ SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Start master IP rotation.
+ StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Completes master IP rotation.
+ CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the size for a specific node pool.
+ SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Enables/Disables Network Policy for a cluster.
+ SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error)
+ // Sets the maintenance policy for a cluster.
+ SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error)
+}
+
+type clusterManagerClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewClusterManagerClient(cc *grpc.ClientConn) ClusterManagerClient {
+ return &clusterManagerClient{cc}
+}
+
+func (c *clusterManagerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
+ out := new(ListClustersResponse)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListClusters", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
+ out := new(Cluster)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetCluster", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateCluster", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateCluster", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) UpdateNodePool(ctx context.Context, in *UpdateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateNodePool", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, in *SetNodePoolAutoscalingRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetLoggingService(ctx context.Context, in *SetLoggingServiceRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLoggingService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetMonitoringService(ctx context.Context, in *SetMonitoringServiceRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMonitoringService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetAddonsConfig(ctx context.Context, in *SetAddonsConfigRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetAddonsConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetLocations(ctx context.Context, in *SetLocationsRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLocations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) UpdateMaster(ctx context.Context, in *UpdateMasterRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/UpdateMaster", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetMasterAuth(ctx context.Context, in *SetMasterAuthRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMasterAuth", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteCluster", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) ListOperations(ctx context.Context, in *ListOperationsRequest, opts ...grpc.CallOption) (*ListOperationsResponse, error) {
+ out := new(ListOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) GetOperation(ctx context.Context, in *GetOperationRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) CancelOperation(ctx context.Context, in *CancelOperationRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
+ out := new(empty.Empty)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CancelOperation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) GetServerConfig(ctx context.Context, in *GetServerConfigRequest, opts ...grpc.CallOption) (*ServerConfig, error) {
+ out := new(ServerConfig)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetServerConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) ListNodePools(ctx context.Context, in *ListNodePoolsRequest, opts ...grpc.CallOption) (*ListNodePoolsResponse, error) {
+ out := new(ListNodePoolsResponse)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/ListNodePools", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) GetNodePool(ctx context.Context, in *GetNodePoolRequest, opts ...grpc.CallOption) (*NodePool, error) {
+ out := new(NodePool)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/GetNodePool", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) CreateNodePool(ctx context.Context, in *CreateNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CreateNodePool", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) DeleteNodePool(ctx context.Context, in *DeleteNodePoolRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/DeleteNodePool", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, in *RollbackNodePoolUpgradeRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetNodePoolManagement(ctx context.Context, in *SetNodePoolManagementRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolManagement", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetLabels(ctx context.Context, in *SetLabelsRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLabels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetLegacyAbac(ctx context.Context, in *SetLegacyAbacRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetLegacyAbac", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) StartIPRotation(ctx context.Context, in *StartIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/StartIPRotation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) CompleteIPRotation(ctx context.Context, in *CompleteIPRotationRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/CompleteIPRotation", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetNodePoolSize(ctx context.Context, in *SetNodePoolSizeRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNodePoolSize", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetNetworkPolicy(ctx context.Context, in *SetNetworkPolicyRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetNetworkPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterManagerClient) SetMaintenancePolicy(ctx context.Context, in *SetMaintenancePolicyRequest, opts ...grpc.CallOption) (*Operation, error) {
+ out := new(Operation)
+ err := c.cc.Invoke(ctx, "/google.container.v1.ClusterManager/SetMaintenancePolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ClusterManagerServer is the server API for ClusterManager service.
+type ClusterManagerServer interface {
+ // Lists all clusters owned by a project in either the specified zone or all
+ // zones.
+ ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
+ // Gets the details of a specific cluster.
+ GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
+ // Creates a cluster, consisting of the specified number and type of Google
+ // Compute Engine instances.
+ //
+ // By default, the cluster is created in the project's
+ // [default network](/compute/docs/networks-and-firewalls#networks).
+ //
+ // One firewall is added for the cluster. After cluster creation,
+ // the cluster creates routes for each node to allow the containers
+ // on that node to communicate with all other instances in the
+ // cluster.
+ //
+ // Finally, an entry is added to the project's global metadata indicating
+ // which CIDR range is being used by the cluster.
+ CreateCluster(context.Context, *CreateClusterRequest) (*Operation, error)
+ // Updates the settings of a specific cluster.
+ UpdateCluster(context.Context, *UpdateClusterRequest) (*Operation, error)
+ // Updates the version and/or image type for a specific node pool.
+ UpdateNodePool(context.Context, *UpdateNodePoolRequest) (*Operation, error)
+ // Sets the autoscaling settings for a specific node pool.
+ SetNodePoolAutoscaling(context.Context, *SetNodePoolAutoscalingRequest) (*Operation, error)
+ // Sets the logging service for a specific cluster.
+ SetLoggingService(context.Context, *SetLoggingServiceRequest) (*Operation, error)
+ // Sets the monitoring service for a specific cluster.
+ SetMonitoringService(context.Context, *SetMonitoringServiceRequest) (*Operation, error)
+ // Sets the addons for a specific cluster.
+ SetAddonsConfig(context.Context, *SetAddonsConfigRequest) (*Operation, error)
+ // Sets the locations for a specific cluster.
+ SetLocations(context.Context, *SetLocationsRequest) (*Operation, error)
+ // Updates the master for a specific cluster.
+ UpdateMaster(context.Context, *UpdateMasterRequest) (*Operation, error)
+ // Used to set master auth materials. Currently supports :-
+ // Changing the admin password for a specific cluster.
+ // This can be either via password generation or explicitly set the password.
+ SetMasterAuth(context.Context, *SetMasterAuthRequest) (*Operation, error)
+ // Deletes the cluster, including the Kubernetes endpoint and all worker
+ // nodes.
+ //
+ // Firewalls and routes that were configured during cluster creation
+ // are also deleted.
+ //
+ // Other Google Compute Engine resources that might be in use by the cluster
+ // (e.g. load balancer resources) will not be deleted if they weren't present
+ // at the initial create time.
+ DeleteCluster(context.Context, *DeleteClusterRequest) (*Operation, error)
+ // Lists all operations in a project in a specific zone or all zones.
+ ListOperations(context.Context, *ListOperationsRequest) (*ListOperationsResponse, error)
+ // Gets the specified operation.
+ GetOperation(context.Context, *GetOperationRequest) (*Operation, error)
+ // Cancels the specified operation.
+ CancelOperation(context.Context, *CancelOperationRequest) (*empty.Empty, error)
+ // Returns configuration info about the Kubernetes Engine service.
+ GetServerConfig(context.Context, *GetServerConfigRequest) (*ServerConfig, error)
+ // Lists the node pools for a cluster.
+ ListNodePools(context.Context, *ListNodePoolsRequest) (*ListNodePoolsResponse, error)
+ // Retrieves the node pool requested.
+ GetNodePool(context.Context, *GetNodePoolRequest) (*NodePool, error)
+ // Creates a node pool for a cluster.
+ CreateNodePool(context.Context, *CreateNodePoolRequest) (*Operation, error)
+ // Deletes a node pool from a cluster.
+ DeleteNodePool(context.Context, *DeleteNodePoolRequest) (*Operation, error)
+ // Roll back the previously Aborted or Failed NodePool upgrade.
+ // This will be an no-op if the last upgrade successfully completed.
+ RollbackNodePoolUpgrade(context.Context, *RollbackNodePoolUpgradeRequest) (*Operation, error)
+ // Sets the NodeManagement options for a node pool.
+ SetNodePoolManagement(context.Context, *SetNodePoolManagementRequest) (*Operation, error)
+ // Sets labels on a cluster.
+ SetLabels(context.Context, *SetLabelsRequest) (*Operation, error)
+ // Enables or disables the ABAC authorization mechanism on a cluster.
+ SetLegacyAbac(context.Context, *SetLegacyAbacRequest) (*Operation, error)
+ // Start master IP rotation.
+ StartIPRotation(context.Context, *StartIPRotationRequest) (*Operation, error)
+ // Completes master IP rotation.
+ CompleteIPRotation(context.Context, *CompleteIPRotationRequest) (*Operation, error)
+ // Sets the size for a specific node pool.
+ SetNodePoolSize(context.Context, *SetNodePoolSizeRequest) (*Operation, error)
+ // Enables/Disables Network Policy for a cluster.
+ SetNetworkPolicy(context.Context, *SetNetworkPolicyRequest) (*Operation, error)
+ // Sets the maintenance policy for a cluster.
+ SetMaintenancePolicy(context.Context, *SetMaintenancePolicyRequest) (*Operation, error)
+}
+
+func RegisterClusterManagerServer(s *grpc.Server, srv ClusterManagerServer) {
+ s.RegisterService(&_ClusterManager_serviceDesc, srv)
+}
+
+func _ClusterManager_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListClustersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).ListClusters(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/ListClusters",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).ListClusters(ctx, req.(*ListClustersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).GetCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/GetCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).GetCluster(ctx, req.(*GetClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).CreateCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/CreateCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).CreateCluster(ctx, req.(*CreateClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).UpdateCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/UpdateCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_UpdateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNodePoolRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).UpdateNodePool(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/UpdateNodePool",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).UpdateNodePool(ctx, req.(*UpdateNodePoolRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetNodePoolAutoscaling_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetNodePoolAutoscalingRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetNodePoolAutoscaling",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetNodePoolAutoscaling(ctx, req.(*SetNodePoolAutoscalingRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetLoggingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetLoggingServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetLoggingService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetLoggingService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetLoggingService(ctx, req.(*SetLoggingServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetMonitoringService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetMonitoringServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetMonitoringService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetMonitoringService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetMonitoringService(ctx, req.(*SetMonitoringServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetAddonsConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetAddonsConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetAddonsConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetAddonsConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetAddonsConfig(ctx, req.(*SetAddonsConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetLocations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetLocationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetLocations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetLocations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetLocations(ctx, req.(*SetLocationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_UpdateMaster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateMasterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).UpdateMaster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/UpdateMaster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).UpdateMaster(ctx, req.(*UpdateMasterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetMasterAuth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetMasterAuthRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetMasterAuth(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetMasterAuth",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetMasterAuth(ctx, req.(*SetMasterAuthRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteClusterRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).DeleteCluster(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/DeleteCluster",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_ListOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).ListOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/ListOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).ListOperations(ctx, req.(*ListOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_GetOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).GetOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/GetOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).GetOperation(ctx, req.(*GetOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_CancelOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CancelOperationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).CancelOperation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/CancelOperation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).CancelOperation(ctx, req.(*CancelOperationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_GetServerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServerConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).GetServerConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/GetServerConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).GetServerConfig(ctx, req.(*GetServerConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_ListNodePools_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNodePoolsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).ListNodePools(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/ListNodePools",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).ListNodePools(ctx, req.(*ListNodePoolsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_GetNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNodePoolRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).GetNodePool(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/GetNodePool",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).GetNodePool(ctx, req.(*GetNodePoolRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_CreateNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNodePoolRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).CreateNodePool(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/CreateNodePool",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).CreateNodePool(ctx, req.(*CreateNodePoolRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_DeleteNodePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNodePoolRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).DeleteNodePool(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/DeleteNodePool",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).DeleteNodePool(ctx, req.(*DeleteNodePoolRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_RollbackNodePoolUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RollbackNodePoolUpgradeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).RollbackNodePoolUpgrade(ctx, req.(*RollbackNodePoolUpgradeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetNodePoolManagement_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetNodePoolManagementRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetNodePoolManagement",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetNodePoolManagement(ctx, req.(*SetNodePoolManagementRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetLabels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetLabelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetLabels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetLabels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetLabels(ctx, req.(*SetLabelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetLegacyAbac_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetLegacyAbacRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetLegacyAbac(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetLegacyAbac",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetLegacyAbac(ctx, req.(*SetLegacyAbacRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_StartIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartIPRotationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).StartIPRotation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/StartIPRotation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).StartIPRotation(ctx, req.(*StartIPRotationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_CompleteIPRotation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CompleteIPRotationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).CompleteIPRotation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/CompleteIPRotation",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).CompleteIPRotation(ctx, req.(*CompleteIPRotationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetNodePoolSize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetNodePoolSizeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetNodePoolSize(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetNodePoolSize",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetNodePoolSize(ctx, req.(*SetNodePoolSizeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetNetworkPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetNetworkPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetNetworkPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetNetworkPolicy(ctx, req.(*SetNetworkPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ClusterManager_SetMaintenancePolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SetMaintenancePolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.container.v1.ClusterManager/SetMaintenancePolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterManagerServer).SetMaintenancePolicy(ctx, req.(*SetMaintenancePolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _ClusterManager_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.container.v1.ClusterManager",
+ HandlerType: (*ClusterManagerServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListClusters",
+ Handler: _ClusterManager_ListClusters_Handler,
+ },
+ {
+ MethodName: "GetCluster",
+ Handler: _ClusterManager_GetCluster_Handler,
+ },
+ {
+ MethodName: "CreateCluster",
+ Handler: _ClusterManager_CreateCluster_Handler,
+ },
+ {
+ MethodName: "UpdateCluster",
+ Handler: _ClusterManager_UpdateCluster_Handler,
+ },
+ {
+ MethodName: "UpdateNodePool",
+ Handler: _ClusterManager_UpdateNodePool_Handler,
+ },
+ {
+ MethodName: "SetNodePoolAutoscaling",
+ Handler: _ClusterManager_SetNodePoolAutoscaling_Handler,
+ },
+ {
+ MethodName: "SetLoggingService",
+ Handler: _ClusterManager_SetLoggingService_Handler,
+ },
+ {
+ MethodName: "SetMonitoringService",
+ Handler: _ClusterManager_SetMonitoringService_Handler,
+ },
+ {
+ MethodName: "SetAddonsConfig",
+ Handler: _ClusterManager_SetAddonsConfig_Handler,
+ },
+ {
+ MethodName: "SetLocations",
+ Handler: _ClusterManager_SetLocations_Handler,
+ },
+ {
+ MethodName: "UpdateMaster",
+ Handler: _ClusterManager_UpdateMaster_Handler,
+ },
+ {
+ MethodName: "SetMasterAuth",
+ Handler: _ClusterManager_SetMasterAuth_Handler,
+ },
+ {
+ MethodName: "DeleteCluster",
+ Handler: _ClusterManager_DeleteCluster_Handler,
+ },
+ {
+ MethodName: "ListOperations",
+ Handler: _ClusterManager_ListOperations_Handler,
+ },
+ {
+ MethodName: "GetOperation",
+ Handler: _ClusterManager_GetOperation_Handler,
+ },
+ {
+ MethodName: "CancelOperation",
+ Handler: _ClusterManager_CancelOperation_Handler,
+ },
+ {
+ MethodName: "GetServerConfig",
+ Handler: _ClusterManager_GetServerConfig_Handler,
+ },
+ {
+ MethodName: "ListNodePools",
+ Handler: _ClusterManager_ListNodePools_Handler,
+ },
+ {
+ MethodName: "GetNodePool",
+ Handler: _ClusterManager_GetNodePool_Handler,
+ },
+ {
+ MethodName: "CreateNodePool",
+ Handler: _ClusterManager_CreateNodePool_Handler,
+ },
+ {
+ MethodName: "DeleteNodePool",
+ Handler: _ClusterManager_DeleteNodePool_Handler,
+ },
+ {
+ MethodName: "RollbackNodePoolUpgrade",
+ Handler: _ClusterManager_RollbackNodePoolUpgrade_Handler,
+ },
+ {
+ MethodName: "SetNodePoolManagement",
+ Handler: _ClusterManager_SetNodePoolManagement_Handler,
+ },
+ {
+ MethodName: "SetLabels",
+ Handler: _ClusterManager_SetLabels_Handler,
+ },
+ {
+ MethodName: "SetLegacyAbac",
+ Handler: _ClusterManager_SetLegacyAbac_Handler,
+ },
+ {
+ MethodName: "StartIPRotation",
+ Handler: _ClusterManager_StartIPRotation_Handler,
+ },
+ {
+ MethodName: "CompleteIPRotation",
+ Handler: _ClusterManager_CompleteIPRotation_Handler,
+ },
+ {
+ MethodName: "SetNodePoolSize",
+ Handler: _ClusterManager_SetNodePoolSize_Handler,
+ },
+ {
+ MethodName: "SetNetworkPolicy",
+ Handler: _ClusterManager_SetNetworkPolicy_Handler,
+ },
+ {
+ MethodName: "SetMaintenancePolicy",
+ Handler: _ClusterManager_SetMaintenancePolicy_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/container/v1/cluster_service.proto",
+}